xref: /linux/fs/smb/client/cifsfs.c (revision 4b660dbd9ee2059850fd30e0df420ca7a38a1856)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

/* Module-wide behavior flags; several are also exposed as module params below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
/* list of all TCP sessions, protected by cifs_tcp_ses_lock */
struct list_head	cifs_tcp_ses_list;
spinlock_t		cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;

/* Tunables exposed through /sys/module/cifs/parameters */
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* Workqueues used by the client for async I/O, decryption and oplock work */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
__u32 cifs_lock_secret;
160 
161 /*
162  * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
164  * already held, e.g. in open-type syscalls context. Otherwise it can race with
165  * atomic_dec_and_test in deactivate_locked_super.
166  */
167 void
168 cifs_sb_active(struct super_block *sb)
169 {
170 	struct cifs_sb_info *server = CIFS_SB(sb);
171 
172 	if (atomic_inc_return(&server->active) == 1)
173 		atomic_inc(&sb->s_active);
174 }
175 
176 void
177 cifs_sb_deactive(struct super_block *sb)
178 {
179 	struct cifs_sb_info *server = CIFS_SB(sb);
180 
181 	if (atomic_dec_and_test(&server->active))
182 		deactivate_super(sb);
183 }
184 
/*
 * Fill in a freshly-created super block: flags, size/time limits, super
 * operations, bdi/readahead tuning, and finally the root inode/dentry.
 * Returns 0 on success or a negative errno (sb is left for the caller to
 * tear down on failure).
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* snapshot mounts view a point-in-time image, so force read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive shares need matching dentry hash/compare ops */
	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	/* NFS re-export needs stable server inode numbers */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
285 
/*
 * Tear down a cifs super block: drop cached-directory dentries and the
 * root dentry before killing the anonymous super and unmounting.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
305 
/*
 * statfs(2) handler: fill @buf with name-length limit, fsid derived from
 * the volume serial/create time, and (via the protocol-specific queryfs
 * op, if present) block counts.  Returns 0 or a negative errno.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	/* prefer the server-reported path component limit when available */
	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}
337 
338 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
339 {
340 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
341 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
342 	struct TCP_Server_Info *server = tcon->ses->server;
343 
344 	if (server->ops->fallocate)
345 		return server->ops->fallocate(file, tcon, mode, off, len);
346 
347 	return -EOPNOTSUPP;
348 }
349 
350 static int cifs_permission(struct mnt_idmap *idmap,
351 			   struct inode *inode, int mask)
352 {
353 	struct cifs_sb_info *cifs_sb;
354 
355 	cifs_sb = CIFS_SB(inode->i_sb);
356 
357 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
358 		if ((mask & MAY_EXEC) && !execute_ok(inode))
359 			return -EACCES;
360 		else
361 			return 0;
362 	} else /* file mode might have been restricted at mount time
363 		on the client (above and beyond ACL on servers) for
364 		servers which do not support setting and viewing mode bits,
365 		so allowing client to check permissions is useful */
366 		return generic_permission(&nop_mnt_idmap, inode, mask);
367 }
368 
/* slab caches and mempools backing inode, request and mid allocations */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
376 
/*
 * super_operations.alloc_inode: allocate and initialize a cifsInodeInfo
 * from the slab cache.  Returns the embedded VFS inode, or NULL on
 * allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* fresh lease key per inode; used for SMB2+ lease requests */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
414 
415 static void
416 cifs_free_inode(struct inode *inode)
417 {
418 	struct cifsInodeInfo *cinode = CIFS_I(inode);
419 
420 	if (S_ISLNK(inode->i_mode))
421 		kfree(cinode->symlink_target);
422 	kmem_cache_free(cifs_inode_cachep, cinode);
423 }
424 
/*
 * super_operations.evict_inode: flush truncated pages, then detach the
 * fscache cookie (unusing it first if writeback was pinning it) before
 * clearing the inode.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
434 
/*
 * Emit the ",addr=" mount option (IPv4, IPv6 with optional scope id, or
 * "(unknown)") plus ",rdma" when applicable, for /proc/mounts output.
 */
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		/* link-local addresses need the scope id to be meaningful */
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}
458 
/*
 * Emit the ",sec=" mount option for /proc/mounts: the negotiated auth
 * type, an "i" suffix when signing is enabled, and ",cruid=" for
 * Kerberos credential lookups.
 */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		/* anonymous (no username) session shows as sec=none */
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	/* trailing "i" marks a signed session, e.g. "krb5i" */
	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}
493 
/*
 * Emit the ",cache=" mount option.  The if/else order defines flag
 * precedence: strict beats direct I/O beats single-client rw beats ro,
 * with "loose" as the default when none are set.
 */
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
}
510 
511 /*
512  * cifs_show_devname() is used so we show the mount device name with correct
513  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
514  */
515 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
516 {
517 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
518 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
519 
520 	if (devname == NULL)
521 		seq_puts(m, "none");
522 	else {
523 		convert_delimiter(devname, '/');
524 		/* escape all spaces in share names */
525 		seq_escape(m, devname, " \t");
526 		kfree(devname);
527 	}
528 	return 0;
529 }
530 
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	/* dialect, auth and caching flavor first */
	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* source address is shown only when one was bound at mount time */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* boolean mount flags, one option string each */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));

	/* numeric tunables */
	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
728 
/*
 * umount_begin (umount -f) handler: if this is the only mount of the
 * tcon, close deferred files and wake any threads blocked on the
 * server's request/response queues so they can notice the unmount.
 * Lock order: cifs_tcp_ses_lock before tcon->tc_lock.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
771 
/*
 * freeze_fs handler: flush deferred-close file handles for the master
 * tcon before the filesystem is frozen.  Always returns 0.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (!cifs_sb)
		return 0;

	cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
785 
#ifdef CONFIG_CIFS_STATS2
/* show_stats stub for /proc/fs stats output; not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
793 
/* write_inode handler: just let netfs unpin any writeback on the inode */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
798 
799 static int cifs_drop_inode(struct inode *inode)
800 {
801 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
802 
803 	/* no serverino => unconditional eviction */
804 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
805 		generic_drop_inode(inode);
806 }
807 
/* super_operations table wired into every cifs super block */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
828 
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 *
 * Walks the prefix path component by component (split on the dialect's
 * directory separator) from sb->s_root, returning an ERR_PTR on lookup
 * failure or when a non-directory appears mid-path.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with prefixpath handled elsewhere, root of the sb is the answer */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* [p, s) is the current path component */
		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
883 
884 static int cifs_set_super(struct super_block *sb, void *data)
885 {
886 	struct cifs_mnt_data *mnt_data = data;
887 	sb->s_fs_info = mnt_data->cifs_sb;
888 	return set_anon_super(sb, NULL);
889 }
890 
/*
 * Mount entry point shared by the cifs and smb3 filesystem types.
 * Duplicates the fs context, performs the network mount, then finds or
 * creates a matching super block via sget() and returns its root dentry
 * (adjusted for any prefix path).  On the "existing superblock" path the
 * new cifs_sb is unmounted and ownership shifts to the found sb.
 * Returns the root dentry or an ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	/* establish session/tcon with the server */
	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		/* sget failed; unwind the network mount (frees cifs_sb) */
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* when reusing an sb, cifs_sb is NULL so fall back to old_ctx */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	/* pre-sget failure: free everything we allocated here */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
984 
985 
986 static ssize_t
987 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
988 {
989 	ssize_t rc;
990 	struct inode *inode = file_inode(iocb->ki_filp);
991 
992 	if (iocb->ki_flags & IOCB_DIRECT)
993 		return cifs_user_readv(iocb, iter);
994 
995 	rc = cifs_revalidate_mapping(inode);
996 	if (rc)
997 		return rc;
998 
999 	return generic_file_read_iter(iocb, iter);
1000 }
1001 
1002 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1003 {
1004 	struct inode *inode = file_inode(iocb->ki_filp);
1005 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1006 	ssize_t written;
1007 	int rc;
1008 
1009 	if (iocb->ki_filp->f_flags & O_DIRECT) {
1010 		written = cifs_user_writev(iocb, from);
1011 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
1012 			cifs_zap_mapping(inode);
1013 			cifs_dbg(FYI,
1014 				 "Set no oplock for inode=%p after a write operation\n",
1015 				 inode);
1016 			cinode->oplock = 0;
1017 		}
1018 		return written;
1019 	}
1020 
1021 	written = cifs_get_writer(cinode);
1022 	if (written)
1023 		return written;
1024 
1025 	written = generic_file_write_iter(iocb, from);
1026 
1027 	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1028 		goto out;
1029 
1030 	rc = filemap_fdatawrite(inode->i_mapping);
1031 	if (rc)
1032 		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1033 			 rc, inode);
1034 
1035 out:
1036 	cifs_put_writer(cinode);
1037 	return written;
1038 }
1039 
/*
 * llseek for CIFS files.
 *
 * SEEK_SET/SEEK_CUR are purely local.  Other whence values (SEEK_END,
 * SEEK_DATA, SEEK_HOLE) depend on an accurate file size, so dirty pages
 * are waited on and attributes are revalidated from the server first.
 * If the dialect provides its own llseek op, it is preferred over
 * generic_file_llseek().
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* prefer the dialect-specific llseek (if any) over the generic one */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1084 
1085 static int
1086 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1087 {
1088 	/*
1089 	 * Note that this is called by vfs setlease with i_lock held to
1090 	 * protect *lease from going away.
1091 	 */
1092 	struct inode *inode = file_inode(file);
1093 	struct cifsFileInfo *cfile = file->private_data;
1094 
1095 	/* Check if file is oplocked if this is request for new lease */
1096 	if (arg == F_UNLCK ||
1097 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1098 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1099 		return generic_setlease(file, arg, lease, priv);
1100 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1101 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1102 		/*
1103 		 * If the server claims to support oplock on this file, then we
1104 		 * still need to check oplock even if the local_lease mount
1105 		 * option is set, but there are servers which do not support
1106 		 * oplock for which this mount option may be useful if the user
1107 		 * knows that the file won't be changed on the server by anyone
1108 		 * else.
1109 		 */
1110 		return generic_setlease(file, arg, lease, priv);
1111 	else
1112 		return -EAGAIN;
1113 }
1114 
/* Filesystem type registered as "cifs"; mounts go through the fs_context API. */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");

/* Same implementation registered a second time under the "smb3" name. */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1135 
/* Inode operations for directories. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

/* Inode operations for regular files. */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1164 
1165 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1166 			    struct delayed_call *done)
1167 {
1168 	char *target_path;
1169 
1170 	if (!dentry)
1171 		return ERR_PTR(-ECHILD);
1172 
1173 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1174 	if (!target_path)
1175 		return ERR_PTR(-ENOMEM);
1176 
1177 	spin_lock(&inode->i_lock);
1178 	if (likely(CIFS_I(inode)->symlink_target)) {
1179 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1180 	} else {
1181 		kfree(target_path);
1182 		target_path = ERR_PTR(-EOPNOTSUPP);
1183 	}
1184 	spin_unlock(&inode->i_lock);
1185 
1186 	if (!IS_ERR(target_path))
1187 		set_delayed_call(done, kfree_link, target_path);
1188 
1189 	return target_path;
1190 }
1191 
/* Inode operations for symbolic links. */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1198 
1199 /*
1200  * Advance the EOF marker to after the source range.
1201  */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;

	/* Need a handle open for writing to issue the set-file-size request. */
	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* Mirror the advanced EOF into the local netfs and fscache state. */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	/*
	 * Could not move the EOF marker; fall back to flushing the source so
	 * the server at least holds all written data.  Note: this returns 0
	 * when the flush succeeds, allowing the caller's copy to proceed.
	 */
	return filemap_write_and_wait(src_inode->i_mapping);
}
1231 
1232 /*
1233  * Flush out either the folio that overlaps the beginning of a range in which
1234  * pos resides or the folio that overlaps the end of a range unless that folio
1235  * is entirely within the range we're going to invalidate.  We extend the flush
1236  * bounds to encompass the folio.
1237  */
1238 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1239 			    bool first)
1240 {
1241 	struct folio *folio;
1242 	unsigned long long fpos, fend;
1243 	pgoff_t index = pos / PAGE_SIZE;
1244 	size_t size;
1245 	int rc = 0;
1246 
1247 	folio = filemap_get_folio(inode->i_mapping, index);
1248 	if (IS_ERR(folio))
1249 		return 0;
1250 
1251 	size = folio_size(folio);
1252 	fpos = folio_pos(folio);
1253 	fend = fpos + size - 1;
1254 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1255 	*_fend   = max_t(unsigned long long, *_fend, fend);
1256 	if ((first && pos == fpos) || (!first && pos == fend))
1257 		goto out;
1258 
1259 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1260 out:
1261 	folio_put(folio);
1262 	return rc;
1263 }
1264 
/*
 * ->remap_file_range() (clone): ask the server to duplicate extents from
 * src_file into dst_file via the dialect's duplicate_extents op.
 * REMAP_FILE_DEDUP is not supported.  Returns len on success or a
 * negative errno.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "clone to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	/* NOTE(review): duplicate of the FYI message logged above */
	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	/* rc stays -EOPNOTSUPP unless the dialect provides duplicate_extents */
	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* if the clone grew the target, mirror the new size locally */
		if (rc == 0 && new_size > i_size_read(target_inode)) {
			truncate_setsize(target_inode, new_size);
			netfs_resize_file(&target_cifsi->netfs, new_size, true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	/* on success report the full requested length as remapped */
	return rc < 0 ? rc : len;
}
1372 
/*
 * Copy len bytes from src_file at off to dst_file at destoff via the
 * dialect's copychunk_range op.  Source and target must be on the same
 * SMB session (-EXDEV otherwise).  A positive return is treated as the
 * number of bytes copied; negative is an errno.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* server-side copy only works within a single session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	/* push dirty source pages so the server copies current data */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	/* perform the usual pre-write bookkeeping (timestamps, suid drop) */
	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* if the copy grew the target, mirror the new size locally */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1488 
1489 /*
1490  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1491  * is a dummy operation.
1492  */
1493 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1494 {
1495 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1496 		 file, datasync);
1497 
1498 	return 0;
1499 }
1500 
1501 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1502 				struct file *dst_file, loff_t destoff,
1503 				size_t len, unsigned int flags)
1504 {
1505 	unsigned int xid = get_xid();
1506 	ssize_t rc;
1507 	struct cifsFileInfo *cfile = dst_file->private_data;
1508 
1509 	if (cfile->swapfile) {
1510 		rc = -EOPNOTSUPP;
1511 		free_xid(xid);
1512 		return rc;
1513 	}
1514 
1515 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1516 					len, flags);
1517 	free_xid(xid);
1518 
1519 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1520 		rc = splice_copy_file_range(src_file, off, dst_file,
1521 					    destoff, len);
1522 	return rc;
1523 }
1524 
/*
 * Default file operations: cached ("loose") read path with byte-range
 * lock support (.lock/.flock present).
 */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* As cifs_file_ops but with the strict read/write/fsync/mmap variants. */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/*
 * Uncached I/O variant: cifs_direct_readv/writev and copy_splice_read
 * bypass the page cache.
 */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* As cifs_file_ops but without byte-range lock methods ("nobrl" mounts). */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Strict-cache variant without byte-range lock methods. */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Uncached I/O variant without byte-range lock methods. */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* File operations for directories. */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1649 
1650 static void
1651 cifs_init_once(void *inode)
1652 {
1653 	struct cifsInodeInfo *cifsi = inode;
1654 
1655 	inode_init_once(&cifsi->netfs.inode);
1656 	init_rwsem(&cifsi->lock_sem);
1657 }
1658 
1659 static int __init
1660 cifs_init_inodecache(void)
1661 {
1662 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1663 					      sizeof(struct cifsInodeInfo),
1664 					      0, (SLAB_RECLAIM_ACCOUNT|
1665 						SLAB_ACCOUNT),
1666 					      cifs_init_once);
1667 	if (cifs_inode_cachep == NULL)
1668 		return -ENOMEM;
1669 
1670 	return 0;
1671 }
1672 
/* Tear down the inode slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1683 
1684 static int
1685 cifs_init_request_bufs(void)
1686 {
1687 	/*
1688 	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1689 	 * allocate some more bytes for CIFS.
1690 	 */
1691 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1692 
1693 	if (CIFSMaxBufSize < 8192) {
1694 	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1695 	Unicode path name has to fit in any SMB/CIFS path based frames */
1696 		CIFSMaxBufSize = 8192;
1697 	} else if (CIFSMaxBufSize > 1024*127) {
1698 		CIFSMaxBufSize = 1024 * 127;
1699 	} else {
1700 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1701 	}
1702 /*
1703 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1704 		 CIFSMaxBufSize, CIFSMaxBufSize);
1705 */
1706 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1707 					    CIFSMaxBufSize + max_hdr_size, 0,
1708 					    SLAB_HWCACHE_ALIGN, 0,
1709 					    CIFSMaxBufSize + max_hdr_size,
1710 					    NULL);
1711 	if (cifs_req_cachep == NULL)
1712 		return -ENOMEM;
1713 
1714 	if (cifs_min_rcv < 1)
1715 		cifs_min_rcv = 1;
1716 	else if (cifs_min_rcv > 64) {
1717 		cifs_min_rcv = 64;
1718 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1719 	}
1720 
1721 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1722 						  cifs_req_cachep);
1723 
1724 	if (cifs_req_poolp == NULL) {
1725 		kmem_cache_destroy(cifs_req_cachep);
1726 		return -ENOMEM;
1727 	}
1728 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1729 	almost all handle based requests (but not write response, nor is it
1730 	sufficient for path based requests).  A smaller size would have
1731 	been more efficient (compacting multiple slab items on one 4k page)
1732 	for the case in which debug was on, but this larger size allows
1733 	more SMBs to use small buffer alloc and is still much more
1734 	efficient to alloc 1 per page off the slab compared to 17K (5page)
1735 	alloc of large cifs buffers even when page debugging is on */
1736 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1737 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1738 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1739 	if (cifs_sm_req_cachep == NULL) {
1740 		mempool_destroy(cifs_req_poolp);
1741 		kmem_cache_destroy(cifs_req_cachep);
1742 		return -ENOMEM;
1743 	}
1744 
1745 	if (cifs_min_small < 2)
1746 		cifs_min_small = 2;
1747 	else if (cifs_min_small > 256) {
1748 		cifs_min_small = 256;
1749 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1750 	}
1751 
1752 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1753 						     cifs_sm_req_cachep);
1754 
1755 	if (cifs_sm_req_poolp == NULL) {
1756 		mempool_destroy(cifs_req_poolp);
1757 		kmem_cache_destroy(cifs_req_cachep);
1758 		kmem_cache_destroy(cifs_sm_req_cachep);
1759 		return -ENOMEM;
1760 	}
1761 
1762 	return 0;
1763 }
1764 
/*
 * Free the request-buffer mempools and their backing slab caches
 * (each pool before the cache it draws from).
 */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1773 
1774 static int init_mids(void)
1775 {
1776 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1777 					    sizeof(struct mid_q_entry), 0,
1778 					    SLAB_HWCACHE_ALIGN, NULL);
1779 	if (cifs_mid_cachep == NULL)
1780 		return -ENOMEM;
1781 
1782 	/* 3 is a reasonable minimum number of simultaneous operations */
1783 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1784 	if (cifs_mid_poolp == NULL) {
1785 		kmem_cache_destroy(cifs_mid_cachep);
1786 		return -ENOMEM;
1787 	}
1788 
1789 	return 0;
1790 }
1791 
/* Free the mid mempool, then the slab cache backing it. */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1797 
/*
 * Module init: set up global counters and locks, the workqueues, the
 * inode/mid/request-buffer caches, the optional DFS/SPNEGO/SWN
 * subsystems, then register the "cifs" and "smb3" filesystem types.
 * On any failure, everything set up so far is unwound through the goto
 * ladder at the bottom (kept in reverse order of setup).
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	/* clamp module parameters to sane ranges before first use */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_deferredclose_wq;

	rc = init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	/* register both filesystem names; unregister "cifs" if "smb3" fails */
	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
1969 
/*
 * Module exit: unregister both filesystem types, then tear down the
 * optional subsystems, caches and workqueues.  NOTE(review): this
 * teardown largely mirrors init_cifs() in reverse; keep the two in sync
 * when adding subsystems.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
1997 
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: hints for userspace module tooling (modprobe) to
 * load these crypto and charset modules alongside this one, without
 * creating hard symbol dependencies.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2017