xref: /linux/fs/smb/client/cifsfs.c (revision 4f372263ef92ed2af55a8c226750b72021ff8d0f)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the range should be to 119, which
56  * limits maximum year to 2099. But this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61 
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 unsigned int sign_CIFS_PDUs = 1;
74 
75 /*
76  * Global transaction id (XID) information
77  */
78 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
80 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
82 
83 /*
84  *  Global counters, updated atomically
85  */
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
92 
93 atomic_t mid_count;
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
99 #endif/* STATS2 */
100 struct list_head	cifs_tcp_ses_list;
101 spinlock_t		cifs_tcp_ses_lock;
102 static const struct super_operations cifs_super_ops;
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 				 "for CIFS requests. "
107 				 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 				"1 to 64");
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 				 "Range: 2 to 256");
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 				   "CIFS/SMB1 dialect (N/A for SMB3) "
120 				   "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 #ifdef CONFIG_CIFS_STATS2
126 unsigned int slow_rsp_threshold = 1;
127 module_param(slow_rsp_threshold, uint, 0644);
128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
129 				   "before logging that a response is delayed. "
130 				   "Default: 1 (if set to 0 disables msg).");
131 #endif /* STATS2 */
132 
133 module_param(enable_oplocks, bool, 0644);
134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
135 
136 module_param(enable_gcm_256, bool, 0644);
137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
138 
139 module_param(require_gcm_256, bool, 0644);
140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
141 
142 module_param(enable_negotiate_signing, bool, 0644);
143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
144 
145 module_param(disable_legacy_dialects, bool, 0644);
146 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
147 				  "helpful to restrict the ability to "
148 				  "override the default dialects (SMB2.1, "
149 				  "SMB3 and SMB3.02) on mount with old "
150 				  "dialects (CIFS/SMB1 and SMB2) since "
151 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
152 				  " and less secure. Default: n/N/0");
153 
154 struct workqueue_struct	*cifsiod_wq;
155 struct workqueue_struct	*decrypt_wq;
156 struct workqueue_struct	*fileinfo_put_wq;
157 struct workqueue_struct	*cifsoplockd_wq;
158 struct workqueue_struct	*deferredclose_wq;
159 struct workqueue_struct	*serverclose_wq;
160 struct workqueue_struct	*cfid_put_wq;
161 __u32 cifs_lock_secret;
162 
163 /*
164  * Bumps refcount for cifs super block.
165  * Note that it should be only called if a reference to VFS super block is
166  * already held, e.g. in open-type syscalls context. Otherwise it can race with
167  * atomic_dec_and_test in deactivate_locked_super.
168  */
169 void
170 cifs_sb_active(struct super_block *sb)
171 {
172 	struct cifs_sb_info *server = CIFS_SB(sb);
173 
174 	if (atomic_inc_return(&server->active) == 1)
175 		atomic_inc(&sb->s_active);
176 }
177 
178 void
179 cifs_sb_deactive(struct super_block *sb)
180 {
181 	struct cifs_sb_info *server = CIFS_SB(sb);
182 
183 	if (atomic_dec_and_test(&server->active))
184 		deactivate_super(sb);
185 }
186 
187 static int
188 cifs_read_super(struct super_block *sb)
189 {
190 	struct inode *inode;
191 	struct cifs_sb_info *cifs_sb;
192 	struct cifs_tcon *tcon;
193 	struct timespec64 ts;
194 	int rc = 0;
195 
196 	cifs_sb = CIFS_SB(sb);
197 	tcon = cifs_sb_master_tcon(cifs_sb);
198 
199 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
200 		sb->s_flags |= SB_POSIXACL;
201 
202 	if (tcon->snapshot_time)
203 		sb->s_flags |= SB_RDONLY;
204 
205 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
206 		sb->s_maxbytes = MAX_LFS_FILESIZE;
207 	else
208 		sb->s_maxbytes = MAX_NON_LFS;
209 
210 	/*
211 	 * Some very old servers like DOS and OS/2 used 2 second granularity
212 	 * (while all current servers use 100ns granularity - see MS-DTYP)
213 	 * but 1 second is the maximum allowed granularity for the VFS
214 	 * so for old servers set time granularity to 1 second while for
215 	 * everything else (current servers) set it to 100ns.
216 	 */
217 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
218 	    ((tcon->ses->capabilities &
219 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
220 	    !tcon->unix_ext) {
221 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
222 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
223 		sb->s_time_min = ts.tv_sec;
224 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
225 				    cpu_to_le16(SMB_TIME_MAX), 0);
226 		sb->s_time_max = ts.tv_sec;
227 	} else {
228 		/*
229 		 * Almost every server, including all SMB2+, uses DCE TIME
230 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
231 		 */
232 		sb->s_time_gran = 100;
233 		ts = cifs_NTtimeToUnix(0);
234 		sb->s_time_min = ts.tv_sec;
235 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
236 		sb->s_time_max = ts.tv_sec;
237 	}
238 
239 	sb->s_magic = CIFS_SUPER_MAGIC;
240 	sb->s_op = &cifs_super_ops;
241 	sb->s_xattr = cifs_xattr_handlers;
242 	rc = super_setup_bdi(sb);
243 	if (rc)
244 		goto out_no_root;
245 	/* tune readahead according to rsize if readahead size not set on mount */
246 	if (cifs_sb->ctx->rsize == 0)
247 		cifs_sb->ctx->rsize =
248 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
249 	if (cifs_sb->ctx->rasize)
250 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
251 	else
252 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
253 
254 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
255 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
256 	inode = cifs_root_iget(sb);
257 
258 	if (IS_ERR(inode)) {
259 		rc = PTR_ERR(inode);
260 		goto out_no_root;
261 	}
262 
263 	if (tcon->nocase)
264 		sb->s_d_op = &cifs_ci_dentry_ops;
265 	else
266 		sb->s_d_op = &cifs_dentry_ops;
267 
268 	sb->s_root = d_make_root(inode);
269 	if (!sb->s_root) {
270 		rc = -ENOMEM;
271 		goto out_no_root;
272 	}
273 
274 #ifdef CONFIG_CIFS_NFSD_EXPORT
275 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
276 		cifs_dbg(FYI, "export ops supported\n");
277 		sb->s_export_op = &cifs_export_ops;
278 	}
279 #endif /* CONFIG_CIFS_NFSD_EXPORT */
280 
281 	return 0;
282 
283 out_no_root:
284 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
285 	return rc;
286 }
287 
288 static void cifs_kill_sb(struct super_block *sb)
289 {
290 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
291 
292 	/*
293 	 * We need to release all dentries for the cached directories
294 	 * before we kill the sb.
295 	 */
296 	if (cifs_sb->root) {
297 		close_all_cached_dirs(cifs_sb);
298 
299 		/* finally release root dentry */
300 		dput(cifs_sb->root);
301 		cifs_sb->root = NULL;
302 	}
303 
304 	kill_anon_super(sb);
305 	cifs_umount(cifs_sb);
306 }
307 
308 static int
309 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
310 {
311 	struct super_block *sb = dentry->d_sb;
312 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
313 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
314 	struct TCP_Server_Info *server = tcon->ses->server;
315 	unsigned int xid;
316 	int rc = 0;
317 	const char *full_path;
318 	void *page;
319 
320 	xid = get_xid();
321 	page = alloc_dentry_path();
322 
323 	full_path = build_path_from_dentry(dentry, page);
324 	if (IS_ERR(full_path)) {
325 		rc = PTR_ERR(full_path);
326 		goto statfs_out;
327 	}
328 
329 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
330 		buf->f_namelen =
331 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
332 	else
333 		buf->f_namelen = PATH_MAX;
334 
335 	buf->f_fsid.val[0] = tcon->vol_serial_number;
336 	/* are using part of create time for more randomness, see man statfs */
337 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
338 
339 	buf->f_files = 0;	/* undefined */
340 	buf->f_ffree = 0;	/* unlimited */
341 
342 	if (server->ops->queryfs)
343 		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
344 
345 statfs_out:
346 	free_dentry_path(page);
347 	free_xid(xid);
348 	return rc;
349 }
350 
351 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
352 {
353 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
354 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
355 	struct TCP_Server_Info *server = tcon->ses->server;
356 
357 	if (server->ops->fallocate)
358 		return server->ops->fallocate(file, tcon, mode, off, len);
359 
360 	return -EOPNOTSUPP;
361 }
362 
363 static int cifs_permission(struct mnt_idmap *idmap,
364 			   struct inode *inode, int mask)
365 {
366 	struct cifs_sb_info *cifs_sb;
367 
368 	cifs_sb = CIFS_SB(inode->i_sb);
369 
370 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
371 		if ((mask & MAY_EXEC) && !execute_ok(inode))
372 			return -EACCES;
373 		else
374 			return 0;
375 	} else /* file mode might have been restricted at mount time
376 		on the client (above and beyond ACL on servers) for
377 		servers which do not support setting and viewing mode bits,
378 		so allowing client to check permissions is useful */
379 		return generic_permission(&nop_mnt_idmap, inode, mask);
380 }
381 
382 static struct kmem_cache *cifs_inode_cachep;
383 static struct kmem_cache *cifs_req_cachep;
384 static struct kmem_cache *cifs_mid_cachep;
385 static struct kmem_cache *cifs_sm_req_cachep;
386 static struct kmem_cache *cifs_io_request_cachep;
387 static struct kmem_cache *cifs_io_subrequest_cachep;
388 mempool_t *cifs_sm_req_poolp;
389 mempool_t *cifs_req_poolp;
390 mempool_t *cifs_mid_poolp;
391 mempool_t cifs_io_request_pool;
392 mempool_t cifs_io_subrequest_pool;
393 
394 static struct inode *
395 cifs_alloc_inode(struct super_block *sb)
396 {
397 	struct cifsInodeInfo *cifs_inode;
398 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
399 	if (!cifs_inode)
400 		return NULL;
401 	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
402 	cifs_inode->time = 0;
403 	/*
404 	 * Until the file is open and we have gotten oplock info back from the
405 	 * server, can not assume caching of file data or metadata.
406 	 */
407 	cifs_set_oplock_level(cifs_inode, 0);
408 	cifs_inode->lease_granted = false;
409 	cifs_inode->flags = 0;
410 	spin_lock_init(&cifs_inode->writers_lock);
411 	cifs_inode->writers = 0;
412 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
413 	cifs_inode->netfs.remote_i_size = 0;
414 	cifs_inode->uniqueid = 0;
415 	cifs_inode->createtime = 0;
416 	cifs_inode->epoch = 0;
417 	spin_lock_init(&cifs_inode->open_file_lock);
418 	generate_random_uuid(cifs_inode->lease_key);
419 	cifs_inode->symlink_target = NULL;
420 
421 	/*
422 	 * Can not set i_flags here - they get immediately overwritten to zero
423 	 * by the VFS.
424 	 */
425 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
426 	INIT_LIST_HEAD(&cifs_inode->openFileList);
427 	INIT_LIST_HEAD(&cifs_inode->llist);
428 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
429 	spin_lock_init(&cifs_inode->deferred_lock);
430 	return &cifs_inode->netfs.inode;
431 }
432 
433 static void
434 cifs_free_inode(struct inode *inode)
435 {
436 	struct cifsInodeInfo *cinode = CIFS_I(inode);
437 
438 	if (S_ISLNK(inode->i_mode))
439 		kfree(cinode->symlink_target);
440 	kmem_cache_free(cifs_inode_cachep, cinode);
441 }
442 
443 static void
444 cifs_evict_inode(struct inode *inode)
445 {
446 	netfs_wait_for_outstanding_io(inode);
447 	truncate_inode_pages_final(&inode->i_data);
448 	if (inode->i_state & I_PINNING_NETFS_WB)
449 		cifs_fscache_unuse_inode_cookie(inode, true);
450 	cifs_fscache_release_inode_cookie(inode);
451 	clear_inode(inode);
452 }
453 
454 static void
455 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
456 {
457 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
458 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
459 
460 	seq_puts(s, ",addr=");
461 
462 	switch (server->dstaddr.ss_family) {
463 	case AF_INET:
464 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
465 		break;
466 	case AF_INET6:
467 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
468 		if (sa6->sin6_scope_id)
469 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
470 		break;
471 	default:
472 		seq_puts(s, "(unknown)");
473 	}
474 	if (server->rdma)
475 		seq_puts(s, ",rdma");
476 }
477 
478 static void
479 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
480 {
481 	if (ses->sectype == Unspecified) {
482 		if (ses->user_name == NULL)
483 			seq_puts(s, ",sec=none");
484 		return;
485 	}
486 
487 	seq_puts(s, ",sec=");
488 
489 	switch (ses->sectype) {
490 	case NTLMv2:
491 		seq_puts(s, "ntlmv2");
492 		break;
493 	case Kerberos:
494 		seq_puts(s, "krb5");
495 		break;
496 	case RawNTLMSSP:
497 		seq_puts(s, "ntlmssp");
498 		break;
499 	default:
500 		/* shouldn't ever happen */
501 		seq_puts(s, "unknown");
502 		break;
503 	}
504 
505 	if (ses->sign)
506 		seq_puts(s, "i");
507 
508 	if (ses->sectype == Kerberos)
509 		seq_printf(s, ",cruid=%u",
510 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
511 }
512 
513 static void
514 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
515 {
516 	seq_puts(s, ",cache=");
517 
518 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
519 		seq_puts(s, "strict");
520 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
521 		seq_puts(s, "none");
522 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
523 		seq_puts(s, "singleclient"); /* assume only one client access */
524 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
525 		seq_puts(s, "ro"); /* read only caching assumed */
526 	else
527 		seq_puts(s, "loose");
528 }
529 
530 /*
531  * cifs_show_devname() is used so we show the mount device name with correct
532  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
533  */
534 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
535 {
536 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
537 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
538 
539 	if (devname == NULL)
540 		seq_puts(m, "none");
541 	else {
542 		convert_delimiter(devname, '/');
543 		/* escape all spaces in share names */
544 		seq_escape(m, devname, " \t");
545 		kfree(devname);
546 	}
547 	return 0;
548 }
549 
550 static void
551 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
552 {
553 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
554 		seq_puts(s, ",upcall_target=app");
555 		return;
556 	}
557 
558 	seq_puts(s, ",upcall_target=");
559 
560 	switch (cifs_sb->ctx->upcall_target) {
561 	case UPTARGET_APP:
562 		seq_puts(s, "app");
563 		break;
564 	case UPTARGET_MOUNT:
565 		seq_puts(s, "mount");
566 		break;
567 	default:
568 		/* shouldn't ever happen */
569 		seq_puts(s, "unknown");
570 		break;
571 	}
572 }
573 
574 /*
575  * cifs_show_options() is for displaying mount options in /proc/mounts.
576  * Not all settable options are displayed but most of the important
577  * ones are.
578  */
579 static int
580 cifs_show_options(struct seq_file *s, struct dentry *root)
581 {
582 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
583 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
584 	struct sockaddr *srcaddr;
585 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
586 
587 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
588 	cifs_show_security(s, tcon->ses);
589 	cifs_show_cache_flavor(s, cifs_sb);
590 	cifs_show_upcall_target(s, cifs_sb);
591 
592 	if (tcon->no_lease)
593 		seq_puts(s, ",nolease");
594 	if (cifs_sb->ctx->multiuser)
595 		seq_puts(s, ",multiuser");
596 	else if (tcon->ses->user_name)
597 		seq_show_option(s, "username", tcon->ses->user_name);
598 
599 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
600 		seq_show_option(s, "domain", tcon->ses->domainName);
601 
602 	if (srcaddr->sa_family != AF_UNSPEC) {
603 		struct sockaddr_in *saddr4;
604 		struct sockaddr_in6 *saddr6;
605 		saddr4 = (struct sockaddr_in *)srcaddr;
606 		saddr6 = (struct sockaddr_in6 *)srcaddr;
607 		if (srcaddr->sa_family == AF_INET6)
608 			seq_printf(s, ",srcaddr=%pI6c",
609 				   &saddr6->sin6_addr);
610 		else if (srcaddr->sa_family == AF_INET)
611 			seq_printf(s, ",srcaddr=%pI4",
612 				   &saddr4->sin_addr.s_addr);
613 		else
614 			seq_printf(s, ",srcaddr=BAD-AF:%i",
615 				   (int)(srcaddr->sa_family));
616 	}
617 
618 	seq_printf(s, ",uid=%u",
619 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
620 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
621 		seq_puts(s, ",forceuid");
622 	else
623 		seq_puts(s, ",noforceuid");
624 
625 	seq_printf(s, ",gid=%u",
626 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
627 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
628 		seq_puts(s, ",forcegid");
629 	else
630 		seq_puts(s, ",noforcegid");
631 
632 	cifs_show_address(s, tcon->ses->server);
633 
634 	if (!tcon->unix_ext)
635 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
636 					   cifs_sb->ctx->file_mode,
637 					   cifs_sb->ctx->dir_mode);
638 	if (cifs_sb->ctx->iocharset)
639 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
640 	if (tcon->ses->unicode == 0)
641 		seq_puts(s, ",nounicode");
642 	else if (tcon->ses->unicode == 1)
643 		seq_puts(s, ",unicode");
644 	if (tcon->seal)
645 		seq_puts(s, ",seal");
646 	else if (tcon->ses->server->ignore_signature)
647 		seq_puts(s, ",signloosely");
648 	if (tcon->nocase)
649 		seq_puts(s, ",nocase");
650 	if (tcon->nodelete)
651 		seq_puts(s, ",nodelete");
652 	if (cifs_sb->ctx->no_sparse)
653 		seq_puts(s, ",nosparse");
654 	if (tcon->local_lease)
655 		seq_puts(s, ",locallease");
656 	if (tcon->retry)
657 		seq_puts(s, ",hard");
658 	else
659 		seq_puts(s, ",soft");
660 	if (tcon->use_persistent)
661 		seq_puts(s, ",persistenthandles");
662 	else if (tcon->use_resilient)
663 		seq_puts(s, ",resilienthandles");
664 	if (tcon->posix_extensions)
665 		seq_puts(s, ",posix");
666 	else if (tcon->unix_ext)
667 		seq_puts(s, ",unix");
668 	else
669 		seq_puts(s, ",nounix");
670 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
671 		seq_puts(s, ",nodfs");
672 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
673 		seq_puts(s, ",posixpaths");
674 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
675 		seq_puts(s, ",setuids");
676 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
677 		seq_puts(s, ",idsfromsid");
678 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
679 		seq_puts(s, ",serverino");
680 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
681 		seq_puts(s, ",rwpidforward");
682 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
683 		seq_puts(s, ",forcemand");
684 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
685 		seq_puts(s, ",nouser_xattr");
686 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
687 		seq_puts(s, ",mapchars");
688 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
689 		seq_puts(s, ",mapposix");
690 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
691 		seq_puts(s, ",sfu");
692 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
693 		seq_puts(s, ",nobrl");
694 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
695 		seq_puts(s, ",nohandlecache");
696 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
697 		seq_puts(s, ",modefromsid");
698 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
699 		seq_puts(s, ",cifsacl");
700 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
701 		seq_puts(s, ",dynperm");
702 	if (root->d_sb->s_flags & SB_POSIXACL)
703 		seq_puts(s, ",acl");
704 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
705 		seq_puts(s, ",mfsymlinks");
706 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
707 		seq_puts(s, ",fsc");
708 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
709 		seq_puts(s, ",nostrictsync");
710 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
711 		seq_puts(s, ",noperm");
712 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
713 		seq_printf(s, ",backupuid=%u",
714 			   from_kuid_munged(&init_user_ns,
715 					    cifs_sb->ctx->backupuid));
716 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
717 		seq_printf(s, ",backupgid=%u",
718 			   from_kgid_munged(&init_user_ns,
719 					    cifs_sb->ctx->backupgid));
720 	seq_show_option(s, "reparse",
721 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
722 	if (cifs_sb->ctx->nonativesocket)
723 		seq_puts(s, ",nonativesocket");
724 	else
725 		seq_puts(s, ",nativesocket");
726 	seq_show_option(s, "symlink",
727 			cifs_symlink_type_str(get_cifs_symlink_type(cifs_sb)));
728 
729 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
730 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
731 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
732 	if (cifs_sb->ctx->rasize)
733 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
734 	if (tcon->ses->server->min_offload)
735 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
736 	if (tcon->ses->server->retrans)
737 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
738 	seq_printf(s, ",echo_interval=%lu",
739 			tcon->ses->server->echo_interval / HZ);
740 
741 	/* Only display the following if overridden on mount */
742 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
743 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
744 	if (tcon->ses->server->tcp_nodelay)
745 		seq_puts(s, ",tcpnodelay");
746 	if (tcon->ses->server->noautotune)
747 		seq_puts(s, ",noautotune");
748 	if (tcon->ses->server->noblocksnd)
749 		seq_puts(s, ",noblocksend");
750 	if (tcon->ses->server->nosharesock)
751 		seq_puts(s, ",nosharesock");
752 
753 	if (tcon->snapshot_time)
754 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
755 	if (tcon->handle_timeout)
756 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
757 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
758 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
759 
760 	/*
761 	 * Display file and directory attribute timeout in seconds.
762 	 * If file and directory attribute timeout the same then actimeo
763 	 * was likely specified on mount
764 	 */
765 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
766 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
767 	else {
768 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
769 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
770 	}
771 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
772 
773 	if (tcon->ses->chan_max > 1)
774 		seq_printf(s, ",multichannel,max_channels=%zu",
775 			   tcon->ses->chan_max);
776 
777 	if (tcon->use_witness)
778 		seq_puts(s, ",witness");
779 
780 	return 0;
781 }
782 
783 static void cifs_umount_begin(struct super_block *sb)
784 {
785 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
786 	struct cifs_tcon *tcon;
787 
788 	if (cifs_sb == NULL)
789 		return;
790 
791 	tcon = cifs_sb_master_tcon(cifs_sb);
792 
793 	spin_lock(&cifs_tcp_ses_lock);
794 	spin_lock(&tcon->tc_lock);
795 	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
796 			    netfs_trace_tcon_ref_see_umount);
797 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
798 		/* we have other mounts to same share or we have
799 		   already tried to umount this and woken up
800 		   all waiting network requests, nothing to do */
801 		spin_unlock(&tcon->tc_lock);
802 		spin_unlock(&cifs_tcp_ses_lock);
803 		return;
804 	}
805 	/*
806 	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
807 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
808 	 */
809 	spin_unlock(&tcon->tc_lock);
810 	spin_unlock(&cifs_tcp_ses_lock);
811 
812 	cifs_close_all_deferred_files(tcon);
813 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
814 	/* cancel_notify_requests(tcon); */
815 	if (tcon->ses && tcon->ses->server) {
816 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
817 		wake_up_all(&tcon->ses->server->request_q);
818 		wake_up_all(&tcon->ses->server->response_q);
819 		msleep(1); /* yield */
820 		/* we have to kick the requests once more */
821 		wake_up_all(&tcon->ses->server->response_q);
822 		msleep(1);
823 	}
824 
825 	return;
826 }
827 
828 static int cifs_freeze(struct super_block *sb)
829 {
830 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
831 	struct cifs_tcon *tcon;
832 
833 	if (cifs_sb == NULL)
834 		return 0;
835 
836 	tcon = cifs_sb_master_tcon(cifs_sb);
837 
838 	cifs_close_all_deferred_files(tcon);
839 	return 0;
840 }
841 
842 #ifdef CONFIG_CIFS_STATS2
843 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
844 {
845 	/* BB FIXME */
846 	return 0;
847 }
848 #endif
849 
850 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
851 {
852 	return netfs_unpin_writeback(inode, wbc);
853 }
854 
855 static int cifs_drop_inode(struct inode *inode)
856 {
857 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
858 
859 	/* no serverino => unconditional eviction */
860 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
861 		generic_drop_inode(inode);
862 }
863 
864 static const struct super_operations cifs_super_ops = {
865 	.statfs = cifs_statfs,
866 	.alloc_inode = cifs_alloc_inode,
867 	.write_inode	= cifs_write_inode,
868 	.free_inode = cifs_free_inode,
869 	.drop_inode	= cifs_drop_inode,
870 	.evict_inode	= cifs_evict_inode,
871 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
872 	.show_devname   = cifs_show_devname,
873 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
874 	function unless later we add lazy close of inodes or unless the
875 	kernel forgets to call us with the same number of releases (closes)
876 	as opens */
877 	.show_options = cifs_show_options,
878 	.umount_begin   = cifs_umount_begin,
879 	.freeze_fs      = cifs_freeze,
880 #ifdef CONFIG_CIFS_STATS2
881 	.show_stats = cifs_show_stats,
882 #endif
883 };
884 
885 /*
886  * Get root dentry from superblock according to prefix path mount option.
887  * Return dentry with refcount + 1 on success and NULL otherwise.
888  */
889 static struct dentry *
890 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
891 {
892 	struct dentry *dentry;
893 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
894 	char *full_path = NULL;
895 	char *s, *p;
896 	char sep;
897 
898 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
899 		return dget(sb->s_root);
900 
901 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
902 				cifs_sb_master_tcon(cifs_sb), 0);
903 	if (full_path == NULL)
904 		return ERR_PTR(-ENOMEM);
905 
906 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
907 
908 	sep = CIFS_DIR_SEP(cifs_sb);
909 	dentry = dget(sb->s_root);
910 	s = full_path;
911 
912 	do {
913 		struct inode *dir = d_inode(dentry);
914 		struct dentry *child;
915 
916 		if (!S_ISDIR(dir->i_mode)) {
917 			dput(dentry);
918 			dentry = ERR_PTR(-ENOTDIR);
919 			break;
920 		}
921 
922 		/* skip separators */
923 		while (*s == sep)
924 			s++;
925 		if (!*s)
926 			break;
927 		p = s++;
928 		/* next separator */
929 		while (*s && *s != sep)
930 			s++;
931 
932 		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
933 							dentry);
934 		dput(dentry);
935 		dentry = child;
936 	} while (!IS_ERR(dentry));
937 	kfree(full_path);
938 	return dentry;
939 }
940 
941 static int cifs_set_super(struct super_block *sb, void *data)
942 {
943 	struct cifs_mnt_data *mnt_data = data;
944 	sb->s_fs_info = mnt_data->cifs_sb;
945 	return set_anon_super(sb, NULL);
946 }
947 
948 struct dentry *
949 cifs_smb3_do_mount(struct file_system_type *fs_type,
950 	      int flags, struct smb3_fs_context *old_ctx)
951 {
952 	struct cifs_mnt_data mnt_data;
953 	struct cifs_sb_info *cifs_sb;
954 	struct super_block *sb;
955 	struct dentry *root;
956 	int rc;
957 
958 	if (cifsFYI) {
959 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
960 			 old_ctx->source, flags);
961 	} else {
962 		cifs_info("Attempting to mount %s\n", old_ctx->source);
963 	}
964 
965 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
966 	if (!cifs_sb)
967 		return ERR_PTR(-ENOMEM);
968 
969 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
970 	if (!cifs_sb->ctx) {
971 		root = ERR_PTR(-ENOMEM);
972 		goto out;
973 	}
974 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
975 	if (rc) {
976 		root = ERR_PTR(rc);
977 		goto out;
978 	}
979 
980 	rc = cifs_setup_cifs_sb(cifs_sb);
981 	if (rc) {
982 		root = ERR_PTR(rc);
983 		goto out;
984 	}
985 
986 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
987 	if (rc) {
988 		if (!(flags & SB_SILENT))
989 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
990 				 rc);
991 		root = ERR_PTR(rc);
992 		goto out;
993 	}
994 
995 	mnt_data.ctx = cifs_sb->ctx;
996 	mnt_data.cifs_sb = cifs_sb;
997 	mnt_data.flags = flags;
998 
999 	/* BB should we make this contingent on mount parm? */
1000 	flags |= SB_NODIRATIME | SB_NOATIME;
1001 
1002 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
1003 	if (IS_ERR(sb)) {
1004 		cifs_umount(cifs_sb);
1005 		return ERR_CAST(sb);
1006 	}
1007 
1008 	if (sb->s_root) {
1009 		cifs_dbg(FYI, "Use existing superblock\n");
1010 		cifs_umount(cifs_sb);
1011 		cifs_sb = NULL;
1012 	} else {
1013 		rc = cifs_read_super(sb);
1014 		if (rc) {
1015 			root = ERR_PTR(rc);
1016 			goto out_super;
1017 		}
1018 
1019 		sb->s_flags |= SB_ACTIVE;
1020 	}
1021 
1022 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
1023 	if (IS_ERR(root))
1024 		goto out_super;
1025 
1026 	if (cifs_sb)
1027 		cifs_sb->root = dget(root);
1028 
1029 	cifs_dbg(FYI, "dentry root is: %p\n", root);
1030 	return root;
1031 
1032 out_super:
1033 	deactivate_locked_super(sb);
1034 	return root;
1035 out:
1036 	kfree(cifs_sb->prepath);
1037 	smb3_cleanup_fs_context(cifs_sb->ctx);
1038 	kfree(cifs_sb);
1039 	return root;
1040 }
1041 
1042 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1043 {
1044 	struct cifsFileInfo *cfile = file->private_data;
1045 	struct cifs_tcon *tcon;
1046 
1047 	/*
1048 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1049 	 * the cached file length
1050 	 */
1051 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1052 		int rc;
1053 		struct inode *inode = file_inode(file);
1054 
1055 		/*
1056 		 * We need to be sure that all dirty pages are written and the
1057 		 * server has the newest file length.
1058 		 */
1059 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1060 		    inode->i_mapping->nrpages != 0) {
1061 			rc = filemap_fdatawait(inode->i_mapping);
1062 			if (rc) {
1063 				mapping_set_error(inode->i_mapping, rc);
1064 				return rc;
1065 			}
1066 		}
1067 		/*
1068 		 * Some applications poll for the file length in this strange
1069 		 * way so we must seek to end on non-oplocked files by
1070 		 * setting the revalidate time to zero.
1071 		 */
1072 		CIFS_I(inode)->time = 0;
1073 
1074 		rc = cifs_revalidate_file_attr(file);
1075 		if (rc < 0)
1076 			return (loff_t)rc;
1077 	}
1078 	if (cfile && cfile->tlink) {
1079 		tcon = tlink_tcon(cfile->tlink);
1080 		if (tcon->ses->server->ops->llseek)
1081 			return tcon->ses->server->ops->llseek(file, tcon,
1082 							      offset, whence);
1083 	}
1084 	return generic_file_llseek(file, offset, whence);
1085 }
1086 
1087 static int
1088 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1089 {
1090 	/*
1091 	 * Note that this is called by vfs setlease with i_lock held to
1092 	 * protect *lease from going away.
1093 	 */
1094 	struct inode *inode = file_inode(file);
1095 	struct cifsFileInfo *cfile = file->private_data;
1096 
1097 	/* Check if file is oplocked if this is request for new lease */
1098 	if (arg == F_UNLCK ||
1099 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1100 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1101 		return generic_setlease(file, arg, lease, priv);
1102 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1103 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1104 		/*
1105 		 * If the server claims to support oplock on this file, then we
1106 		 * still need to check oplock even if the local_lease mount
1107 		 * option is set, but there are servers which do not support
1108 		 * oplock for which this mount option may be useful if the user
1109 		 * knows that the file won't be changed on the server by anyone
1110 		 * else.
1111 		 */
1112 		return generic_setlease(file, arg, lease, priv);
1113 	else
1114 		return -EAGAIN;
1115 }
1116 
1117 struct file_system_type cifs_fs_type = {
1118 	.owner = THIS_MODULE,
1119 	.name = "cifs",
1120 	.init_fs_context = smb3_init_fs_context,
1121 	.parameters = smb3_fs_parameters,
1122 	.kill_sb = cifs_kill_sb,
1123 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1124 };
1125 MODULE_ALIAS_FS("cifs");
1126 
1127 struct file_system_type smb3_fs_type = {
1128 	.owner = THIS_MODULE,
1129 	.name = "smb3",
1130 	.init_fs_context = smb3_init_fs_context,
1131 	.parameters = smb3_fs_parameters,
1132 	.kill_sb = cifs_kill_sb,
1133 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1134 };
1135 MODULE_ALIAS_FS("smb3");
1136 MODULE_ALIAS("smb3");
1137 
1138 const struct inode_operations cifs_dir_inode_ops = {
1139 	.create = cifs_create,
1140 	.atomic_open = cifs_atomic_open,
1141 	.lookup = cifs_lookup,
1142 	.getattr = cifs_getattr,
1143 	.unlink = cifs_unlink,
1144 	.link = cifs_hardlink,
1145 	.mkdir = cifs_mkdir,
1146 	.rmdir = cifs_rmdir,
1147 	.rename = cifs_rename2,
1148 	.permission = cifs_permission,
1149 	.setattr = cifs_setattr,
1150 	.symlink = cifs_symlink,
1151 	.mknod   = cifs_mknod,
1152 	.listxattr = cifs_listxattr,
1153 	.get_acl = cifs_get_acl,
1154 	.set_acl = cifs_set_acl,
1155 };
1156 
1157 const struct inode_operations cifs_file_inode_ops = {
1158 	.setattr = cifs_setattr,
1159 	.getattr = cifs_getattr,
1160 	.permission = cifs_permission,
1161 	.listxattr = cifs_listxattr,
1162 	.fiemap = cifs_fiemap,
1163 	.get_acl = cifs_get_acl,
1164 	.set_acl = cifs_set_acl,
1165 };
1166 
1167 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1168 			    struct delayed_call *done)
1169 {
1170 	char *target_path;
1171 
1172 	if (!dentry)
1173 		return ERR_PTR(-ECHILD);
1174 
1175 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1176 	if (!target_path)
1177 		return ERR_PTR(-ENOMEM);
1178 
1179 	spin_lock(&inode->i_lock);
1180 	if (likely(CIFS_I(inode)->symlink_target)) {
1181 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1182 	} else {
1183 		kfree(target_path);
1184 		target_path = ERR_PTR(-EOPNOTSUPP);
1185 	}
1186 	spin_unlock(&inode->i_lock);
1187 
1188 	if (!IS_ERR(target_path))
1189 		set_delayed_call(done, kfree_link, target_path);
1190 
1191 	return target_path;
1192 }
1193 
1194 const struct inode_operations cifs_symlink_inode_ops = {
1195 	.get_link = cifs_get_link,
1196 	.setattr = cifs_setattr,
1197 	.permission = cifs_permission,
1198 	.listxattr = cifs_listxattr,
1199 };
1200 
1201 /*
1202  * Advance the EOF marker to after the source range.
1203  */
1204 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1205 				struct cifs_tcon *src_tcon,
1206 				unsigned int xid, loff_t src_end)
1207 {
1208 	struct cifsFileInfo *writeable_srcfile;
1209 	int rc = -EINVAL;
1210 
1211 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1212 	if (writeable_srcfile) {
1213 		if (src_tcon->ses->server->ops->set_file_size)
1214 			rc = src_tcon->ses->server->ops->set_file_size(
1215 				xid, src_tcon, writeable_srcfile,
1216 				src_inode->i_size, true /* no need to set sparse */);
1217 		else
1218 			rc = -ENOSYS;
1219 		cifsFileInfo_put(writeable_srcfile);
1220 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1221 	}
1222 
1223 	if (rc < 0)
1224 		goto set_failed;
1225 
1226 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1227 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1228 	return 0;
1229 
1230 set_failed:
1231 	return filemap_write_and_wait(src_inode->i_mapping);
1232 }
1233 
1234 /*
1235  * Flush out either the folio that overlaps the beginning of a range in which
1236  * pos resides or the folio that overlaps the end of a range unless that folio
1237  * is entirely within the range we're going to invalidate.  We extend the flush
1238  * bounds to encompass the folio.
1239  */
1240 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1241 			    bool first)
1242 {
1243 	struct folio *folio;
1244 	unsigned long long fpos, fend;
1245 	pgoff_t index = pos / PAGE_SIZE;
1246 	size_t size;
1247 	int rc = 0;
1248 
1249 	folio = filemap_get_folio(inode->i_mapping, index);
1250 	if (IS_ERR(folio))
1251 		return 0;
1252 
1253 	size = folio_size(folio);
1254 	fpos = folio_pos(folio);
1255 	fend = fpos + size - 1;
1256 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1257 	*_fend   = max_t(unsigned long long, *_fend, fend);
1258 	if ((first && pos == fpos) || (!first && pos == fend))
1259 		goto out;
1260 
1261 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1262 out:
1263 	folio_put(folio);
1264 	return rc;
1265 }
1266 
1267 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1268 		struct file *dst_file, loff_t destoff, loff_t len,
1269 		unsigned int remap_flags)
1270 {
1271 	struct inode *src_inode = file_inode(src_file);
1272 	struct inode *target_inode = file_inode(dst_file);
1273 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1274 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1275 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1276 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1277 	struct cifs_tcon *target_tcon, *src_tcon;
1278 	unsigned long long destend, fstart, fend, old_size, new_size;
1279 	unsigned int xid;
1280 	int rc;
1281 
1282 	if (remap_flags & REMAP_FILE_DEDUP)
1283 		return -EOPNOTSUPP;
1284 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1285 		return -EINVAL;
1286 
1287 	cifs_dbg(FYI, "clone range\n");
1288 
1289 	xid = get_xid();
1290 
1291 	if (!smb_file_src || !smb_file_target) {
1292 		rc = -EBADF;
1293 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1294 		goto out;
1295 	}
1296 
1297 	src_tcon = tlink_tcon(smb_file_src->tlink);
1298 	target_tcon = tlink_tcon(smb_file_target->tlink);
1299 
1300 	/*
1301 	 * Note: cifs case is easier than btrfs since server responsible for
1302 	 * checks for proper open modes and file type and if it wants
1303 	 * server could even support copy of range where source = target
1304 	 */
1305 	lock_two_nondirectories(target_inode, src_inode);
1306 
1307 	if (len == 0)
1308 		len = src_inode->i_size - off;
1309 
1310 	cifs_dbg(FYI, "clone range\n");
1311 
1312 	/* Flush the source buffer */
1313 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1314 					  off + len - 1);
1315 	if (rc)
1316 		goto unlock;
1317 
1318 	/* The server-side copy will fail if the source crosses the EOF marker.
1319 	 * Advance the EOF marker after the flush above to the end of the range
1320 	 * if it's short of that.
1321 	 */
1322 	if (src_cifsi->netfs.remote_i_size < off + len) {
1323 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1324 		if (rc < 0)
1325 			goto unlock;
1326 	}
1327 
1328 	new_size = destoff + len;
1329 	destend = destoff + len - 1;
1330 
1331 	/* Flush the folios at either end of the destination range to prevent
1332 	 * accidental loss of dirty data outside of the range.
1333 	 */
1334 	fstart = destoff;
1335 	fend = destend;
1336 
1337 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1338 	if (rc)
1339 		goto unlock;
1340 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1341 	if (rc)
1342 		goto unlock;
1343 	if (fend > target_cifsi->netfs.zero_point)
1344 		target_cifsi->netfs.zero_point = fend + 1;
1345 	old_size = target_cifsi->netfs.remote_i_size;
1346 
1347 	/* Discard all the folios that overlap the destination region. */
1348 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1349 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1350 
1351 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1352 			   i_size_read(target_inode), 0);
1353 
1354 	rc = -EOPNOTSUPP;
1355 	if (target_tcon->ses->server->ops->duplicate_extents) {
1356 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1357 			smb_file_src, smb_file_target, off, len, destoff);
1358 		if (rc == 0 && new_size > old_size) {
1359 			truncate_setsize(target_inode, new_size);
1360 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1361 					      new_size);
1362 		}
1363 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1364 			target_cifsi->netfs.zero_point = new_size;
1365 	}
1366 
1367 	/* force revalidate of size and timestamps of target file now
1368 	   that target is updated on the server */
1369 	CIFS_I(target_inode)->time = 0;
1370 unlock:
1371 	/* although unlocking in the reverse order from locking is not
1372 	   strictly necessary here it is a little cleaner to be consistent */
1373 	unlock_two_nondirectories(src_inode, target_inode);
1374 out:
1375 	free_xid(xid);
1376 	return rc < 0 ? rc : len;
1377 }
1378 
1379 ssize_t cifs_file_copychunk_range(unsigned int xid,
1380 				struct file *src_file, loff_t off,
1381 				struct file *dst_file, loff_t destoff,
1382 				size_t len, unsigned int flags)
1383 {
1384 	struct inode *src_inode = file_inode(src_file);
1385 	struct inode *target_inode = file_inode(dst_file);
1386 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1387 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1388 	struct cifsFileInfo *smb_file_src;
1389 	struct cifsFileInfo *smb_file_target;
1390 	struct cifs_tcon *src_tcon;
1391 	struct cifs_tcon *target_tcon;
1392 	ssize_t rc;
1393 
1394 	cifs_dbg(FYI, "copychunk range\n");
1395 
1396 	if (!src_file->private_data || !dst_file->private_data) {
1397 		rc = -EBADF;
1398 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1399 		goto out;
1400 	}
1401 
1402 	rc = -EXDEV;
1403 	smb_file_target = dst_file->private_data;
1404 	smb_file_src = src_file->private_data;
1405 	src_tcon = tlink_tcon(smb_file_src->tlink);
1406 	target_tcon = tlink_tcon(smb_file_target->tlink);
1407 
1408 	if (src_tcon->ses != target_tcon->ses) {
1409 		cifs_dbg(FYI, "source and target of copy not on same server\n");
1410 		goto out;
1411 	}
1412 
1413 	rc = -EOPNOTSUPP;
1414 	if (!target_tcon->ses->server->ops->copychunk_range)
1415 		goto out;
1416 
1417 	/*
1418 	 * Note: cifs case is easier than btrfs since server responsible for
1419 	 * checks for proper open modes and file type and if it wants
1420 	 * server could even support copy of range where source = target
1421 	 */
1422 	lock_two_nondirectories(target_inode, src_inode);
1423 
1424 	cifs_dbg(FYI, "about to flush pages\n");
1425 
1426 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1427 					  off + len - 1);
1428 	if (rc)
1429 		goto unlock;
1430 
1431 	/* The server-side copy will fail if the source crosses the EOF marker.
1432 	 * Advance the EOF marker after the flush above to the end of the range
1433 	 * if it's short of that.
1434 	 */
1435 	if (src_cifsi->netfs.remote_i_size < off + len) {
1436 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1437 		if (rc < 0)
1438 			goto unlock;
1439 	}
1440 
1441 	/* Flush and invalidate all the folios in the destination region.  If
1442 	 * the copy was successful, then some of the flush is extra overhead,
1443 	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
1444 	 */
1445 	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
1446 	if (rc)
1447 		goto unlock;
1448 
1449 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1450 			   i_size_read(target_inode), 0);
1451 
1452 	rc = file_modified(dst_file);
1453 	if (!rc) {
1454 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1455 			smb_file_src, smb_file_target, off, len, destoff);
1456 		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1457 			truncate_setsize(target_inode, destoff + rc);
1458 			netfs_resize_file(&target_cifsi->netfs,
1459 					  i_size_read(target_inode), true);
1460 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1461 					      i_size_read(target_inode));
1462 		}
1463 		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1464 			target_cifsi->netfs.zero_point = destoff + rc;
1465 	}
1466 
1467 	file_accessed(src_file);
1468 
1469 	/* force revalidate of size and timestamps of target file now
1470 	 * that target is updated on the server
1471 	 */
1472 	CIFS_I(target_inode)->time = 0;
1473 
1474 unlock:
1475 	/* although unlocking in the reverse order from locking is not
1476 	 * strictly necessary here it is a little cleaner to be consistent
1477 	 */
1478 	unlock_two_nondirectories(src_inode, target_inode);
1479 
1480 out:
1481 	return rc;
1482 }
1483 
1484 /*
1485  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1486  * is a dummy operation.
1487  */
1488 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1489 {
1490 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1491 		 file, datasync);
1492 
1493 	return 0;
1494 }
1495 
1496 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1497 				struct file *dst_file, loff_t destoff,
1498 				size_t len, unsigned int flags)
1499 {
1500 	unsigned int xid = get_xid();
1501 	ssize_t rc;
1502 	struct cifsFileInfo *cfile = dst_file->private_data;
1503 
1504 	if (cfile->swapfile) {
1505 		rc = -EOPNOTSUPP;
1506 		free_xid(xid);
1507 		return rc;
1508 	}
1509 
1510 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1511 					len, flags);
1512 	free_xid(xid);
1513 
1514 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1515 		rc = splice_copy_file_range(src_file, off, dst_file,
1516 					    destoff, len);
1517 	return rc;
1518 }
1519 
1520 const struct file_operations cifs_file_ops = {
1521 	.read_iter = cifs_loose_read_iter,
1522 	.write_iter = cifs_file_write_iter,
1523 	.open = cifs_open,
1524 	.release = cifs_close,
1525 	.lock = cifs_lock,
1526 	.flock = cifs_flock,
1527 	.fsync = cifs_fsync,
1528 	.flush = cifs_flush,
1529 	.mmap  = cifs_file_mmap,
1530 	.splice_read = filemap_splice_read,
1531 	.splice_write = iter_file_splice_write,
1532 	.llseek = cifs_llseek,
1533 	.unlocked_ioctl	= cifs_ioctl,
1534 	.copy_file_range = cifs_copy_file_range,
1535 	.remap_file_range = cifs_remap_file_range,
1536 	.setlease = cifs_setlease,
1537 	.fallocate = cifs_fallocate,
1538 };
1539 
1540 const struct file_operations cifs_file_strict_ops = {
1541 	.read_iter = cifs_strict_readv,
1542 	.write_iter = cifs_strict_writev,
1543 	.open = cifs_open,
1544 	.release = cifs_close,
1545 	.lock = cifs_lock,
1546 	.flock = cifs_flock,
1547 	.fsync = cifs_strict_fsync,
1548 	.flush = cifs_flush,
1549 	.mmap = cifs_file_strict_mmap,
1550 	.splice_read = filemap_splice_read,
1551 	.splice_write = iter_file_splice_write,
1552 	.llseek = cifs_llseek,
1553 	.unlocked_ioctl	= cifs_ioctl,
1554 	.copy_file_range = cifs_copy_file_range,
1555 	.remap_file_range = cifs_remap_file_range,
1556 	.setlease = cifs_setlease,
1557 	.fallocate = cifs_fallocate,
1558 };
1559 
1560 const struct file_operations cifs_file_direct_ops = {
1561 	.read_iter = netfs_unbuffered_read_iter,
1562 	.write_iter = netfs_file_write_iter,
1563 	.open = cifs_open,
1564 	.release = cifs_close,
1565 	.lock = cifs_lock,
1566 	.flock = cifs_flock,
1567 	.fsync = cifs_fsync,
1568 	.flush = cifs_flush,
1569 	.mmap = cifs_file_mmap,
1570 	.splice_read = copy_splice_read,
1571 	.splice_write = iter_file_splice_write,
1572 	.unlocked_ioctl  = cifs_ioctl,
1573 	.copy_file_range = cifs_copy_file_range,
1574 	.remap_file_range = cifs_remap_file_range,
1575 	.llseek = cifs_llseek,
1576 	.setlease = cifs_setlease,
1577 	.fallocate = cifs_fallocate,
1578 };
1579 
1580 const struct file_operations cifs_file_nobrl_ops = {
1581 	.read_iter = cifs_loose_read_iter,
1582 	.write_iter = cifs_file_write_iter,
1583 	.open = cifs_open,
1584 	.release = cifs_close,
1585 	.fsync = cifs_fsync,
1586 	.flush = cifs_flush,
1587 	.mmap  = cifs_file_mmap,
1588 	.splice_read = filemap_splice_read,
1589 	.splice_write = iter_file_splice_write,
1590 	.llseek = cifs_llseek,
1591 	.unlocked_ioctl	= cifs_ioctl,
1592 	.copy_file_range = cifs_copy_file_range,
1593 	.remap_file_range = cifs_remap_file_range,
1594 	.setlease = cifs_setlease,
1595 	.fallocate = cifs_fallocate,
1596 };
1597 
1598 const struct file_operations cifs_file_strict_nobrl_ops = {
1599 	.read_iter = cifs_strict_readv,
1600 	.write_iter = cifs_strict_writev,
1601 	.open = cifs_open,
1602 	.release = cifs_close,
1603 	.fsync = cifs_strict_fsync,
1604 	.flush = cifs_flush,
1605 	.mmap = cifs_file_strict_mmap,
1606 	.splice_read = filemap_splice_read,
1607 	.splice_write = iter_file_splice_write,
1608 	.llseek = cifs_llseek,
1609 	.unlocked_ioctl	= cifs_ioctl,
1610 	.copy_file_range = cifs_copy_file_range,
1611 	.remap_file_range = cifs_remap_file_range,
1612 	.setlease = cifs_setlease,
1613 	.fallocate = cifs_fallocate,
1614 };
1615 
1616 const struct file_operations cifs_file_direct_nobrl_ops = {
1617 	.read_iter = netfs_unbuffered_read_iter,
1618 	.write_iter = netfs_file_write_iter,
1619 	.open = cifs_open,
1620 	.release = cifs_close,
1621 	.fsync = cifs_fsync,
1622 	.flush = cifs_flush,
1623 	.mmap = cifs_file_mmap,
1624 	.splice_read = copy_splice_read,
1625 	.splice_write = iter_file_splice_write,
1626 	.unlocked_ioctl  = cifs_ioctl,
1627 	.copy_file_range = cifs_copy_file_range,
1628 	.remap_file_range = cifs_remap_file_range,
1629 	.llseek = cifs_llseek,
1630 	.setlease = cifs_setlease,
1631 	.fallocate = cifs_fallocate,
1632 };
1633 
1634 const struct file_operations cifs_dir_ops = {
1635 	.iterate_shared = cifs_readdir,
1636 	.release = cifs_closedir,
1637 	.read    = generic_read_dir,
1638 	.unlocked_ioctl  = cifs_ioctl,
1639 	.copy_file_range = cifs_copy_file_range,
1640 	.remap_file_range = cifs_remap_file_range,
1641 	.llseek = generic_file_llseek,
1642 	.fsync = cifs_dir_fsync,
1643 };
1644 
1645 static void
1646 cifs_init_once(void *inode)
1647 {
1648 	struct cifsInodeInfo *cifsi = inode;
1649 
1650 	inode_init_once(&cifsi->netfs.inode);
1651 	init_rwsem(&cifsi->lock_sem);
1652 }
1653 
1654 static int __init
1655 cifs_init_inodecache(void)
1656 {
1657 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1658 					      sizeof(struct cifsInodeInfo),
1659 					      0, (SLAB_RECLAIM_ACCOUNT|
1660 						SLAB_ACCOUNT),
1661 					      cifs_init_once);
1662 	if (cifs_inode_cachep == NULL)
1663 		return -ENOMEM;
1664 
1665 	return 0;
1666 }
1667 
1668 static void
1669 cifs_destroy_inodecache(void)
1670 {
1671 	/*
1672 	 * Make sure all delayed rcu free inodes are flushed before we
1673 	 * destroy cache.
1674 	 */
1675 	rcu_barrier();
1676 	kmem_cache_destroy(cifs_inode_cachep);
1677 }
1678 
1679 static int
1680 cifs_init_request_bufs(void)
1681 {
1682 	/*
1683 	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1684 	 * allocate some more bytes for CIFS.
1685 	 */
1686 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1687 
1688 	if (CIFSMaxBufSize < 8192) {
1689 	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1690 	Unicode path name has to fit in any SMB/CIFS path based frames */
1691 		CIFSMaxBufSize = 8192;
1692 	} else if (CIFSMaxBufSize > 1024*127) {
1693 		CIFSMaxBufSize = 1024 * 127;
1694 	} else {
1695 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1696 	}
1697 /*
1698 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1699 		 CIFSMaxBufSize, CIFSMaxBufSize);
1700 */
1701 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1702 					    CIFSMaxBufSize + max_hdr_size, 0,
1703 					    SLAB_HWCACHE_ALIGN, 0,
1704 					    CIFSMaxBufSize + max_hdr_size,
1705 					    NULL);
1706 	if (cifs_req_cachep == NULL)
1707 		return -ENOMEM;
1708 
1709 	if (cifs_min_rcv < 1)
1710 		cifs_min_rcv = 1;
1711 	else if (cifs_min_rcv > 64) {
1712 		cifs_min_rcv = 64;
1713 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1714 	}
1715 
1716 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1717 						  cifs_req_cachep);
1718 
1719 	if (cifs_req_poolp == NULL) {
1720 		kmem_cache_destroy(cifs_req_cachep);
1721 		return -ENOMEM;
1722 	}
1723 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1724 	almost all handle based requests (but not write response, nor is it
1725 	sufficient for path based requests).  A smaller size would have
1726 	been more efficient (compacting multiple slab items on one 4k page)
1727 	for the case in which debug was on, but this larger size allows
1728 	more SMBs to use small buffer alloc and is still much more
1729 	efficient to alloc 1 per page off the slab compared to 17K (5page)
1730 	alloc of large cifs buffers even when page debugging is on */
1731 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1732 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1733 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1734 	if (cifs_sm_req_cachep == NULL) {
1735 		mempool_destroy(cifs_req_poolp);
1736 		kmem_cache_destroy(cifs_req_cachep);
1737 		return -ENOMEM;
1738 	}
1739 
1740 	if (cifs_min_small < 2)
1741 		cifs_min_small = 2;
1742 	else if (cifs_min_small > 256) {
1743 		cifs_min_small = 256;
1744 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1745 	}
1746 
1747 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1748 						     cifs_sm_req_cachep);
1749 
1750 	if (cifs_sm_req_poolp == NULL) {
1751 		mempool_destroy(cifs_req_poolp);
1752 		kmem_cache_destroy(cifs_req_cachep);
1753 		kmem_cache_destroy(cifs_sm_req_cachep);
1754 		return -ENOMEM;
1755 	}
1756 
1757 	return 0;
1758 }
1759 
1760 static void
1761 cifs_destroy_request_bufs(void)
1762 {
1763 	mempool_destroy(cifs_req_poolp);
1764 	kmem_cache_destroy(cifs_req_cachep);
1765 	mempool_destroy(cifs_sm_req_poolp);
1766 	kmem_cache_destroy(cifs_sm_req_cachep);
1767 }
1768 
1769 static int init_mids(void)
1770 {
1771 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1772 					    sizeof(struct mid_q_entry), 0,
1773 					    SLAB_HWCACHE_ALIGN, NULL);
1774 	if (cifs_mid_cachep == NULL)
1775 		return -ENOMEM;
1776 
1777 	/* 3 is a reasonable minimum number of simultaneous operations */
1778 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1779 	if (cifs_mid_poolp == NULL) {
1780 		kmem_cache_destroy(cifs_mid_cachep);
1781 		return -ENOMEM;
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 static void destroy_mids(void)
1788 {
1789 	mempool_destroy(cifs_mid_poolp);
1790 	kmem_cache_destroy(cifs_mid_cachep);
1791 }
1792 
1793 static int cifs_init_netfs(void)
1794 {
1795 	cifs_io_request_cachep =
1796 		kmem_cache_create("cifs_io_request",
1797 				  sizeof(struct cifs_io_request), 0,
1798 				  SLAB_HWCACHE_ALIGN, NULL);
1799 	if (!cifs_io_request_cachep)
1800 		goto nomem_req;
1801 
1802 	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
1803 		goto nomem_reqpool;
1804 
1805 	cifs_io_subrequest_cachep =
1806 		kmem_cache_create("cifs_io_subrequest",
1807 				  sizeof(struct cifs_io_subrequest), 0,
1808 				  SLAB_HWCACHE_ALIGN, NULL);
1809 	if (!cifs_io_subrequest_cachep)
1810 		goto nomem_subreq;
1811 
1812 	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
1813 		goto nomem_subreqpool;
1814 
1815 	return 0;
1816 
1817 nomem_subreqpool:
1818 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1819 nomem_subreq:
1820 	mempool_exit(&cifs_io_request_pool);
1821 nomem_reqpool:
1822 	kmem_cache_destroy(cifs_io_request_cachep);
1823 nomem_req:
1824 	return -ENOMEM;
1825 }
1826 
1827 static void cifs_destroy_netfs(void)
1828 {
1829 	mempool_exit(&cifs_io_subrequest_pool);
1830 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1831 	mempool_exit(&cifs_io_request_pool);
1832 	kmem_cache_destroy(cifs_io_request_cachep);
1833 }
1834 
1835 static int __init
1836 init_cifs(void)
1837 {
1838 	int rc = 0;
1839 	cifs_proc_init();
1840 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1841 /*
1842  *  Initialize Global counters
1843  */
1844 	atomic_set(&sesInfoAllocCount, 0);
1845 	atomic_set(&tconInfoAllocCount, 0);
1846 	atomic_set(&tcpSesNextId, 0);
1847 	atomic_set(&tcpSesAllocCount, 0);
1848 	atomic_set(&tcpSesReconnectCount, 0);
1849 	atomic_set(&tconInfoReconnectCount, 0);
1850 
1851 	atomic_set(&buf_alloc_count, 0);
1852 	atomic_set(&small_buf_alloc_count, 0);
1853 #ifdef CONFIG_CIFS_STATS2
1854 	atomic_set(&total_buf_alloc_count, 0);
1855 	atomic_set(&total_small_buf_alloc_count, 0);
1856 	if (slow_rsp_threshold < 1)
1857 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1858 	else if (slow_rsp_threshold > 32767)
1859 		cifs_dbg(VFS,
1860 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1861 #endif /* CONFIG_CIFS_STATS2 */
1862 
1863 	atomic_set(&mid_count, 0);
1864 	GlobalCurrentXid = 0;
1865 	GlobalTotalActiveXid = 0;
1866 	GlobalMaxActiveXid = 0;
1867 	spin_lock_init(&cifs_tcp_ses_lock);
1868 	spin_lock_init(&GlobalMid_Lock);
1869 
1870 	cifs_lock_secret = get_random_u32();
1871 
1872 	if (cifs_max_pending < 2) {
1873 		cifs_max_pending = 2;
1874 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1875 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1876 		cifs_max_pending = CIFS_MAX_REQ;
1877 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1878 			 CIFS_MAX_REQ);
1879 	}
1880 
1881 	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1882 	if (dir_cache_timeout > 65000) {
1883 		dir_cache_timeout = 65000;
1884 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1885 	}
1886 
1887 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1888 	if (!cifsiod_wq) {
1889 		rc = -ENOMEM;
1890 		goto out_clean_proc;
1891 	}
1892 
1893 	/*
1894 	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1895 	 * so that we don't launch too many worker threads but
1896 	 * Documentation/core-api/workqueue.rst recommends setting it to 0
1897 	 */
1898 
1899 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1900 	decrypt_wq = alloc_workqueue("smb3decryptd",
1901 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1902 	if (!decrypt_wq) {
1903 		rc = -ENOMEM;
1904 		goto out_destroy_cifsiod_wq;
1905 	}
1906 
1907 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1908 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1909 	if (!fileinfo_put_wq) {
1910 		rc = -ENOMEM;
1911 		goto out_destroy_decrypt_wq;
1912 	}
1913 
1914 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1915 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1916 	if (!cifsoplockd_wq) {
1917 		rc = -ENOMEM;
1918 		goto out_destroy_fileinfo_put_wq;
1919 	}
1920 
1921 	deferredclose_wq = alloc_workqueue("deferredclose",
1922 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1923 	if (!deferredclose_wq) {
1924 		rc = -ENOMEM;
1925 		goto out_destroy_cifsoplockd_wq;
1926 	}
1927 
1928 	serverclose_wq = alloc_workqueue("serverclose",
1929 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1930 	if (!serverclose_wq) {
1931 		rc = -ENOMEM;
1932 		goto out_destroy_deferredclose_wq;
1933 	}
1934 
1935 	cfid_put_wq = alloc_workqueue("cfid_put_wq",
1936 				      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1937 	if (!cfid_put_wq) {
1938 		rc = -ENOMEM;
1939 		goto out_destroy_serverclose_wq;
1940 	}
1941 
1942 	rc = cifs_init_inodecache();
1943 	if (rc)
1944 		goto out_destroy_cfid_put_wq;
1945 
1946 	rc = cifs_init_netfs();
1947 	if (rc)
1948 		goto out_destroy_inodecache;
1949 
1950 	rc = init_mids();
1951 	if (rc)
1952 		goto out_destroy_netfs;
1953 
1954 	rc = cifs_init_request_bufs();
1955 	if (rc)
1956 		goto out_destroy_mids;
1957 
1958 #ifdef CONFIG_CIFS_DFS_UPCALL
1959 	rc = dfs_cache_init();
1960 	if (rc)
1961 		goto out_destroy_request_bufs;
1962 #endif /* CONFIG_CIFS_DFS_UPCALL */
1963 #ifdef CONFIG_CIFS_UPCALL
1964 	rc = init_cifs_spnego();
1965 	if (rc)
1966 		goto out_destroy_dfs_cache;
1967 #endif /* CONFIG_CIFS_UPCALL */
1968 #ifdef CONFIG_CIFS_SWN_UPCALL
1969 	rc = cifs_genl_init();
1970 	if (rc)
1971 		goto out_register_key_type;
1972 #endif /* CONFIG_CIFS_SWN_UPCALL */
1973 
1974 	rc = init_cifs_idmap();
1975 	if (rc)
1976 		goto out_cifs_swn_init;
1977 
1978 	rc = register_filesystem(&cifs_fs_type);
1979 	if (rc)
1980 		goto out_init_cifs_idmap;
1981 
1982 	rc = register_filesystem(&smb3_fs_type);
1983 	if (rc) {
1984 		unregister_filesystem(&cifs_fs_type);
1985 		goto out_init_cifs_idmap;
1986 	}
1987 
1988 	return 0;
1989 
1990 out_init_cifs_idmap:
1991 	exit_cifs_idmap();
1992 out_cifs_swn_init:
1993 #ifdef CONFIG_CIFS_SWN_UPCALL
1994 	cifs_genl_exit();
1995 out_register_key_type:
1996 #endif
1997 #ifdef CONFIG_CIFS_UPCALL
1998 	exit_cifs_spnego();
1999 out_destroy_dfs_cache:
2000 #endif
2001 #ifdef CONFIG_CIFS_DFS_UPCALL
2002 	dfs_cache_destroy();
2003 out_destroy_request_bufs:
2004 #endif
2005 	cifs_destroy_request_bufs();
2006 out_destroy_mids:
2007 	destroy_mids();
2008 out_destroy_netfs:
2009 	cifs_destroy_netfs();
2010 out_destroy_inodecache:
2011 	cifs_destroy_inodecache();
2012 out_destroy_cfid_put_wq:
2013 	destroy_workqueue(cfid_put_wq);
2014 out_destroy_serverclose_wq:
2015 	destroy_workqueue(serverclose_wq);
2016 out_destroy_deferredclose_wq:
2017 	destroy_workqueue(deferredclose_wq);
2018 out_destroy_cifsoplockd_wq:
2019 	destroy_workqueue(cifsoplockd_wq);
2020 out_destroy_fileinfo_put_wq:
2021 	destroy_workqueue(fileinfo_put_wq);
2022 out_destroy_decrypt_wq:
2023 	destroy_workqueue(decrypt_wq);
2024 out_destroy_cifsiod_wq:
2025 	destroy_workqueue(cifsiod_wq);
2026 out_clean_proc:
2027 	cifs_proc_clean();
2028 	return rc;
2029 }
2030 
2031 static void __exit
2032 exit_cifs(void)
2033 {
2034 	cifs_dbg(NOISY, "exit_smb3\n");
2035 	unregister_filesystem(&cifs_fs_type);
2036 	unregister_filesystem(&smb3_fs_type);
2037 	cifs_release_automount_timer();
2038 	exit_cifs_idmap();
2039 #ifdef CONFIG_CIFS_SWN_UPCALL
2040 	cifs_genl_exit();
2041 #endif
2042 #ifdef CONFIG_CIFS_UPCALL
2043 	exit_cifs_spnego();
2044 #endif
2045 #ifdef CONFIG_CIFS_DFS_UPCALL
2046 	dfs_cache_destroy();
2047 #endif
2048 	cifs_destroy_request_bufs();
2049 	destroy_mids();
2050 	cifs_destroy_netfs();
2051 	cifs_destroy_inodecache();
2052 	destroy_workqueue(deferredclose_wq);
2053 	destroy_workqueue(cifsoplockd_wq);
2054 	destroy_workqueue(decrypt_wq);
2055 	destroy_workqueue(fileinfo_put_wq);
2056 	destroy_workqueue(serverclose_wq);
2057 	destroy_workqueue(cfid_put_wq);
2058 	destroy_workqueue(cifsiod_wq);
2059 	cifs_proc_clean();
2060 }
2061 
2062 MODULE_AUTHOR("Steve French");
2063 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2064 MODULE_DESCRIPTION
2065 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2066 	"also older servers complying with the SNIA CIFS Specification)");
2067 MODULE_VERSION(CIFS_VERSION);
2068 MODULE_SOFTDEP("ecb");
2069 MODULE_SOFTDEP("hmac");
2070 MODULE_SOFTDEP("md5");
2071 MODULE_SOFTDEP("nls");
2072 MODULE_SOFTDEP("aes");
2073 MODULE_SOFTDEP("cmac");
2074 MODULE_SOFTDEP("sha256");
2075 MODULE_SOFTDEP("sha512");
2076 MODULE_SOFTDEP("aead2");
2077 MODULE_SOFTDEP("ccm");
2078 MODULE_SOFTDEP("gcm");
2079 module_init(init_cifs)
2080 module_exit(exit_cifs)
2081