xref: /linux/fs/smb/client/cifsfs.c (revision 7f4f3b14e8079ecde096bd734af10e30d40c27b7)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

/* Module-wide behavior switches; several are exposed as module params below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head	cifs_tcp_ses_list;
spinlock_t		cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* Dedicated workqueues used by the client for async work */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
__u32 cifs_lock_secret;
161 
162 /*
163  * Bumps refcount for cifs super block.
164  * Note that it should be only called if a reference to VFS super block is
165  * already held, e.g. in open-type syscalls context. Otherwise it can race with
166  * atomic_dec_and_test in deactivate_locked_super.
167  */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 	struct cifs_sb_info *server = CIFS_SB(sb);
172 
173 	if (atomic_inc_return(&server->active) == 1)
174 		atomic_inc(&sb->s_active);
175 }
176 
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 	struct cifs_sb_info *server = CIFS_SB(sb);
181 
182 	if (atomic_dec_and_test(&server->active))
183 		deactivate_super(sb);
184 }
185 
/*
 * Finish superblock setup once the session/tcon are established: sb flags,
 * maximum file size, timestamp granularity and range, readahead and block
 * size tuning, and instantiation of the root inode and dentry.
 * Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* snapshot mounts are views of the past, hence read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive shares need matching dentry hash/compare ops */
	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
286 
/*
 * Tear down a cifs superblock.  Cached directory dentries and the root
 * dentry pinned at mount time must be released before kill_anon_super(),
 * and the cifs-level mount state is released last.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
306 
/*
 * statfs(2) handler.  Fills in what is known client-side (name length
 * limit, fsid derived from the volume serial number and creation time),
 * then queries the server for block counts when the dialect provides a
 * queryfs operation.  Returns 0 or a negative errno.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;
	const char *full_path;
	void *page;

	xid = get_xid();
	page = alloc_dentry_path();

	full_path = build_path_from_dentry(dentry, page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto statfs_out;
	}

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);

statfs_out:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
349 
350 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
351 {
352 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
353 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
354 	struct TCP_Server_Info *server = tcon->ses->server;
355 
356 	if (server->ops->fallocate)
357 		return server->ops->fallocate(file, tcon, mode, off, len);
358 
359 	return -EOPNOTSUPP;
360 }
361 
362 static int cifs_permission(struct mnt_idmap *idmap,
363 			   struct inode *inode, int mask)
364 {
365 	struct cifs_sb_info *cifs_sb;
366 
367 	cifs_sb = CIFS_SB(inode->i_sb);
368 
369 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
370 		if ((mask & MAY_EXEC) && !execute_ok(inode))
371 			return -EACCES;
372 		else
373 			return 0;
374 	} else /* file mode might have been restricted at mount time
375 		on the client (above and beyond ACL on servers) for
376 		servers which do not support setting and viewing mode bits,
377 		so allowing client to check permissions is useful */
378 		return generic_permission(&nop_mnt_idmap, inode, mask);
379 }
380 
/* Slab caches and mempools for inodes, request buffers, mids and I/O reqs */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
392 
/*
 * Allocate and initialize a cifs inode from the dedicated slab cache.
 * Returns NULL on allocation failure (treated as -ENOMEM by the VFS).
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* random per-inode lease key */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
431 
432 static void
433 cifs_free_inode(struct inode *inode)
434 {
435 	struct cifsInodeInfo *cinode = CIFS_I(inode);
436 
437 	if (S_ISLNK(inode->i_mode))
438 		kfree(cinode->symlink_target);
439 	kmem_cache_free(cifs_inode_cachep, cinode);
440 }
441 
/*
 * Evict an inode: wait for outstanding netfs I/O, drop its page cache, and
 * release any fscache cookie before the generic clear_inode().
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	/* if the inode was pinning the cache for writeback, stop using it */
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
452 
/* Emit the ",addr=" mount option showing the server's destination address */
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		/* append a non-zero IPv6 scope id as "%<id>" */
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}
476 
/* Emit the ",sec=" (and related) mount options for this session */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		/* sec=none is only shown when no user name was supplied */
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	/* a trailing "i" denotes signing, e.g. "krb5i" */
	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}
511 
/* Emit the ",cache=" mount option derived from the mount flag bits */
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
}
528 
/*
 * cifs_show_devname() is used so we show the mount device name with correct
 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 */
static int cifs_show_devname(struct seq_file *m, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);

	if (devname == NULL)
		seq_puts(m, "none"); /* kstrdup failed; show a placeholder */
	else {
		convert_delimiter(devname, '/');
		/* escape all spaces in share names */
		seq_escape(m, devname, " \t");
		kfree(devname);
	}
	return 0;
}
548 
/* Emit the ",upcall_target=" mount option; unspecified defaults to "app" */
static void
cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
		seq_puts(s, ",upcall_target=app");
		return;
	}

	seq_puts(s, ",upcall_target=");

	switch (cifs_sb->ctx->upcall_target) {
	case UPTARGET_APP:
		seq_puts(s, "app");
		break;
	case UPTARGET_MOUNT:
		seq_puts(s, "mount");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}
}
572 
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* only show srcaddr if one was explicitly bound */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* boolean options driven by mnt_cifs_flags bits */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
771 
/*
 * ->umount_begin (umount -f): when this is the only mount of the share,
 * close deferred files and wake every thread blocked on the transport
 * queues so the forced unmount can make progress.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
816 
/*
 * ->freeze_fs: close all deferred (cached-open) file handles before the
 * filesystem is frozen.  Always returns 0.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return 0;

	tcon = cifs_sb_master_tcon(cifs_sb);

	cifs_close_all_deferred_files(tcon);
	return 0;
}
830 
#ifdef CONFIG_CIFS_STATS2
/* ->show_stats hook; currently a stub that prints nothing */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
838 
/* ->write_inode: simply let netfs drop any writeback pin on the inode */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
843 
/*
 * ->drop_inode: decide whether an inode is evicted when its last reference
 * is dropped; non-zero means evict.
 */
static int cifs_drop_inode(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
}
852 
/* Superblock operations installed on each cifs sb in cifs_read_super() */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
873 
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Returns a dentry with refcount + 1 on success and an ERR_PTR on failure
 * (-ENOMEM, -ENOTDIR, or a lookup error).
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk the prefix path one component at a time */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
928 
/* sget() "set" callback: attach the pre-built cifs_sb to the new sb */
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}
935 
/*
 * Mount entry point shared by the cifs and smb3 filesystem types: duplicate
 * the fs context, connect to the server/share, then find or create a
 * superblock and resolve the root (or prefix-path) dentry.
 * Returns the root dentry or an ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	/* establish the TCP/session/tree connections for this mount */
	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		/* sget() matched an existing sb; drop our duplicate mount */
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* when an existing sb was reused, cifs_sb is NULL; use old_ctx then */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1029 
/*
 * llseek for regular files.  SEEK_END/SEEK_DATA/SEEK_HOLE depend on an
 * accurate file size, so the cached attributes are revalidated first;
 * the request is then delegated to the protocol-specific llseek op when
 * the server dialect provides one, else to generic_file_llseek().
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* Prefer the dialect-specific llseek op when one is provided */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1074 
1075 static int
1076 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1077 {
1078 	/*
1079 	 * Note that this is called by vfs setlease with i_lock held to
1080 	 * protect *lease from going away.
1081 	 */
1082 	struct inode *inode = file_inode(file);
1083 	struct cifsFileInfo *cfile = file->private_data;
1084 
1085 	/* Check if file is oplocked if this is request for new lease */
1086 	if (arg == F_UNLCK ||
1087 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1088 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1089 		return generic_setlease(file, arg, lease, priv);
1090 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1091 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1092 		/*
1093 		 * If the server claims to support oplock on this file, then we
1094 		 * still need to check oplock even if the local_lease mount
1095 		 * option is set, but there are servers which do not support
1096 		 * oplock for which this mount option may be useful if the user
1097 		 * knows that the file won't be changed on the server by anyone
1098 		 * else.
1099 		 */
1100 		return generic_setlease(file, arg, lease, priv);
1101 	else
1102 		return -EAGAIN;
1103 }
1104 
/* The legacy "cifs" filesystem type (mount -t cifs) */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1114 
/* The "smb3" filesystem type (mount -t smb3); shares all ops with "cifs" */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1125 
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1144 
/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1154 
1155 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1156 			    struct delayed_call *done)
1157 {
1158 	char *target_path;
1159 
1160 	if (!dentry)
1161 		return ERR_PTR(-ECHILD);
1162 
1163 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1164 	if (!target_path)
1165 		return ERR_PTR(-ENOMEM);
1166 
1167 	spin_lock(&inode->i_lock);
1168 	if (likely(CIFS_I(inode)->symlink_target)) {
1169 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1170 	} else {
1171 		kfree(target_path);
1172 		target_path = ERR_PTR(-EOPNOTSUPP);
1173 	}
1174 	spin_unlock(&inode->i_lock);
1175 
1176 	if (!IS_ERR(target_path))
1177 		set_delayed_call(done, kfree_link, target_path);
1178 
1179 	return target_path;
1180 }
1181 
/* Inode operations for symlinks */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1188 
1189 /*
1190  * Advance the EOF marker to after the source range.
1191  */
1192 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1193 				struct cifs_tcon *src_tcon,
1194 				unsigned int xid, loff_t src_end)
1195 {
1196 	struct cifsFileInfo *writeable_srcfile;
1197 	int rc = -EINVAL;
1198 
1199 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1200 	if (writeable_srcfile) {
1201 		if (src_tcon->ses->server->ops->set_file_size)
1202 			rc = src_tcon->ses->server->ops->set_file_size(
1203 				xid, src_tcon, writeable_srcfile,
1204 				src_inode->i_size, true /* no need to set sparse */);
1205 		else
1206 			rc = -ENOSYS;
1207 		cifsFileInfo_put(writeable_srcfile);
1208 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1209 	}
1210 
1211 	if (rc < 0)
1212 		goto set_failed;
1213 
1214 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1215 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1216 	return 0;
1217 
1218 set_failed:
1219 	return filemap_write_and_wait(src_inode->i_mapping);
1220 }
1221 
1222 /*
1223  * Flush out either the folio that overlaps the beginning of a range in which
1224  * pos resides or the folio that overlaps the end of a range unless that folio
1225  * is entirely within the range we're going to invalidate.  We extend the flush
1226  * bounds to encompass the folio.
1227  */
1228 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1229 			    bool first)
1230 {
1231 	struct folio *folio;
1232 	unsigned long long fpos, fend;
1233 	pgoff_t index = pos / PAGE_SIZE;
1234 	size_t size;
1235 	int rc = 0;
1236 
1237 	folio = filemap_get_folio(inode->i_mapping, index);
1238 	if (IS_ERR(folio))
1239 		return 0;
1240 
1241 	size = folio_size(folio);
1242 	fpos = folio_pos(folio);
1243 	fend = fpos + size - 1;
1244 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1245 	*_fend   = max_t(unsigned long long, *_fend, fend);
1246 	if ((first && pos == fpos) || (!first && pos == fend))
1247 		goto out;
1248 
1249 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1250 out:
1251 	folio_put(folio);
1252 	return rc;
1253 }
1254 
/*
 * ->remap_file_range: clone (reflink-style) a byte range from src_file into
 * dst_file using the server's duplicate_extents op.  REMAP_FILE_DEDUP is not
 * supported.  Returns @len on success or a negative error.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "clone to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* Clone grew the target: update local size bookkeeping */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1366 
/*
 * Server-side copy of a byte range from src_file to dst_file via the
 * dialect's copychunk_range op.  Both files must be on the same SMB session.
 * Returns the number of bytes copied or a negative error (-EBADF on missing
 * open state, -EXDEV across sessions, -EOPNOTSUPP without server support).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* Copychunk happens server-side, so both ends must share a session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Copy may have extended the file: sync local size state */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1471 
1472 /*
1473  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1474  * is a dummy operation.
1475  */
1476 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1477 {
1478 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1479 		 file, datasync);
1480 
1481 	return 0;
1482 }
1483 
1484 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1485 				struct file *dst_file, loff_t destoff,
1486 				size_t len, unsigned int flags)
1487 {
1488 	unsigned int xid = get_xid();
1489 	ssize_t rc;
1490 	struct cifsFileInfo *cfile = dst_file->private_data;
1491 
1492 	if (cfile->swapfile) {
1493 		rc = -EOPNOTSUPP;
1494 		free_xid(xid);
1495 		return rc;
1496 	}
1497 
1498 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1499 					len, flags);
1500 	free_xid(xid);
1501 
1502 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1503 		rc = splice_copy_file_range(src_file, off, dst_file,
1504 					    destoff, len);
1505 	return rc;
1506 }
1507 
/* File ops: cached ("loose") I/O with byte-range locking (.lock/.flock) */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1527 
/* File ops: strict-cache variant (cifs_strict_readv/writev/fsync/mmap) */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1547 
/* File ops: direct (unbuffered netfs) I/O variant; splice copies the data */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1567 
/* File ops: cached I/O without byte-range locks (no .lock/.flock entries) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1585 
/* File ops: strict-cache variant without byte-range locks */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1603 
/* File ops: direct (unbuffered netfs) I/O without byte-range locks */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1621 
/* File ops for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1632 
1633 static void
1634 cifs_init_once(void *inode)
1635 {
1636 	struct cifsInodeInfo *cifsi = inode;
1637 
1638 	inode_init_once(&cifsi->netfs.inode);
1639 	init_rwsem(&cifsi->lock_sem);
1640 }
1641 
1642 static int __init
1643 cifs_init_inodecache(void)
1644 {
1645 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1646 					      sizeof(struct cifsInodeInfo),
1647 					      0, (SLAB_RECLAIM_ACCOUNT|
1648 						SLAB_ACCOUNT),
1649 					      cifs_init_once);
1650 	if (cifs_inode_cachep == NULL)
1651 		return -ENOMEM;
1652 
1653 	return 0;
1654 }
1655 
/* Destroy the cifs inode slab cache. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1666 
/*
 * Create the slab caches and mempools for large (cifs_request) and small
 * (cifs_small_rq) SMB buffers, clamping the module parameters
 * CIFSMaxBufSize, cifs_min_rcv and cifs_min_small to sane ranges first.
 * On any failure everything allocated so far is torn down and -ENOMEM
 * is returned.
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy whitelist covers the whole buffer (headers + payload) */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	/* mempool guarantees cifs_min_rcv large buffers even under pressure */
	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1747 
1748 static void
1749 cifs_destroy_request_bufs(void)
1750 {
1751 	mempool_destroy(cifs_req_poolp);
1752 	kmem_cache_destroy(cifs_req_cachep);
1753 	mempool_destroy(cifs_sm_req_poolp);
1754 	kmem_cache_destroy(cifs_sm_req_cachep);
1755 }
1756 
1757 static int init_mids(void)
1758 {
1759 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1760 					    sizeof(struct mid_q_entry), 0,
1761 					    SLAB_HWCACHE_ALIGN, NULL);
1762 	if (cifs_mid_cachep == NULL)
1763 		return -ENOMEM;
1764 
1765 	/* 3 is a reasonable minimum number of simultaneous operations */
1766 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1767 	if (cifs_mid_poolp == NULL) {
1768 		kmem_cache_destroy(cifs_mid_cachep);
1769 		return -ENOMEM;
1770 	}
1771 
1772 	return 0;
1773 }
1774 
/* Tear down the mid (multiplex id) mempool and its backing slab cache. */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1780 
/*
 * Create the slab caches and mempools backing cifs netfs I/O requests and
 * subrequests.  On failure, unwinds whatever was created (in reverse order
 * via the goto ladder) and returns -ENOMEM.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1814 
/* Tear down the netfs I/O request/subrequest pools and slab caches. */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1822 
/*
 * Module entry point: initialize global counters/locks, clamp module
 * parameters, create workqueues, slab caches and mempools, bring up the
 * optional DFS/SPNEGO/SWN subsystems, then register the "cifs" and "smb3"
 * filesystem types.  On any failure, everything initialized so far is
 * unwound in reverse order via the goto ladder at the bottom.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	/* Random secret used to hash lock ownership (see cifs_lock_secret use) */
	cifs_lock_secret = get_random_u32();

	/* Clamp module parameters to supported ranges */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_serverclose_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/* Error unwind: labels run in reverse order of the setup above;
	   the #ifdef'd labels match the conditional init steps. */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
2009 
static void __exit
exit_cifs(void)
{
	/*
	 * Module unload path: tear down everything init_cifs() set up, in
	 * (roughly) reverse order.  By the time this runs the module refcount
	 * is zero, so no superblocks of either fs type can still be mounted.
	 */
	cifs_dbg(NOISY, "exit_smb3\n");
	/* Stop new mounts first; both names ("cifs" and "smb3") were registered */
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
	/* Optional upcall/netlink subsystems, mirroring the #ifdefs in init_cifs() */
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	/* Free request buffer pools and mid/inode caches before the workqueues */
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	/*
	 * destroy_workqueue() drains any remaining work items.
	 * NOTE(review): this destroy order does not exactly mirror the reverse
	 * of the allocation order in init_cifs() (e.g. decrypt_wq vs
	 * fileinfo_put_wq); presumably harmless since all users are gone by
	 * now, but worth confirming if the ordering ever becomes significant.
	 */
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cifsiod_wq);
	/* Remove /proc/fs/cifs entries last */
	cifs_proc_clean();
}
2039 
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: hints to userspace module tooling (modprobe, initramfs
 * generators) that these crypto/nls modules should be made available
 * alongside cifs.ko.  Presumably these cover the hash/cipher needs of SMB
 * signing and SMB3 encryption (CCM/GCM) plus charset conversion; unlike hard
 * symbol dependencies, a missing softdep does not fail module load.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
/* Register init_cifs()/exit_cifs() as the module load/unload entry points */
module_init(init_cifs)
module_exit(exit_cifs)
2059