xref: /linux/fs/smb/client/cifsfs.c (revision 40ccd6aa3e2e05be93394e3cd560c718dedfcc77)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the year range should extend only to 119,
56  * which limits the maximum year to 2099, but this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
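/*
 * DOS dates pack the year offset from 1980, the month and the day as
 * year<<9 | month<<5 | day; DOS times pack hours, minutes and two-second
 * units as hours<<11 | minutes<<5 | (seconds/2), matching the limits above.
 */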
61 
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 unsigned int sign_CIFS_PDUs = 1;
74 
75 /*
76  * Global transaction id (XID) information
77  */
78 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
80 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
82 
83 /*
84  *  Global counters, updated atomically
85  */
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
92 
93 atomic_t mid_count;
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
99 #endif /* STATS2 */
100 struct list_head	cifs_tcp_ses_list;
101 spinlock_t		cifs_tcp_ses_lock;
102 static const struct super_operations cifs_super_ops;
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 				 "for CIFS requests. "
107 				 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 				"1 to 64");
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 				 "Range: 2 to 256");
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 				   "CIFS/SMB1 dialect (N/A for SMB3) "
120 				   "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 #ifdef CONFIG_CIFS_STATS2
126 unsigned int slow_rsp_threshold = 1;
127 module_param(slow_rsp_threshold, uint, 0644);
128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
129 				   "before logging that a response is delayed. "
130 				   "Default: 1 (if set to 0 disables msg).");
131 #endif /* STATS2 */
132 
133 module_param(enable_oplocks, bool, 0644);
134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
135 
136 module_param(enable_gcm_256, bool, 0644);
137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
138 
139 module_param(require_gcm_256, bool, 0644);
140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
141 
142 module_param(enable_negotiate_signing, bool, 0644);
143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
144 
145 module_param(disable_legacy_dialects, bool, 0644);
146 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
147 				  "helpful to restrict the ability to "
148 				  "override the default dialects (SMB2.1, "
149 				  "SMB3 and SMB3.02) on mount with old "
150 				  "dialects (CIFS/SMB1 and SMB2) since "
151 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
152 				  " and less secure. Default: n/N/0");
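/*
 * Parameters declared above with mode 0644 can also be changed at runtime
 * via sysfs, e.g. "echo 0 > /sys/module/cifs/parameters/enable_oplocks",
 * while 0444 parameters are read-only there and must be set when the
 * module is loaded (e.g. "modprobe cifs CIFSMaxBufSize=130048").
 */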
153 
154 struct workqueue_struct	*cifsiod_wq;
155 struct workqueue_struct	*decrypt_wq;
156 struct workqueue_struct	*fileinfo_put_wq;
157 struct workqueue_struct	*cifsoplockd_wq;
158 struct workqueue_struct	*deferredclose_wq;
159 struct workqueue_struct	*serverclose_wq;
160 __u32 cifs_lock_secret;
161 
162 /*
163  * Bumps refcount for cifs super block.
164  *   Note that it should only be called if a reference to the VFS super block is
165  * already held, e.g. in open-type syscalls context. Otherwise it can race with
166  * atomic_dec_and_test in deactivate_locked_super.
167  */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 	struct cifs_sb_info *server = CIFS_SB(sb);
172 
173 	if (atomic_inc_return(&server->active) == 1)
174 		atomic_inc(&sb->s_active);
175 }
176 
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 	struct cifs_sb_info *server = CIFS_SB(sb);
181 
182 	if (atomic_dec_and_test(&server->active))
183 		deactivate_super(sb);
184 }
185 
186 static int
187 cifs_read_super(struct super_block *sb)
188 {
189 	struct inode *inode;
190 	struct cifs_sb_info *cifs_sb;
191 	struct cifs_tcon *tcon;
192 	struct timespec64 ts;
193 	int rc = 0;
194 
195 	cifs_sb = CIFS_SB(sb);
196 	tcon = cifs_sb_master_tcon(cifs_sb);
197 
198 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
199 		sb->s_flags |= SB_POSIXACL;
200 
201 	if (tcon->snapshot_time)
202 		sb->s_flags |= SB_RDONLY;
203 
204 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
205 		sb->s_maxbytes = MAX_LFS_FILESIZE;
206 	else
207 		sb->s_maxbytes = MAX_NON_LFS;
208 
209 	/*
210 	 * Some very old servers like DOS and OS/2 used 2 second granularity
211 	 * (while all current servers use 100ns granularity - see MS-DTYP)
212 	 * but 1 second is the maximum allowed granularity for the VFS
213 	 * so for old servers set time granularity to 1 second while for
214 	 * everything else (current servers) set it to 100ns.
215 	 */
216 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
217 	    ((tcon->ses->capabilities &
218 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
219 	    !tcon->unix_ext) {
220 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
221 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
222 		sb->s_time_min = ts.tv_sec;
223 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
224 				    cpu_to_le16(SMB_TIME_MAX), 0);
225 		sb->s_time_max = ts.tv_sec;
226 	} else {
227 		/*
228 		 * Almost every server, including all SMB2+, uses DCE TIME
229 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
230 		 */
231 		sb->s_time_gran = 100;
232 		ts = cifs_NTtimeToUnix(0);
233 		sb->s_time_min = ts.tv_sec;
234 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
235 		sb->s_time_max = ts.tv_sec;
236 	}
237 
238 	sb->s_magic = CIFS_SUPER_MAGIC;
239 	sb->s_op = &cifs_super_ops;
240 	sb->s_xattr = cifs_xattr_handlers;
241 	rc = super_setup_bdi(sb);
242 	if (rc)
243 		goto out_no_root;
244 	/* tune readahead according to rsize if readahead size not set on mount */
245 	if (cifs_sb->ctx->rsize == 0)
246 		cifs_sb->ctx->rsize =
247 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
248 	if (cifs_sb->ctx->rasize)
249 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
250 	else
251 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
252 
253 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
254 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
255 	inode = cifs_root_iget(sb);
256 
257 	if (IS_ERR(inode)) {
258 		rc = PTR_ERR(inode);
259 		goto out_no_root;
260 	}
261 
262 	if (tcon->nocase)
263 		sb->s_d_op = &cifs_ci_dentry_ops;
264 	else
265 		sb->s_d_op = &cifs_dentry_ops;
266 
267 	sb->s_root = d_make_root(inode);
268 	if (!sb->s_root) {
269 		rc = -ENOMEM;
270 		goto out_no_root;
271 	}
272 
273 #ifdef CONFIG_CIFS_NFSD_EXPORT
274 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
275 		cifs_dbg(FYI, "export ops supported\n");
276 		sb->s_export_op = &cifs_export_ops;
277 	}
278 #endif /* CONFIG_CIFS_NFSD_EXPORT */
279 
280 	return 0;
281 
282 out_no_root:
283 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
284 	return rc;
285 }
286 
287 static void cifs_kill_sb(struct super_block *sb)
288 {
289 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
290 
291 	/*
292 	 * We need to release all dentries for the cached directories
293 	 * before we kill the sb.
294 	 */
295 	if (cifs_sb->root) {
296 		close_all_cached_dirs(cifs_sb);
297 
298 		/* finally release root dentry */
299 		dput(cifs_sb->root);
300 		cifs_sb->root = NULL;
301 	}
302 
303 	kill_anon_super(sb);
304 	cifs_umount(cifs_sb);
305 }
306 
307 static int
308 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
309 {
310 	struct super_block *sb = dentry->d_sb;
311 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
312 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
313 	struct TCP_Server_Info *server = tcon->ses->server;
314 	unsigned int xid;
315 	int rc = 0;
316 
317 	xid = get_xid();
318 
319 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
320 		buf->f_namelen =
321 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
322 	else
323 		buf->f_namelen = PATH_MAX;
324 
325 	buf->f_fsid.val[0] = tcon->vol_serial_number;
326 	/* we are using part of the create time for more randomness, see man statfs */
327 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
328 
329 	buf->f_files = 0;	/* undefined */
330 	buf->f_ffree = 0;	/* unlimited */
331 
332 	if (server->ops->queryfs)
333 		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
334 
335 	free_xid(xid);
336 	return rc;
337 }
338 
339 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
340 {
341 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
342 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
343 	struct TCP_Server_Info *server = tcon->ses->server;
344 
345 	if (server->ops->fallocate)
346 		return server->ops->fallocate(file, tcon, mode, off, len);
347 
348 	return -EOPNOTSUPP;
349 }
350 
351 static int cifs_permission(struct mnt_idmap *idmap,
352 			   struct inode *inode, int mask)
353 {
354 	struct cifs_sb_info *cifs_sb;
355 
356 	cifs_sb = CIFS_SB(inode->i_sb);
357 
358 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
359 		if ((mask & MAY_EXEC) && !execute_ok(inode))
360 			return -EACCES;
361 		else
362 			return 0;
363 	} else /* the file mode might have been restricted at mount time
364 		on the client (above and beyond the ACL on the server) for
365 		servers which do not support setting and viewing mode bits,
366 		so allowing the client to check permissions is useful */
367 		return generic_permission(&nop_mnt_idmap, inode, mask);
368 }
369 
370 static struct kmem_cache *cifs_inode_cachep;
371 static struct kmem_cache *cifs_req_cachep;
372 static struct kmem_cache *cifs_mid_cachep;
373 static struct kmem_cache *cifs_sm_req_cachep;
374 static struct kmem_cache *cifs_io_request_cachep;
375 static struct kmem_cache *cifs_io_subrequest_cachep;
376 mempool_t *cifs_sm_req_poolp;
377 mempool_t *cifs_req_poolp;
378 mempool_t *cifs_mid_poolp;
379 mempool_t cifs_io_request_pool;
380 mempool_t cifs_io_subrequest_pool;
381 
382 static struct inode *
383 cifs_alloc_inode(struct super_block *sb)
384 {
385 	struct cifsInodeInfo *cifs_inode;
386 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
387 	if (!cifs_inode)
388 		return NULL;
389 	cifs_inode->cifsAttrs = 0x20;	/* default */
390 	cifs_inode->time = 0;
391 	/*
392 	 * Until the file is open and we have gotten oplock info back from the
393 	 * server, we cannot assume caching of file data or metadata.
394 	 */
395 	cifs_set_oplock_level(cifs_inode, 0);
396 	cifs_inode->lease_granted = false;
397 	cifs_inode->flags = 0;
398 	spin_lock_init(&cifs_inode->writers_lock);
399 	cifs_inode->writers = 0;
400 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
401 	cifs_inode->netfs.remote_i_size = 0;
402 	cifs_inode->uniqueid = 0;
403 	cifs_inode->createtime = 0;
404 	cifs_inode->epoch = 0;
405 	spin_lock_init(&cifs_inode->open_file_lock);
406 	generate_random_uuid(cifs_inode->lease_key);
407 	cifs_inode->symlink_target = NULL;
408 
409 	/*
410 	 * Can not set i_flags here - they get immediately overwritten to zero
411 	 * by the VFS.
412 	 */
413 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
414 	INIT_LIST_HEAD(&cifs_inode->openFileList);
415 	INIT_LIST_HEAD(&cifs_inode->llist);
416 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
417 	spin_lock_init(&cifs_inode->deferred_lock);
418 	return &cifs_inode->netfs.inode;
419 }
420 
421 static void
422 cifs_free_inode(struct inode *inode)
423 {
424 	struct cifsInodeInfo *cinode = CIFS_I(inode);
425 
426 	if (S_ISLNK(inode->i_mode))
427 		kfree(cinode->symlink_target);
428 	kmem_cache_free(cifs_inode_cachep, cinode);
429 }
430 
431 static void
432 cifs_evict_inode(struct inode *inode)
433 {
434 	truncate_inode_pages_final(&inode->i_data);
435 	if (inode->i_state & I_PINNING_NETFS_WB)
436 		cifs_fscache_unuse_inode_cookie(inode, true);
437 	cifs_fscache_release_inode_cookie(inode);
438 	clear_inode(inode);
439 }
440 
441 static void
442 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
443 {
444 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
445 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
446 
447 	seq_puts(s, ",addr=");
448 
449 	switch (server->dstaddr.ss_family) {
450 	case AF_INET:
451 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
452 		break;
453 	case AF_INET6:
454 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
455 		if (sa6->sin6_scope_id)
456 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
457 		break;
458 	default:
459 		seq_puts(s, "(unknown)");
460 	}
461 	if (server->rdma)
462 		seq_puts(s, ",rdma");
463 }
464 
465 static void
466 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
467 {
468 	if (ses->sectype == Unspecified) {
469 		if (ses->user_name == NULL)
470 			seq_puts(s, ",sec=none");
471 		return;
472 	}
473 
474 	seq_puts(s, ",sec=");
475 
476 	switch (ses->sectype) {
477 	case NTLMv2:
478 		seq_puts(s, "ntlmv2");
479 		break;
480 	case Kerberos:
481 		seq_puts(s, "krb5");
482 		break;
483 	case RawNTLMSSP:
484 		seq_puts(s, "ntlmssp");
485 		break;
486 	default:
487 		/* shouldn't ever happen */
488 		seq_puts(s, "unknown");
489 		break;
490 	}
491 
492 	if (ses->sign)
493 		seq_puts(s, "i");
494 
495 	if (ses->sectype == Kerberos)
496 		seq_printf(s, ",cruid=%u",
497 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
498 }
499 
500 static void
501 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
502 {
503 	seq_puts(s, ",cache=");
504 
505 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
506 		seq_puts(s, "strict");
507 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
508 		seq_puts(s, "none");
509 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
510 		seq_puts(s, "singleclient"); /* assume only one client access */
511 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
512 		seq_puts(s, "ro"); /* read only caching assumed */
513 	else
514 		seq_puts(s, "loose");
515 }
516 
517 /*
518  * cifs_show_devname() is used so we show the mount device name with correct
519  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
520  */
521 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
522 {
523 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
524 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
525 
526 	if (devname == NULL)
527 		seq_puts(m, "none");
528 	else {
529 		convert_delimiter(devname, '/');
530 		/* escape all spaces in share names */
531 		seq_escape(m, devname, " \t");
532 		kfree(devname);
533 	}
534 	return 0;
535 }
536 
537 /*
538  * cifs_show_options() is for displaying mount options in /proc/mounts.
539  * Not all settable options are displayed but most of the important
540  * ones are.
541  */
542 static int
543 cifs_show_options(struct seq_file *s, struct dentry *root)
544 {
545 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
546 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
547 	struct sockaddr *srcaddr;
548 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
549 
550 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
551 	cifs_show_security(s, tcon->ses);
552 	cifs_show_cache_flavor(s, cifs_sb);
553 
554 	if (tcon->no_lease)
555 		seq_puts(s, ",nolease");
556 	if (cifs_sb->ctx->multiuser)
557 		seq_puts(s, ",multiuser");
558 	else if (tcon->ses->user_name)
559 		seq_show_option(s, "username", tcon->ses->user_name);
560 
561 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
562 		seq_show_option(s, "domain", tcon->ses->domainName);
563 
564 	if (srcaddr->sa_family != AF_UNSPEC) {
565 		struct sockaddr_in *saddr4;
566 		struct sockaddr_in6 *saddr6;
567 		saddr4 = (struct sockaddr_in *)srcaddr;
568 		saddr6 = (struct sockaddr_in6 *)srcaddr;
569 		if (srcaddr->sa_family == AF_INET6)
570 			seq_printf(s, ",srcaddr=%pI6c",
571 				   &saddr6->sin6_addr);
572 		else if (srcaddr->sa_family == AF_INET)
573 			seq_printf(s, ",srcaddr=%pI4",
574 				   &saddr4->sin_addr.s_addr);
575 		else
576 			seq_printf(s, ",srcaddr=BAD-AF:%i",
577 				   (int)(srcaddr->sa_family));
578 	}
579 
580 	seq_printf(s, ",uid=%u",
581 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
582 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
583 		seq_puts(s, ",forceuid");
584 	else
585 		seq_puts(s, ",noforceuid");
586 
587 	seq_printf(s, ",gid=%u",
588 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
589 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
590 		seq_puts(s, ",forcegid");
591 	else
592 		seq_puts(s, ",noforcegid");
593 
594 	cifs_show_address(s, tcon->ses->server);
595 
596 	if (!tcon->unix_ext)
597 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
598 					   cifs_sb->ctx->file_mode,
599 					   cifs_sb->ctx->dir_mode);
600 	if (cifs_sb->ctx->iocharset)
601 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
602 	if (tcon->seal)
603 		seq_puts(s, ",seal");
604 	else if (tcon->ses->server->ignore_signature)
605 		seq_puts(s, ",signloosely");
606 	if (tcon->nocase)
607 		seq_puts(s, ",nocase");
608 	if (tcon->nodelete)
609 		seq_puts(s, ",nodelete");
610 	if (cifs_sb->ctx->no_sparse)
611 		seq_puts(s, ",nosparse");
612 	if (tcon->local_lease)
613 		seq_puts(s, ",locallease");
614 	if (tcon->retry)
615 		seq_puts(s, ",hard");
616 	else
617 		seq_puts(s, ",soft");
618 	if (tcon->use_persistent)
619 		seq_puts(s, ",persistenthandles");
620 	else if (tcon->use_resilient)
621 		seq_puts(s, ",resilienthandles");
622 	if (tcon->posix_extensions)
623 		seq_puts(s, ",posix");
624 	else if (tcon->unix_ext)
625 		seq_puts(s, ",unix");
626 	else
627 		seq_puts(s, ",nounix");
628 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
629 		seq_puts(s, ",nodfs");
630 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
631 		seq_puts(s, ",posixpaths");
632 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
633 		seq_puts(s, ",setuids");
634 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
635 		seq_puts(s, ",idsfromsid");
636 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
637 		seq_puts(s, ",serverino");
638 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
639 		seq_puts(s, ",rwpidforward");
640 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
641 		seq_puts(s, ",forcemand");
642 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
643 		seq_puts(s, ",nouser_xattr");
644 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
645 		seq_puts(s, ",mapchars");
646 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
647 		seq_puts(s, ",mapposix");
648 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
649 		seq_puts(s, ",sfu");
650 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
651 		seq_puts(s, ",nobrl");
652 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
653 		seq_puts(s, ",nohandlecache");
654 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
655 		seq_puts(s, ",modefromsid");
656 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
657 		seq_puts(s, ",cifsacl");
658 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
659 		seq_puts(s, ",dynperm");
660 	if (root->d_sb->s_flags & SB_POSIXACL)
661 		seq_puts(s, ",acl");
662 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
663 		seq_puts(s, ",mfsymlinks");
664 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
665 		seq_puts(s, ",fsc");
666 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
667 		seq_puts(s, ",nostrictsync");
668 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
669 		seq_puts(s, ",noperm");
670 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
671 		seq_printf(s, ",backupuid=%u",
672 			   from_kuid_munged(&init_user_ns,
673 					    cifs_sb->ctx->backupuid));
674 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
675 		seq_printf(s, ",backupgid=%u",
676 			   from_kgid_munged(&init_user_ns,
677 					    cifs_sb->ctx->backupgid));
678 	seq_show_option(s, "reparse",
679 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
680 
681 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
682 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
683 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
684 	if (cifs_sb->ctx->rasize)
685 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
686 	if (tcon->ses->server->min_offload)
687 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
688 	if (tcon->ses->server->retrans)
689 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
690 	seq_printf(s, ",echo_interval=%lu",
691 			tcon->ses->server->echo_interval / HZ);
692 
693 	/* Only display the following if overridden on mount */
694 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
695 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
696 	if (tcon->ses->server->tcp_nodelay)
697 		seq_puts(s, ",tcpnodelay");
698 	if (tcon->ses->server->noautotune)
699 		seq_puts(s, ",noautotune");
700 	if (tcon->ses->server->noblocksnd)
701 		seq_puts(s, ",noblocksend");
702 	if (tcon->ses->server->nosharesock)
703 		seq_puts(s, ",nosharesock");
704 
705 	if (tcon->snapshot_time)
706 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
707 	if (tcon->handle_timeout)
708 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
709 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
710 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
711 
712 	/*
713 	 * Display file and directory attribute timeout in seconds.
714 	 * If the file and directory attribute timeouts are the same then actimeo
715 	 * was likely specified on mount
716 	 */
717 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
718 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
719 	else {
720 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
721 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
722 	}
723 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
724 
725 	if (tcon->ses->chan_max > 1)
726 		seq_printf(s, ",multichannel,max_channels=%zu",
727 			   tcon->ses->chan_max);
728 
729 	if (tcon->use_witness)
730 		seq_puts(s, ",witness");
731 
732 	return 0;
733 }
734 
735 static void cifs_umount_begin(struct super_block *sb)
736 {
737 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
738 	struct cifs_tcon *tcon;
739 
740 	if (cifs_sb == NULL)
741 		return;
742 
743 	tcon = cifs_sb_master_tcon(cifs_sb);
744 
745 	spin_lock(&cifs_tcp_ses_lock);
746 	spin_lock(&tcon->tc_lock);
747 	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
748 			    netfs_trace_tcon_ref_see_umount);
749 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
750 		/* we have other mounts to same share or we have
751 		   already tried to umount this and woken up
752 		   all waiting network requests, nothing to do */
753 		spin_unlock(&tcon->tc_lock);
754 		spin_unlock(&cifs_tcp_ses_lock);
755 		return;
756 	}
757 	/*
758 	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
759 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
760 	 */
761 	spin_unlock(&tcon->tc_lock);
762 	spin_unlock(&cifs_tcp_ses_lock);
763 
764 	cifs_close_all_deferred_files(tcon);
765 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
766 	/* cancel_notify_requests(tcon); */
767 	if (tcon->ses && tcon->ses->server) {
768 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
769 		wake_up_all(&tcon->ses->server->request_q);
770 		wake_up_all(&tcon->ses->server->response_q);
771 		msleep(1); /* yield */
772 		/* we have to kick the requests once more */
773 		wake_up_all(&tcon->ses->server->response_q);
774 		msleep(1);
775 	}
776 
777 	return;
778 }
779 
780 static int cifs_freeze(struct super_block *sb)
781 {
782 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
783 	struct cifs_tcon *tcon;
784 
785 	if (cifs_sb == NULL)
786 		return 0;
787 
788 	tcon = cifs_sb_master_tcon(cifs_sb);
789 
790 	cifs_close_all_deferred_files(tcon);
791 	return 0;
792 }
793 
794 #ifdef CONFIG_CIFS_STATS2
795 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
796 {
797 	/* BB FIXME */
798 	return 0;
799 }
800 #endif
801 
802 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
803 {
804 	return netfs_unpin_writeback(inode, wbc);
805 }
806 
807 static int cifs_drop_inode(struct inode *inode)
808 {
809 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
810 
811 	/* no serverino => unconditional eviction */
812 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
813 		generic_drop_inode(inode);
814 }
815 
816 static const struct super_operations cifs_super_ops = {
817 	.statfs = cifs_statfs,
818 	.alloc_inode = cifs_alloc_inode,
819 	.write_inode	= cifs_write_inode,
820 	.free_inode = cifs_free_inode,
821 	.drop_inode	= cifs_drop_inode,
822 	.evict_inode	= cifs_evict_inode,
823 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
824 	.show_devname   = cifs_show_devname,
825 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
826 	function unless later we add lazy close of inodes or unless the
827 	kernel forgets to call us with the same number of releases (closes)
828 	as opens */
829 	.show_options = cifs_show_options,
830 	.umount_begin   = cifs_umount_begin,
831 	.freeze_fs      = cifs_freeze,
832 #ifdef CONFIG_CIFS_STATS2
833 	.show_stats = cifs_show_stats,
834 #endif
835 };
836 
837 /*
838  * Get root dentry from superblock according to prefix path mount option.
839  * Return dentry with refcount + 1 on success and NULL otherwise.
840  */
841 static struct dentry *
842 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
843 {
844 	struct dentry *dentry;
845 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
846 	char *full_path = NULL;
847 	char *s, *p;
848 	char sep;
849 
850 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
851 		return dget(sb->s_root);
852 
853 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
854 				cifs_sb_master_tcon(cifs_sb), 0);
855 	if (full_path == NULL)
856 		return ERR_PTR(-ENOMEM);
857 
858 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
859 
860 	sep = CIFS_DIR_SEP(cifs_sb);
861 	dentry = dget(sb->s_root);
862 	s = full_path;
863 
864 	do {
865 		struct inode *dir = d_inode(dentry);
866 		struct dentry *child;
867 
868 		if (!S_ISDIR(dir->i_mode)) {
869 			dput(dentry);
870 			dentry = ERR_PTR(-ENOTDIR);
871 			break;
872 		}
873 
874 		/* skip separators */
875 		while (*s == sep)
876 			s++;
877 		if (!*s)
878 			break;
879 		p = s++;
880 		/* next separator */
881 		while (*s && *s != sep)
882 			s++;
883 
884 		child = lookup_positive_unlocked(p, dentry, s - p);
885 		dput(dentry);
886 		dentry = child;
887 	} while (!IS_ERR(dentry));
888 	kfree(full_path);
889 	return dentry;
890 }
891 
892 static int cifs_set_super(struct super_block *sb, void *data)
893 {
894 	struct cifs_mnt_data *mnt_data = data;
895 	sb->s_fs_info = mnt_data->cifs_sb;
896 	return set_anon_super(sb, NULL);
897 }
898 
899 struct dentry *
900 cifs_smb3_do_mount(struct file_system_type *fs_type,
901 	      int flags, struct smb3_fs_context *old_ctx)
902 {
903 	struct cifs_mnt_data mnt_data;
904 	struct cifs_sb_info *cifs_sb;
905 	struct super_block *sb;
906 	struct dentry *root;
907 	int rc;
908 
909 	if (cifsFYI) {
910 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
911 			 old_ctx->source, flags);
912 	} else {
913 		cifs_info("Attempting to mount %s\n", old_ctx->source);
914 	}
915 
916 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
917 	if (!cifs_sb)
918 		return ERR_PTR(-ENOMEM);
919 
920 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
921 	if (!cifs_sb->ctx) {
922 		root = ERR_PTR(-ENOMEM);
923 		goto out;
924 	}
925 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
926 	if (rc) {
927 		root = ERR_PTR(rc);
928 		goto out;
929 	}
930 
931 	rc = cifs_setup_cifs_sb(cifs_sb);
932 	if (rc) {
933 		root = ERR_PTR(rc);
934 		goto out;
935 	}
936 
937 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
938 	if (rc) {
939 		if (!(flags & SB_SILENT))
940 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
941 				 rc);
942 		root = ERR_PTR(rc);
943 		goto out;
944 	}
945 
946 	mnt_data.ctx = cifs_sb->ctx;
947 	mnt_data.cifs_sb = cifs_sb;
948 	mnt_data.flags = flags;
949 
950 	/* BB should we make this contingent on mount parm? */
951 	flags |= SB_NODIRATIME | SB_NOATIME;
952 
953 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
954 	if (IS_ERR(sb)) {
955 		cifs_umount(cifs_sb);
956 		return ERR_CAST(sb);
957 	}
958 
959 	if (sb->s_root) {
960 		cifs_dbg(FYI, "Use existing superblock\n");
961 		cifs_umount(cifs_sb);
962 		cifs_sb = NULL;
963 	} else {
964 		rc = cifs_read_super(sb);
965 		if (rc) {
966 			root = ERR_PTR(rc);
967 			goto out_super;
968 		}
969 
970 		sb->s_flags |= SB_ACTIVE;
971 	}
972 
973 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
974 	if (IS_ERR(root))
975 		goto out_super;
976 
977 	if (cifs_sb)
978 		cifs_sb->root = dget(root);
979 
980 	cifs_dbg(FYI, "dentry root is: %p\n", root);
981 	return root;
982 
983 out_super:
984 	deactivate_locked_super(sb);
985 	return root;
986 out:
987 	kfree(cifs_sb->prepath);
988 	smb3_cleanup_fs_context(cifs_sb->ctx);
989 	kfree(cifs_sb);
990 	return root;
991 }
992 
993 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
994 {
995 	struct cifsFileInfo *cfile = file->private_data;
996 	struct cifs_tcon *tcon;
997 
998 	/*
999 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1000 	 * the cached file length
1001 	 */
1002 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1003 		int rc;
1004 		struct inode *inode = file_inode(file);
1005 
1006 		/*
1007 		 * We need to be sure that all dirty pages are written and the
1008 		 * server has the newest file length.
1009 		 */
1010 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1011 		    inode->i_mapping->nrpages != 0) {
1012 			rc = filemap_fdatawait(inode->i_mapping);
1013 			if (rc) {
1014 				mapping_set_error(inode->i_mapping, rc);
1015 				return rc;
1016 			}
1017 		}
1018 		/*
1019 		 * Some applications poll for the file length in this strange
1020 		 * way so we must seek to end on non-oplocked files by
1021 		 * setting the revalidate time to zero.
1022 		 */
1023 		CIFS_I(inode)->time = 0;
1024 
1025 		rc = cifs_revalidate_file_attr(file);
1026 		if (rc < 0)
1027 			return (loff_t)rc;
1028 	}
1029 	if (cfile && cfile->tlink) {
1030 		tcon = tlink_tcon(cfile->tlink);
1031 		if (tcon->ses->server->ops->llseek)
1032 			return tcon->ses->server->ops->llseek(file, tcon,
1033 							      offset, whence);
1034 	}
1035 	return generic_file_llseek(file, offset, whence);
1036 }
1037 
1038 static int
1039 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1040 {
1041 	/*
1042 	 * Note that this is called by vfs setlease with i_lock held to
1043 	 * protect *lease from going away.
1044 	 */
1045 	struct inode *inode = file_inode(file);
1046 	struct cifsFileInfo *cfile = file->private_data;
1047 
1048 	/* Check if file is oplocked if this is request for new lease */
1049 	if (arg == F_UNLCK ||
1050 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1051 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1052 		return generic_setlease(file, arg, lease, priv);
1053 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1054 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1055 		/*
1056 		 * If the server claims to support oplock on this file, then we
1057 		 * still need to check oplock even if the local_lease mount
1058 		 * option is set, but there are servers which do not support
1059 		 * oplock for which this mount option may be useful if the user
1060 		 * knows that the file won't be changed on the server by anyone
1061 		 * else.
1062 		 */
1063 		return generic_setlease(file, arg, lease, priv);
1064 	else
1065 		return -EAGAIN;
1066 }
1067 
1068 struct file_system_type cifs_fs_type = {
1069 	.owner = THIS_MODULE,
1070 	.name = "cifs",
1071 	.init_fs_context = smb3_init_fs_context,
1072 	.parameters = smb3_fs_parameters,
1073 	.kill_sb = cifs_kill_sb,
1074 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1075 };
1076 MODULE_ALIAS_FS("cifs");
1077 
1078 struct file_system_type smb3_fs_type = {
1079 	.owner = THIS_MODULE,
1080 	.name = "smb3",
1081 	.init_fs_context = smb3_init_fs_context,
1082 	.parameters = smb3_fs_parameters,
1083 	.kill_sb = cifs_kill_sb,
1084 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1085 };
1086 MODULE_ALIAS_FS("smb3");
1087 MODULE_ALIAS("smb3");
1088 
1089 const struct inode_operations cifs_dir_inode_ops = {
1090 	.create = cifs_create,
1091 	.atomic_open = cifs_atomic_open,
1092 	.lookup = cifs_lookup,
1093 	.getattr = cifs_getattr,
1094 	.unlink = cifs_unlink,
1095 	.link = cifs_hardlink,
1096 	.mkdir = cifs_mkdir,
1097 	.rmdir = cifs_rmdir,
1098 	.rename = cifs_rename2,
1099 	.permission = cifs_permission,
1100 	.setattr = cifs_setattr,
1101 	.symlink = cifs_symlink,
1102 	.mknod   = cifs_mknod,
1103 	.listxattr = cifs_listxattr,
1104 	.get_acl = cifs_get_acl,
1105 	.set_acl = cifs_set_acl,
1106 };
1107 
1108 const struct inode_operations cifs_file_inode_ops = {
1109 	.setattr = cifs_setattr,
1110 	.getattr = cifs_getattr,
1111 	.permission = cifs_permission,
1112 	.listxattr = cifs_listxattr,
1113 	.fiemap = cifs_fiemap,
1114 	.get_acl = cifs_get_acl,
1115 	.set_acl = cifs_set_acl,
1116 };
1117 
1118 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1119 			    struct delayed_call *done)
1120 {
1121 	char *target_path;
1122 
1123 	if (!dentry)
1124 		return ERR_PTR(-ECHILD);
1125 
1126 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1127 	if (!target_path)
1128 		return ERR_PTR(-ENOMEM);
1129 
1130 	spin_lock(&inode->i_lock);
1131 	if (likely(CIFS_I(inode)->symlink_target)) {
1132 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1133 	} else {
1134 		kfree(target_path);
1135 		target_path = ERR_PTR(-EOPNOTSUPP);
1136 	}
1137 	spin_unlock(&inode->i_lock);
1138 
1139 	if (!IS_ERR(target_path))
1140 		set_delayed_call(done, kfree_link, target_path);
1141 
1142 	return target_path;
1143 }
1144 
1145 const struct inode_operations cifs_symlink_inode_ops = {
1146 	.get_link = cifs_get_link,
1147 	.setattr = cifs_setattr,
1148 	.permission = cifs_permission,
1149 	.listxattr = cifs_listxattr,
1150 };
1151 
1152 /*
1153  * Advance the EOF marker to after the source range.
1154  */
1155 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1156 				struct cifs_tcon *src_tcon,
1157 				unsigned int xid, loff_t src_end)
1158 {
1159 	struct cifsFileInfo *writeable_srcfile;
1160 	int rc = -EINVAL;
1161 
1162 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1163 	if (writeable_srcfile) {
1164 		if (src_tcon->ses->server->ops->set_file_size)
1165 			rc = src_tcon->ses->server->ops->set_file_size(
1166 				xid, src_tcon, writeable_srcfile,
1167 				src_inode->i_size, true /* no need to set sparse */);
1168 		else
1169 			rc = -ENOSYS;
1170 		cifsFileInfo_put(writeable_srcfile);
1171 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1172 	}
1173 
1174 	if (rc < 0)
1175 		goto set_failed;
1176 
1177 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1178 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1179 	return 0;
1180 
1181 set_failed:
1182 	return filemap_write_and_wait(src_inode->i_mapping);
1183 }
1184 
1185 /*
1186  * Flush out either the folio that overlaps the beginning of a range in which
1187  * pos resides or the folio that overlaps the end of a range unless that folio
1188  * is entirely within the range we're going to invalidate.  We extend the flush
1189  * bounds to encompass the folio.
1190  */
1191 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1192 			    bool first)
1193 {
1194 	struct folio *folio;
1195 	unsigned long long fpos, fend;
1196 	pgoff_t index = pos / PAGE_SIZE;
1197 	size_t size;
1198 	int rc = 0;
1199 
1200 	folio = filemap_get_folio(inode->i_mapping, index);
1201 	if (IS_ERR(folio))
1202 		return 0;
1203 
1204 	size = folio_size(folio);
1205 	fpos = folio_pos(folio);
1206 	fend = fpos + size - 1;
1207 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1208 	*_fend   = max_t(unsigned long long, *_fend, fend);
1209 	if ((first && pos == fpos) || (!first && pos == fend))
1210 		goto out;
1211 
1212 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1213 out:
1214 	folio_put(folio);
1215 	return rc;
1216 }
1217 
1218 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1219 		struct file *dst_file, loff_t destoff, loff_t len,
1220 		unsigned int remap_flags)
1221 {
1222 	struct inode *src_inode = file_inode(src_file);
1223 	struct inode *target_inode = file_inode(dst_file);
1224 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1225 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1226 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1227 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1228 	struct cifs_tcon *target_tcon, *src_tcon;
1229 	unsigned long long destend, fstart, fend, old_size, new_size;
1230 	unsigned int xid;
1231 	int rc;
1232 
1233 	if (remap_flags & REMAP_FILE_DEDUP)
1234 		return -EOPNOTSUPP;
1235 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1236 		return -EINVAL;
1237 
1238 	cifs_dbg(FYI, "clone range\n");
1239 
1240 	xid = get_xid();
1241 
1242 	if (!smb_file_src || !smb_file_target) {
1243 		rc = -EBADF;
1244 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1245 		goto out;
1246 	}
1247 
1248 	src_tcon = tlink_tcon(smb_file_src->tlink);
1249 	target_tcon = tlink_tcon(smb_file_target->tlink);
1250 
1251 	/*
1252 	 * Note: the cifs case is easier than btrfs since the server is responsible
1253 	 * for checking for proper open modes and file type, and if it wants, the
1254 	 * server could even support copying a range where source = target
1255 	 */
1256 	lock_two_nondirectories(target_inode, src_inode);
1257 
1258 	if (len == 0)
1259 		len = src_inode->i_size - off;
1260 
1261 	cifs_dbg(FYI, "clone range\n");
1262 
1263 	/* Flush the source buffer */
1264 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1265 					  off + len - 1);
1266 	if (rc)
1267 		goto unlock;
1268 
1269 	/* The server-side copy will fail if the source crosses the EOF marker.
1270 	 * Advance the EOF marker after the flush above to the end of the range
1271 	 * if it's short of that.
1272 	 */
1273 	if (src_cifsi->netfs.remote_i_size < off + len) {
1274 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1275 		if (rc < 0)
1276 			goto unlock;
1277 	}
1278 
1279 	new_size = destoff + len;
1280 	destend = destoff + len - 1;
1281 
1282 	/* Flush the folios at either end of the destination range to prevent
1283 	 * accidental loss of dirty data outside of the range.
1284 	 */
1285 	fstart = destoff;
1286 	fend = destend;
1287 
1288 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1289 	if (rc)
1290 		goto unlock;
1291 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1292 	if (rc)
1293 		goto unlock;
1294 	if (fend > target_cifsi->netfs.zero_point)
1295 		target_cifsi->netfs.zero_point = fend + 1;
1296 	old_size = target_cifsi->netfs.remote_i_size;
1297 
1298 	/* Discard all the folios that overlap the destination region. */
1299 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1300 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1301 
1302 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1303 			   i_size_read(target_inode), 0);
1304 
1305 	rc = -EOPNOTSUPP;
1306 	if (target_tcon->ses->server->ops->duplicate_extents) {
1307 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1308 			smb_file_src, smb_file_target, off, len, destoff);
1309 		if (rc == 0 && new_size > old_size) {
1310 			truncate_setsize(target_inode, new_size);
1311 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1312 					      new_size);
1313 		}
1314 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1315 			target_cifsi->netfs.zero_point = new_size;
1316 	}
1317 
1318 	/* force revalidate of size and timestamps of target file now
1319 	   that target is updated on the server */
1320 	CIFS_I(target_inode)->time = 0;
1321 unlock:
1322 	/* although unlocking in the reverse order from locking is not
1323 	   strictly necessary here it is a little cleaner to be consistent */
1324 	unlock_two_nondirectories(src_inode, target_inode);
1325 out:
1326 	free_xid(xid);
1327 	return rc < 0 ? rc : len;
1328 }
1329 
1330 ssize_t cifs_file_copychunk_range(unsigned int xid,
1331 				struct file *src_file, loff_t off,
1332 				struct file *dst_file, loff_t destoff,
1333 				size_t len, unsigned int flags)
1334 {
1335 	struct inode *src_inode = file_inode(src_file);
1336 	struct inode *target_inode = file_inode(dst_file);
1337 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1338 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1339 	struct cifsFileInfo *smb_file_src;
1340 	struct cifsFileInfo *smb_file_target;
1341 	struct cifs_tcon *src_tcon;
1342 	struct cifs_tcon *target_tcon;
1343 	unsigned long long destend, fstart, fend;
1344 	ssize_t rc;
1345 
1346 	cifs_dbg(FYI, "copychunk range\n");
1347 
1348 	if (!src_file->private_data || !dst_file->private_data) {
1349 		rc = -EBADF;
1350 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1351 		goto out;
1352 	}
1353 
1354 	rc = -EXDEV;
1355 	smb_file_target = dst_file->private_data;
1356 	smb_file_src = src_file->private_data;
1357 	src_tcon = tlink_tcon(smb_file_src->tlink);
1358 	target_tcon = tlink_tcon(smb_file_target->tlink);
1359 
1360 	if (src_tcon->ses != target_tcon->ses) {
1361 		cifs_dbg(VFS, "source and target of copy not on same server\n");
1362 		goto out;
1363 	}
1364 
1365 	rc = -EOPNOTSUPP;
1366 	if (!target_tcon->ses->server->ops->copychunk_range)
1367 		goto out;
1368 
1369 	/*
1370 	 * Note: the cifs case is easier than btrfs since the server is responsible
1371 	 * for checking for proper open modes and file type, and if it wants, the
1372 	 * server could even support copying a range where source = target
1373 	 */
1374 	lock_two_nondirectories(target_inode, src_inode);
1375 
1376 	cifs_dbg(FYI, "about to flush pages\n");
1377 
1378 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1379 					  off + len - 1);
1380 	if (rc)
1381 		goto unlock;
1382 
1383 	/* The server-side copy will fail if the source crosses the EOF marker.
1384 	 * Advance the EOF marker after the flush above to the end of the range
1385 	 * if it's short of that.
1386 	 */
1387 	if (src_cifsi->netfs.remote_i_size < off + len) {
1388 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1389 		if (rc < 0)
1390 			goto unlock;
1391 	}
1392 
1393 	destend = destoff + len - 1;
1394 
1395 	/* Flush the folios at either end of the destination range to prevent
1396 	 * accidental loss of dirty data outside of the range.
1397 	 */
1398 	fstart = destoff;
1399 	fend = destend;
1400 
1401 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1402 	if (rc)
1403 		goto unlock;
1404 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1405 	if (rc)
1406 		goto unlock;
1407 	if (fend > target_cifsi->netfs.zero_point)
1408 		target_cifsi->netfs.zero_point = fend + 1;
1409 
1410 	/* Discard all the folios that overlap the destination region. */
1411 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1412 
1413 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1414 			   i_size_read(target_inode), 0);
1415 
1416 	rc = file_modified(dst_file);
1417 	if (!rc) {
1418 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1419 			smb_file_src, smb_file_target, off, len, destoff);
1420 		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1421 			truncate_setsize(target_inode, destoff + rc);
1422 			netfs_resize_file(&target_cifsi->netfs,
1423 					  i_size_read(target_inode), true);
1424 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1425 					      i_size_read(target_inode));
1426 		}
1427 		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1428 			target_cifsi->netfs.zero_point = destoff + rc;
1429 	}
1430 
1431 	file_accessed(src_file);
1432 
1433 	/* force revalidate of size and timestamps of target file now
1434 	 * that target is updated on the server
1435 	 */
1436 	CIFS_I(target_inode)->time = 0;
1437 
1438 unlock:
1439 	/* although unlocking in the reverse order from locking is not
1440 	 * strictly necessary here it is a little cleaner to be consistent
1441 	 */
1442 	unlock_two_nondirectories(src_inode, target_inode);
1443 
1444 out:
1445 	return rc;
1446 }
1447 
1448 /*
1449  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1450  * is a dummy operation.
1451  */
1452 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1453 {
1454 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1455 		 file, datasync);
1456 
1457 	return 0;
1458 }
1459 
1460 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1461 				struct file *dst_file, loff_t destoff,
1462 				size_t len, unsigned int flags)
1463 {
1464 	unsigned int xid = get_xid();
1465 	ssize_t rc;
1466 	struct cifsFileInfo *cfile = dst_file->private_data;
1467 
1468 	if (cfile->swapfile) {
1469 		rc = -EOPNOTSUPP;
1470 		free_xid(xid);
1471 		return rc;
1472 	}
1473 
1474 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1475 					len, flags);
1476 	free_xid(xid);
1477 
1478 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1479 		rc = splice_copy_file_range(src_file, off, dst_file,
1480 					    destoff, len);
1481 	return rc;
1482 }
1483 
1484 const struct file_operations cifs_file_ops = {
1485 	.read_iter = cifs_loose_read_iter,
1486 	.write_iter = cifs_file_write_iter,
1487 	.open = cifs_open,
1488 	.release = cifs_close,
1489 	.lock = cifs_lock,
1490 	.flock = cifs_flock,
1491 	.fsync = cifs_fsync,
1492 	.flush = cifs_flush,
1493 	.mmap  = cifs_file_mmap,
1494 	.splice_read = filemap_splice_read,
1495 	.splice_write = iter_file_splice_write,
1496 	.llseek = cifs_llseek,
1497 	.unlocked_ioctl	= cifs_ioctl,
1498 	.copy_file_range = cifs_copy_file_range,
1499 	.remap_file_range = cifs_remap_file_range,
1500 	.setlease = cifs_setlease,
1501 	.fallocate = cifs_fallocate,
1502 };
1503 
1504 const struct file_operations cifs_file_strict_ops = {
1505 	.read_iter = cifs_strict_readv,
1506 	.write_iter = cifs_strict_writev,
1507 	.open = cifs_open,
1508 	.release = cifs_close,
1509 	.lock = cifs_lock,
1510 	.flock = cifs_flock,
1511 	.fsync = cifs_strict_fsync,
1512 	.flush = cifs_flush,
1513 	.mmap = cifs_file_strict_mmap,
1514 	.splice_read = filemap_splice_read,
1515 	.splice_write = iter_file_splice_write,
1516 	.llseek = cifs_llseek,
1517 	.unlocked_ioctl	= cifs_ioctl,
1518 	.copy_file_range = cifs_copy_file_range,
1519 	.remap_file_range = cifs_remap_file_range,
1520 	.setlease = cifs_setlease,
1521 	.fallocate = cifs_fallocate,
1522 };
1523 
1524 const struct file_operations cifs_file_direct_ops = {
1525 	.read_iter = netfs_unbuffered_read_iter,
1526 	.write_iter = netfs_file_write_iter,
1527 	.open = cifs_open,
1528 	.release = cifs_close,
1529 	.lock = cifs_lock,
1530 	.flock = cifs_flock,
1531 	.fsync = cifs_fsync,
1532 	.flush = cifs_flush,
1533 	.mmap = cifs_file_mmap,
1534 	.splice_read = copy_splice_read,
1535 	.splice_write = iter_file_splice_write,
1536 	.unlocked_ioctl  = cifs_ioctl,
1537 	.copy_file_range = cifs_copy_file_range,
1538 	.remap_file_range = cifs_remap_file_range,
1539 	.llseek = cifs_llseek,
1540 	.setlease = cifs_setlease,
1541 	.fallocate = cifs_fallocate,
1542 };
1543 
1544 const struct file_operations cifs_file_nobrl_ops = {
1545 	.read_iter = cifs_loose_read_iter,
1546 	.write_iter = cifs_file_write_iter,
1547 	.open = cifs_open,
1548 	.release = cifs_close,
1549 	.fsync = cifs_fsync,
1550 	.flush = cifs_flush,
1551 	.mmap  = cifs_file_mmap,
1552 	.splice_read = filemap_splice_read,
1553 	.splice_write = iter_file_splice_write,
1554 	.llseek = cifs_llseek,
1555 	.unlocked_ioctl	= cifs_ioctl,
1556 	.copy_file_range = cifs_copy_file_range,
1557 	.remap_file_range = cifs_remap_file_range,
1558 	.setlease = cifs_setlease,
1559 	.fallocate = cifs_fallocate,
1560 };
1561 
1562 const struct file_operations cifs_file_strict_nobrl_ops = {
1563 	.read_iter = cifs_strict_readv,
1564 	.write_iter = cifs_strict_writev,
1565 	.open = cifs_open,
1566 	.release = cifs_close,
1567 	.fsync = cifs_strict_fsync,
1568 	.flush = cifs_flush,
1569 	.mmap = cifs_file_strict_mmap,
1570 	.splice_read = filemap_splice_read,
1571 	.splice_write = iter_file_splice_write,
1572 	.llseek = cifs_llseek,
1573 	.unlocked_ioctl	= cifs_ioctl,
1574 	.copy_file_range = cifs_copy_file_range,
1575 	.remap_file_range = cifs_remap_file_range,
1576 	.setlease = cifs_setlease,
1577 	.fallocate = cifs_fallocate,
1578 };
1579 
1580 const struct file_operations cifs_file_direct_nobrl_ops = {
1581 	.read_iter = netfs_unbuffered_read_iter,
1582 	.write_iter = netfs_file_write_iter,
1583 	.open = cifs_open,
1584 	.release = cifs_close,
1585 	.fsync = cifs_fsync,
1586 	.flush = cifs_flush,
1587 	.mmap = cifs_file_mmap,
1588 	.splice_read = copy_splice_read,
1589 	.splice_write = iter_file_splice_write,
1590 	.unlocked_ioctl  = cifs_ioctl,
1591 	.copy_file_range = cifs_copy_file_range,
1592 	.remap_file_range = cifs_remap_file_range,
1593 	.llseek = cifs_llseek,
1594 	.setlease = cifs_setlease,
1595 	.fallocate = cifs_fallocate,
1596 };
1597 
1598 const struct file_operations cifs_dir_ops = {
1599 	.iterate_shared = cifs_readdir,
1600 	.release = cifs_closedir,
1601 	.read    = generic_read_dir,
1602 	.unlocked_ioctl  = cifs_ioctl,
1603 	.copy_file_range = cifs_copy_file_range,
1604 	.remap_file_range = cifs_remap_file_range,
1605 	.llseek = generic_file_llseek,
1606 	.fsync = cifs_dir_fsync,
1607 };
1608 
1609 static void
1610 cifs_init_once(void *inode)
1611 {
1612 	struct cifsInodeInfo *cifsi = inode;
1613 
1614 	inode_init_once(&cifsi->netfs.inode);
1615 	init_rwsem(&cifsi->lock_sem);
1616 }
1617 
1618 static int __init
1619 cifs_init_inodecache(void)
1620 {
1621 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1622 					      sizeof(struct cifsInodeInfo),
1623 					      0, (SLAB_RECLAIM_ACCOUNT|
1624 						SLAB_ACCOUNT),
1625 					      cifs_init_once);
1626 	if (cifs_inode_cachep == NULL)
1627 		return -ENOMEM;
1628 
1629 	return 0;
1630 }
1631 
1632 static void
1633 cifs_destroy_inodecache(void)
1634 {
1635 	/*
1636 	 * Make sure all delayed rcu free inodes are flushed before we
1637 	 * destroy cache.
1638 	 */
1639 	rcu_barrier();
1640 	kmem_cache_destroy(cifs_inode_cachep);
1641 }
1642 
1643 static int
1644 cifs_init_request_bufs(void)
1645 {
1646 	/*
1647 	 * The SMB2 maximum header size is bigger than the CIFS one - no problem
1648 	 * allocating some more bytes for CIFS.
1649 	 */
1650 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1651 
1652 	if (CIFSMaxBufSize < 8192) {
1653 	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1654 	Unicode path name has to fit in any SMB/CIFS path based frames */
1655 		CIFSMaxBufSize = 8192;
1656 	} else if (CIFSMaxBufSize > 1024*127) {
1657 		CIFSMaxBufSize = 1024 * 127;
1658 	} else {
1659 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1660 	}
1661 /*
1662 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1663 		 CIFSMaxBufSize, CIFSMaxBufSize);
1664 */
1665 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1666 					    CIFSMaxBufSize + max_hdr_size, 0,
1667 					    SLAB_HWCACHE_ALIGN, 0,
1668 					    CIFSMaxBufSize + max_hdr_size,
1669 					    NULL);
1670 	if (cifs_req_cachep == NULL)
1671 		return -ENOMEM;
1672 
1673 	if (cifs_min_rcv < 1)
1674 		cifs_min_rcv = 1;
1675 	else if (cifs_min_rcv > 64) {
1676 		cifs_min_rcv = 64;
1677 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1678 	}
1679 
1680 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1681 						  cifs_req_cachep);
1682 
1683 	if (cifs_req_poolp == NULL) {
1684 		kmem_cache_destroy(cifs_req_cachep);
1685 		return -ENOMEM;
1686 	}
1687 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1688 	almost all handle based requests (but not write response, nor is it
1689 	sufficient for path based requests).  A smaller size would have
1690 	been more efficient (compacting multiple slab items on one 4k page)
1691 	for the case in which debug was on, but this larger size allows
1692 	more SMBs to use small buffer alloc and is still much more
1693 	efficient to alloc 1 per page off the slab compared to 17K (5page)
1694 	alloc of large cifs buffers even when page debugging is on */
1695 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1696 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1697 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1698 	if (cifs_sm_req_cachep == NULL) {
1699 		mempool_destroy(cifs_req_poolp);
1700 		kmem_cache_destroy(cifs_req_cachep);
1701 		return -ENOMEM;
1702 	}
1703 
1704 	if (cifs_min_small < 2)
1705 		cifs_min_small = 2;
1706 	else if (cifs_min_small > 256) {
1707 		cifs_min_small = 256;
1708 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1709 	}
1710 
1711 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1712 						     cifs_sm_req_cachep);
1713 
1714 	if (cifs_sm_req_poolp == NULL) {
1715 		mempool_destroy(cifs_req_poolp);
1716 		kmem_cache_destroy(cifs_req_cachep);
1717 		kmem_cache_destroy(cifs_sm_req_cachep);
1718 		return -ENOMEM;
1719 	}
1720 
1721 	return 0;
1722 }
1723 
1724 static void
1725 cifs_destroy_request_bufs(void)
1726 {
1727 	mempool_destroy(cifs_req_poolp);
1728 	kmem_cache_destroy(cifs_req_cachep);
1729 	mempool_destroy(cifs_sm_req_poolp);
1730 	kmem_cache_destroy(cifs_sm_req_cachep);
1731 }
1732 
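/* Slab cache and mempool backing mid_q_entry (multiplex id) structures */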
1733 static int init_mids(void)
1734 {
1735 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1736 					    sizeof(struct mid_q_entry), 0,
1737 					    SLAB_HWCACHE_ALIGN, NULL);
1738 	if (cifs_mid_cachep == NULL)
1739 		return -ENOMEM;
1740 
1741 	/* 3 is a reasonable minimum number of simultaneous operations */
1742 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1743 	if (cifs_mid_poolp == NULL) {
1744 		kmem_cache_destroy(cifs_mid_cachep);
1745 		return -ENOMEM;
1746 	}
1747 
1748 	return 0;
1749 }
1750 
1751 static void destroy_mids(void)
1752 {
1753 	mempool_destroy(cifs_mid_poolp);
1754 	kmem_cache_destroy(cifs_mid_cachep);
1755 }
1756 
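/*
 * Create the caches and embedded mempools used for netfs I/O requests and
 * subrequests (struct cifs_io_request / struct cifs_io_subrequest).
 */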
1757 static int cifs_init_netfs(void)
1758 {
1759 	cifs_io_request_cachep =
1760 		kmem_cache_create("cifs_io_request",
1761 				  sizeof(struct cifs_io_request), 0,
1762 				  SLAB_HWCACHE_ALIGN, NULL);
1763 	if (!cifs_io_request_cachep)
1764 		goto nomem_req;
1765 
1766 	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
1767 		goto nomem_reqpool;
1768 
1769 	cifs_io_subrequest_cachep =
1770 		kmem_cache_create("cifs_io_subrequest",
1771 				  sizeof(struct cifs_io_subrequest), 0,
1772 				  SLAB_HWCACHE_ALIGN, NULL);
1773 	if (!cifs_io_subrequest_cachep)
1774 		goto nomem_subreq;
1775 
1776 	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
1777 		goto nomem_subreqpool;
1778 
1779 	return 0;
1780 
1781 nomem_subreqpool:
1782 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1783 nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
1785 nomem_reqpool:
1786 	kmem_cache_destroy(cifs_io_request_cachep);
1787 nomem_req:
1788 	return -ENOMEM;
1789 }
1790 
1791 static void cifs_destroy_netfs(void)
1792 {
1793 	mempool_exit(&cifs_io_subrequest_pool);
1794 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1795 	mempool_exit(&cifs_io_request_pool);
1796 	kmem_cache_destroy(cifs_io_request_cachep);
1797 }
1798 
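/*
 * Module entry point: initialize global state and workqueues, create the
 * caches and upcall services, then register the cifs and smb3 filesystem
 * types.
 */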
1799 static int __init
1800 init_cifs(void)
1801 {
1802 	int rc = 0;
1803 	cifs_proc_init();
1804 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
	/* Initialize global counters */
1808 	atomic_set(&sesInfoAllocCount, 0);
1809 	atomic_set(&tconInfoAllocCount, 0);
1810 	atomic_set(&tcpSesNextId, 0);
1811 	atomic_set(&tcpSesAllocCount, 0);
1812 	atomic_set(&tcpSesReconnectCount, 0);
1813 	atomic_set(&tconInfoReconnectCount, 0);
1814 
1815 	atomic_set(&buf_alloc_count, 0);
1816 	atomic_set(&small_buf_alloc_count, 0);
1817 #ifdef CONFIG_CIFS_STATS2
1818 	atomic_set(&total_buf_alloc_count, 0);
1819 	atomic_set(&total_small_buf_alloc_count, 0);
1820 	if (slow_rsp_threshold < 1)
1821 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1822 	else if (slow_rsp_threshold > 32767)
1823 		cifs_dbg(VFS,
1824 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1825 #endif /* CONFIG_CIFS_STATS2 */
1826 
1827 	atomic_set(&mid_count, 0);
1828 	GlobalCurrentXid = 0;
1829 	GlobalTotalActiveXid = 0;
1830 	GlobalMaxActiveXid = 0;
1831 	spin_lock_init(&cifs_tcp_ses_lock);
1832 	spin_lock_init(&GlobalMid_Lock);
1833 
1834 	cifs_lock_secret = get_random_u32();
1835 
1836 	if (cifs_max_pending < 2) {
1837 		cifs_max_pending = 2;
1838 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1839 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1840 		cifs_max_pending = CIFS_MAX_REQ;
1841 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1842 			 CIFS_MAX_REQ);
1843 	}
1844 
	/*
	 * Limit the maximum to 65000 seconds (about 18 hours); setting
	 * dir_cache_timeout to zero disables directory entry caching.
	 */
1846 	if (dir_cache_timeout > 65000) {
1847 		dir_cache_timeout = 65000;
1848 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1849 	}
1850 
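	/*
	 * Allocate the workqueues used for deferred work: general I/O
	 * (cifsiod), SMB3 message decryption, deferred cifsFileInfo release,
	 * oplock/lease break handling, deferred close, and server handle
	 * close.
	 */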
1851 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1852 	if (!cifsiod_wq) {
1853 		rc = -ENOMEM;
1854 		goto out_clean_proc;
1855 	}
1856 
	/*
	 * Consider setting the max_active limit to non-zero in the future,
	 * perhaps min(num_of_cores - 1, 3), so that we do not launch too many
	 * worker threads; Documentation/core-api/workqueue.rst currently
	 * recommends leaving it at 0.
	 */
1862 
1863 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1864 	decrypt_wq = alloc_workqueue("smb3decryptd",
1865 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1866 	if (!decrypt_wq) {
1867 		rc = -ENOMEM;
1868 		goto out_destroy_cifsiod_wq;
1869 	}
1870 
1871 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1872 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1873 	if (!fileinfo_put_wq) {
1874 		rc = -ENOMEM;
1875 		goto out_destroy_decrypt_wq;
1876 	}
1877 
1878 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1879 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1880 	if (!cifsoplockd_wq) {
1881 		rc = -ENOMEM;
1882 		goto out_destroy_fileinfo_put_wq;
1883 	}
1884 
1885 	deferredclose_wq = alloc_workqueue("deferredclose",
1886 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1887 	if (!deferredclose_wq) {
1888 		rc = -ENOMEM;
1889 		goto out_destroy_cifsoplockd_wq;
1890 	}
1891 
1892 	serverclose_wq = alloc_workqueue("serverclose",
1893 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		/*
		 * serverclose_wq itself was not created, so tear down only
		 * the workqueues that were successfully allocated above.
		 */
		goto out_destroy_deferredclose_wq;
	}
1898 
1899 	rc = cifs_init_inodecache();
1900 	if (rc)
		goto out_destroy_serverclose_wq;
1902 
1903 	rc = cifs_init_netfs();
1904 	if (rc)
1905 		goto out_destroy_inodecache;
1906 
1907 	rc = init_mids();
1908 	if (rc)
1909 		goto out_destroy_netfs;
1910 
1911 	rc = cifs_init_request_bufs();
1912 	if (rc)
1913 		goto out_destroy_mids;
1914 
1915 #ifdef CONFIG_CIFS_DFS_UPCALL
1916 	rc = dfs_cache_init();
1917 	if (rc)
1918 		goto out_destroy_request_bufs;
1919 #endif /* CONFIG_CIFS_DFS_UPCALL */
1920 #ifdef CONFIG_CIFS_UPCALL
1921 	rc = init_cifs_spnego();
1922 	if (rc)
1923 		goto out_destroy_dfs_cache;
1924 #endif /* CONFIG_CIFS_UPCALL */
1925 #ifdef CONFIG_CIFS_SWN_UPCALL
1926 	rc = cifs_genl_init();
1927 	if (rc)
1928 		goto out_register_key_type;
1929 #endif /* CONFIG_CIFS_SWN_UPCALL */
1930 
1931 	rc = init_cifs_idmap();
1932 	if (rc)
1933 		goto out_cifs_swn_init;
1934 
1935 	rc = register_filesystem(&cifs_fs_type);
1936 	if (rc)
1937 		goto out_init_cifs_idmap;
1938 
1939 	rc = register_filesystem(&smb3_fs_type);
1940 	if (rc) {
1941 		unregister_filesystem(&cifs_fs_type);
1942 		goto out_init_cifs_idmap;
1943 	}
1944 
1945 	return 0;
1946 
1947 out_init_cifs_idmap:
1948 	exit_cifs_idmap();
1949 out_cifs_swn_init:
1950 #ifdef CONFIG_CIFS_SWN_UPCALL
1951 	cifs_genl_exit();
1952 out_register_key_type:
1953 #endif
1954 #ifdef CONFIG_CIFS_UPCALL
1955 	exit_cifs_spnego();
1956 out_destroy_dfs_cache:
1957 #endif
1958 #ifdef CONFIG_CIFS_DFS_UPCALL
1959 	dfs_cache_destroy();
1960 out_destroy_request_bufs:
1961 #endif
1962 	cifs_destroy_request_bufs();
1963 out_destroy_mids:
1964 	destroy_mids();
1965 out_destroy_netfs:
1966 	cifs_destroy_netfs();
1967 out_destroy_inodecache:
1968 	cifs_destroy_inodecache();
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
1981 out_clean_proc:
1982 	cifs_proc_clean();
1983 	return rc;
1984 }
1985 
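/*
 * Module exit: unregister both filesystem types and tear down the upcalls,
 * caches, mempools and workqueues created in init_cifs().
 */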
1986 static void __exit
1987 exit_cifs(void)
1988 {
1989 	cifs_dbg(NOISY, "exit_smb3\n");
1990 	unregister_filesystem(&cifs_fs_type);
1991 	unregister_filesystem(&smb3_fs_type);
1992 	cifs_release_automount_timer();
1993 	exit_cifs_idmap();
1994 #ifdef CONFIG_CIFS_SWN_UPCALL
1995 	cifs_genl_exit();
1996 #endif
1997 #ifdef CONFIG_CIFS_UPCALL
1998 	exit_cifs_spnego();
1999 #endif
2000 #ifdef CONFIG_CIFS_DFS_UPCALL
2001 	dfs_cache_destroy();
2002 #endif
2003 	cifs_destroy_request_bufs();
2004 	destroy_mids();
2005 	cifs_destroy_netfs();
2006 	cifs_destroy_inodecache();
2007 	destroy_workqueue(deferredclose_wq);
2008 	destroy_workqueue(cifsoplockd_wq);
2009 	destroy_workqueue(decrypt_wq);
2010 	destroy_workqueue(fileinfo_put_wq);
2011 	destroy_workqueue(serverclose_wq);
2012 	destroy_workqueue(cifsiod_wq);
2013 	cifs_proc_clean();
2014 }
2015 
2016 MODULE_AUTHOR("Steve French");
2017 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2018 MODULE_DESCRIPTION
2019 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2020 	"also older servers complying with the SNIA CIFS Specification)");
2021 MODULE_VERSION(CIFS_VERSION);
2022 MODULE_SOFTDEP("ecb");
2023 MODULE_SOFTDEP("hmac");
2024 MODULE_SOFTDEP("md5");
2025 MODULE_SOFTDEP("nls");
2026 MODULE_SOFTDEP("aes");
2027 MODULE_SOFTDEP("cmac");
2028 MODULE_SOFTDEP("sha256");
2029 MODULE_SOFTDEP("sha512");
2030 MODULE_SOFTDEP("aead2");
2031 MODULE_SOFTDEP("ccm");
2032 MODULE_SOFTDEP("gcm");
2033 module_init(init_cifs)
2034 module_exit(exit_cifs)
2035