xref: /linux/fs/smb/client/cifsfs.c (revision eb9b9a6f5ab35db7a431184456fe410b792be03f)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

/* Module-wide debug switches and feature toggles (some exported as parms) */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head	cifs_tcp_ses_list;
spinlock_t		cifs_tcp_ses_lock;
/* forward declaration; ops table defined after the callbacks below */
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* Workqueues shared by all mounts for async I/O, oplocks, deferred closes */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
__u32 cifs_lock_secret;
161 
162 /*
163  * Bumps refcount for cifs super block.
164  * Note that it should be only called if a referece to VFS super block is
165  * already held, e.g. in open-type syscalls context. Otherwise it can race with
166  * atomic_dec_and_test in deactivate_locked_super.
167  */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 	struct cifs_sb_info *server = CIFS_SB(sb);
172 
173 	if (atomic_inc_return(&server->active) == 1)
174 		atomic_inc(&sb->s_active);
175 }
176 
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 	struct cifs_sb_info *server = CIFS_SB(sb);
181 
182 	if (atomic_dec_and_test(&server->active))
183 		deactivate_super(sb);
184 }
185 
/*
 * Initialize a freshly-created superblock: flags, time granularity/range,
 * bdi/readahead tuning, the root inode and root dentry.
 * Returns 0 on success or a negative errno; on failure the caller is
 * expected to tear the sb down.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* snapshot mounts are implicitly read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* dentry ops must be chosen before d_make_root() creates the root */
	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		/* d_make_root() consumed the inode reference even on failure */
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	/* NFS re-export requires stable server inode numbers */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
286 
/*
 * Tear down a cifs superblock: drop cached-directory dentries and the root
 * dentry, kill the anonymous sb, then unmount the cifs-level state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
306 
307 static int
308 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
309 {
310 	struct super_block *sb = dentry->d_sb;
311 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
312 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
313 	struct TCP_Server_Info *server = tcon->ses->server;
314 	unsigned int xid;
315 	int rc = 0;
316 
317 	xid = get_xid();
318 
319 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
320 		buf->f_namelen =
321 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
322 	else
323 		buf->f_namelen = PATH_MAX;
324 
325 	buf->f_fsid.val[0] = tcon->vol_serial_number;
326 	/* are using part of create time for more randomness, see man statfs */
327 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
328 
329 	buf->f_files = 0;	/* undefined */
330 	buf->f_ffree = 0;	/* unlimited */
331 
332 	if (server->ops->queryfs)
333 		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
334 
335 	free_xid(xid);
336 	return rc;
337 }
338 
339 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
340 {
341 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
342 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
343 	struct TCP_Server_Info *server = tcon->ses->server;
344 
345 	if (server->ops->fallocate)
346 		return server->ops->fallocate(file, tcon, mode, off, len);
347 
348 	return -EOPNOTSUPP;
349 }
350 
351 static int cifs_permission(struct mnt_idmap *idmap,
352 			   struct inode *inode, int mask)
353 {
354 	struct cifs_sb_info *cifs_sb;
355 
356 	cifs_sb = CIFS_SB(inode->i_sb);
357 
358 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
359 		if ((mask & MAY_EXEC) && !execute_ok(inode))
360 			return -EACCES;
361 		else
362 			return 0;
363 	} else /* file mode might have been restricted at mount time
364 		on the client (above and beyond ACL on servers) for
365 		servers which do not support setting and viewing mode bits,
366 		so allowing client to check permissions is useful */
367 		return generic_permission(&nop_mnt_idmap, inode, mask);
368 }
369 
/* Slab caches and mempools for frequently allocated cifs objects
 * (inodes, request buffers, mids, netfs I/O requests/subrequests). */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
381 
/*
 * Allocate and default-initialize a cifsInodeInfo for the VFS.
 * Returns the embedded VFS inode, or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;	/* attributes not yet validated from server */
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
420 
421 static void
422 cifs_free_inode(struct inode *inode)
423 {
424 	struct cifsInodeInfo *cinode = CIFS_I(inode);
425 
426 	if (S_ISLNK(inode->i_mode))
427 		kfree(cinode->symlink_target);
428 	kmem_cache_free(cifs_inode_cachep, cinode);
429 }
430 
/*
 * Evict an inode: wait for in-flight netfs I/O, drop the pagecache,
 * release fscache cookies, then clear the inode.
 * Order matters: outstanding I/O must drain before pages are truncated.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	/* release the cache pin taken for writeback before dropping cookie */
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
441 
442 static void
443 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
444 {
445 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
446 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
447 
448 	seq_puts(s, ",addr=");
449 
450 	switch (server->dstaddr.ss_family) {
451 	case AF_INET:
452 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
453 		break;
454 	case AF_INET6:
455 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
456 		if (sa6->sin6_scope_id)
457 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
458 		break;
459 	default:
460 		seq_puts(s, "(unknown)");
461 	}
462 	if (server->rdma)
463 		seq_puts(s, ",rdma");
464 }
465 
466 static void
467 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
468 {
469 	if (ses->sectype == Unspecified) {
470 		if (ses->user_name == NULL)
471 			seq_puts(s, ",sec=none");
472 		return;
473 	}
474 
475 	seq_puts(s, ",sec=");
476 
477 	switch (ses->sectype) {
478 	case NTLMv2:
479 		seq_puts(s, "ntlmv2");
480 		break;
481 	case Kerberos:
482 		seq_puts(s, "krb5");
483 		break;
484 	case RawNTLMSSP:
485 		seq_puts(s, "ntlmssp");
486 		break;
487 	default:
488 		/* shouldn't ever happen */
489 		seq_puts(s, "unknown");
490 		break;
491 	}
492 
493 	if (ses->sign)
494 		seq_puts(s, "i");
495 
496 	if (ses->sectype == Kerberos)
497 		seq_printf(s, ",cruid=%u",
498 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
499 }
500 
501 static void
502 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
503 {
504 	seq_puts(s, ",cache=");
505 
506 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
507 		seq_puts(s, "strict");
508 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
509 		seq_puts(s, "none");
510 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
511 		seq_puts(s, "singleclient"); /* assume only one client access */
512 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
513 		seq_puts(s, "ro"); /* read only caching assumed */
514 	else
515 		seq_puts(s, "loose");
516 }
517 
518 /*
519  * cifs_show_devname() is used so we show the mount device name with correct
520  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
521  */
522 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
523 {
524 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
525 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
526 
527 	if (devname == NULL)
528 		seq_puts(m, "none");
529 	else {
530 		convert_delimiter(devname, '/');
531 		/* escape all spaces in share names */
532 		seq_escape(m, devname, " \t");
533 		kfree(devname);
534 	}
535 	return 0;
536 }
537 
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 *
 * NOTE: the emission order below is user-visible output; tools may parse
 * /proc/mounts, so keep option order stable.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* show explicit source address binding, if any was requested */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* mount-flag driven boolean options, one per flag */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
735 
/*
 * Called for umount -f: if this is the only mount of the share, close
 * deferred files and wake any tasks blocked on the server queues so the
 * forced unmount can make progress. Lock order: cifs_tcp_ses_lock then
 * tcon->tc_lock.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
780 
/*
 * freeze_fs callback: flush deferred file closes so no handles linger
 * while the filesystem is frozen. Always succeeds.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (!cifs_sb)
		return 0;

	cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
794 
#ifdef CONFIG_CIFS_STATS2
/* Per-superblock statistics hook; currently a stub. */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
802 
/* write_inode callback: delegate to netfs to unpin pages held for writeback. */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
807 
808 static int cifs_drop_inode(struct inode *inode)
809 {
810 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
811 
812 	/* no serverino => unconditional eviction */
813 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
814 		generic_drop_inode(inode);
815 }
816 
/* VFS superblock operations table for cifs mounts. */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
837 
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 * (On lookup failure the returned value is an ERR_PTR from
 * lookup_positive_unlocked / -ENOTDIR / -ENOMEM.)
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with USE_PREFIX_PATH the server path already points at the root */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk the prefix path one component at a time */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* [p, s) is the current component name */
		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
892 
893 static int cifs_set_super(struct super_block *sb, void *data)
894 {
895 	struct cifs_mnt_data *mnt_data = data;
896 	sb->s_fs_info = mnt_data->cifs_sb;
897 	return set_anon_super(sb, NULL);
898 }
899 
/*
 * Mount entry point shared by the cifs and smb3 filesystem types.
 * Builds a cifs_sb from the fs context, connects to the server, and finds
 * or creates a matching superblock via sget().
 *
 * Ownership note: once sget() succeeds, cifs_sb belongs to the superblock
 * (or is released via cifs_umount() when an existing sb is reused, in which
 * case cifs_sb is set to NULL below). Returns the root dentry or ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	/* take a private copy of the context; old_ctx stays caller-owned */
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		/* sget() matched an existing sb; drop our duplicate state */
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL when reusing an sb, so fall back to old_ctx */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	/* failure before sget(): cifs_sb is still ours to free */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
993 
/*
 * ->llseek for CIFS files.  SEEK_SET/SEEK_CUR need no server round trip;
 * for anything else (SEEK_END/SEEK_DATA/SEEK_HOLE) the cached size may be
 * stale, so file attributes are revalidated with the server first.
 * Delegates to a protocol-specific llseek op when the server provides one,
 * otherwise falls back to generic_file_llseek().
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* Prefer the dialect-specific seek implementation when available */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1038 
/*
 * ->setlease handler.  A lease is only granted locally when we already
 * hold a matching oplock from the server (read cache for F_RDLCK, write
 * cache for F_WRLCK), or on tcons mounted with local_lease; otherwise
 * -EAGAIN is returned since the server could change the file behind our
 * back.  NOTE(review): cfile is dereferenced in the local_lease branch
 * without a NULL check — presumably ->setlease is only reached with
 * private_data set; confirm against the VFS caller.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1068 
/*
 * The legacy "cifs" filesystem type.  Shares all mount machinery
 * (smb3_init_fs_context / smb3_fs_parameters) with the "smb3" type below.
 */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1078 
/*
 * The "smb3" filesystem type — identical implementation to "cifs" above,
 * registered under a separate name.
 */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1089 
/* Inode operations for directories. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1108 
/* Inode operations for regular files. */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1118 
1119 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1120 			    struct delayed_call *done)
1121 {
1122 	char *target_path;
1123 
1124 	if (!dentry)
1125 		return ERR_PTR(-ECHILD);
1126 
1127 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1128 	if (!target_path)
1129 		return ERR_PTR(-ENOMEM);
1130 
1131 	spin_lock(&inode->i_lock);
1132 	if (likely(CIFS_I(inode)->symlink_target)) {
1133 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1134 	} else {
1135 		kfree(target_path);
1136 		target_path = ERR_PTR(-EOPNOTSUPP);
1137 	}
1138 	spin_unlock(&inode->i_lock);
1139 
1140 	if (!IS_ERR(target_path))
1141 		set_delayed_call(done, kfree_link, target_path);
1142 
1143 	return target_path;
1144 }
1145 
/* Inode operations for symlinks; get_link serves the cached target. */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1152 
1153 /*
1154  * Advance the EOF marker to after the source range.
1155  */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;	/* also the result when no writable handle exists */

	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		/* Push the local i_size to the server as the new EOF */
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* Server size updated; bring the netfs and fscache views in line */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	/*
	 * Could not set the size explicitly; fall back to flushing the dirty
	 * pagecache so the server's view of the file at least reflects all
	 * data written so far.
	 */
	return filemap_write_and_wait(src_inode->i_mapping);
}
1185 
1186 /*
1187  * Flush out either the folio that overlaps the beginning of a range in which
1188  * pos resides or the folio that overlaps the end of a range unless that folio
1189  * is entirely within the range we're going to invalidate.  We extend the flush
1190  * bounds to encompass the folio.
1191  */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	/* No folio cached at pos — nothing to flush */
	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* Widen the caller's flush window to whole-folio boundaries */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend   = max_t(unsigned long long, *_fend, fend);
	/* Folio lies entirely within the range being invalidated: its
	 * contents are going away anyway, so skip the writeback */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1218 
1219 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1220 		struct file *dst_file, loff_t destoff, loff_t len,
1221 		unsigned int remap_flags)
1222 {
1223 	struct inode *src_inode = file_inode(src_file);
1224 	struct inode *target_inode = file_inode(dst_file);
1225 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1226 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1227 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1228 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1229 	struct cifs_tcon *target_tcon, *src_tcon;
1230 	unsigned long long destend, fstart, fend, old_size, new_size;
1231 	unsigned int xid;
1232 	int rc;
1233 
1234 	if (remap_flags & REMAP_FILE_DEDUP)
1235 		return -EOPNOTSUPP;
1236 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1237 		return -EINVAL;
1238 
1239 	cifs_dbg(FYI, "clone range\n");
1240 
1241 	xid = get_xid();
1242 
1243 	if (!smb_file_src || !smb_file_target) {
1244 		rc = -EBADF;
1245 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1246 		goto out;
1247 	}
1248 
1249 	src_tcon = tlink_tcon(smb_file_src->tlink);
1250 	target_tcon = tlink_tcon(smb_file_target->tlink);
1251 
1252 	/*
1253 	 * Note: cifs case is easier than btrfs since server responsible for
1254 	 * checks for proper open modes and file type and if it wants
1255 	 * server could even support copy of range where source = target
1256 	 */
1257 	lock_two_nondirectories(target_inode, src_inode);
1258 
1259 	if (len == 0)
1260 		len = src_inode->i_size - off;
1261 
1262 	cifs_dbg(FYI, "clone range\n");
1263 
1264 	/* Flush the source buffer */
1265 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1266 					  off + len - 1);
1267 	if (rc)
1268 		goto unlock;
1269 
1270 	/* The server-side copy will fail if the source crosses the EOF marker.
1271 	 * Advance the EOF marker after the flush above to the end of the range
1272 	 * if it's short of that.
1273 	 */
1274 	if (src_cifsi->netfs.remote_i_size < off + len) {
1275 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1276 		if (rc < 0)
1277 			goto unlock;
1278 	}
1279 
1280 	new_size = destoff + len;
1281 	destend = destoff + len - 1;
1282 
1283 	/* Flush the folios at either end of the destination range to prevent
1284 	 * accidental loss of dirty data outside of the range.
1285 	 */
1286 	fstart = destoff;
1287 	fend = destend;
1288 
1289 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1290 	if (rc)
1291 		goto unlock;
1292 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1293 	if (rc)
1294 		goto unlock;
1295 	if (fend > target_cifsi->netfs.zero_point)
1296 		target_cifsi->netfs.zero_point = fend + 1;
1297 	old_size = target_cifsi->netfs.remote_i_size;
1298 
1299 	/* Discard all the folios that overlap the destination region. */
1300 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1301 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1302 
1303 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1304 			   i_size_read(target_inode), 0);
1305 
1306 	rc = -EOPNOTSUPP;
1307 	if (target_tcon->ses->server->ops->duplicate_extents) {
1308 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1309 			smb_file_src, smb_file_target, off, len, destoff);
1310 		if (rc == 0 && new_size > old_size) {
1311 			truncate_setsize(target_inode, new_size);
1312 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1313 					      new_size);
1314 		}
1315 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1316 			target_cifsi->netfs.zero_point = new_size;
1317 	}
1318 
1319 	/* force revalidate of size and timestamps of target file now
1320 	   that target is updated on the server */
1321 	CIFS_I(target_inode)->time = 0;
1322 unlock:
1323 	/* although unlocking in the reverse order from locking is not
1324 	   strictly necessary here it is a little cleaner to be consistent */
1325 	unlock_two_nondirectories(src_inode, target_inode);
1326 out:
1327 	free_xid(xid);
1328 	return rc < 0 ? rc : len;
1329 }
1330 
/*
 * Server-side copy for copy_file_range(): delegates the data movement to
 * the dialect's copychunk_range op.  Source and destination must share an
 * SMB session (-EXDEV otherwise); -EOPNOTSUPP if the dialect has no
 * copychunk op.  Both errors let the caller fall back to a generic copy.
 * Returns the number of bytes copied or a negative errno.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	/* Make sure the server sees every byte of the source range */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Copy may have extended the target: update cached sizes */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1435 
1436 /*
1437  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1438  * is a dummy operation.
1439  */
/* ->fsync for directories: nothing to flush, directory ops are already
 * committed on the server (see comment above); just log and succeed. */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}
1447 
1448 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1449 				struct file *dst_file, loff_t destoff,
1450 				size_t len, unsigned int flags)
1451 {
1452 	unsigned int xid = get_xid();
1453 	ssize_t rc;
1454 	struct cifsFileInfo *cfile = dst_file->private_data;
1455 
1456 	if (cfile->swapfile) {
1457 		rc = -EOPNOTSUPP;
1458 		free_xid(xid);
1459 		return rc;
1460 	}
1461 
1462 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1463 					len, flags);
1464 	free_xid(xid);
1465 
1466 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1467 		rc = splice_copy_file_range(src_file, off, dst_file,
1468 					    destoff, len);
1469 	return rc;
1470 }
1471 
/*
 * File ops: cached ("loose") reads/writes through the page cache, with
 * byte-range locking (.lock/.flock) supported.
 */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1491 
/*
 * File ops, strict-cache variants (cifs_strict_readv/writev/fsync and
 * strict mmap), with byte-range locking supported.
 */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1511 
/*
 * File ops for unbuffered I/O: reads/writes bypass the page cache via
 * netfs (netfs_unbuffered_read_iter / netfs_file_write_iter) and
 * splice_read uses copy_splice_read.  Byte-range locking supported.
 */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1531 
/*
 * Same as cifs_file_ops but without .lock/.flock — presumably selected
 * for mounts with byte-range locking disabled ("nobrl"); confirm at the
 * call site that picks the ops table.
 */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1549 
/* Strict-cache variant without .lock/.flock (no byte-range locking). */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1567 
/* Unbuffered-I/O variant without .lock/.flock (no byte-range locking). */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1585 
/* File operations for directories. */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1596 
1597 static void
1598 cifs_init_once(void *inode)
1599 {
1600 	struct cifsInodeInfo *cifsi = inode;
1601 
1602 	inode_init_once(&cifsi->netfs.inode);
1603 	init_rwsem(&cifsi->lock_sem);
1604 }
1605 
1606 static int __init
1607 cifs_init_inodecache(void)
1608 {
1609 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1610 					      sizeof(struct cifsInodeInfo),
1611 					      0, (SLAB_RECLAIM_ACCOUNT|
1612 						SLAB_ACCOUNT),
1613 					      cifs_init_once);
1614 	if (cifs_inode_cachep == NULL)
1615 		return -ENOMEM;
1616 
1617 	return 0;
1618 }
1619 
/* Tear down the cifsInodeInfo slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1630 
1631 static int
1632 cifs_init_request_bufs(void)
1633 {
1634 	/*
1635 	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1636 	 * allocate some more bytes for CIFS.
1637 	 */
1638 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1639 
1640 	if (CIFSMaxBufSize < 8192) {
1641 	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1642 	Unicode path name has to fit in any SMB/CIFS path based frames */
1643 		CIFSMaxBufSize = 8192;
1644 	} else if (CIFSMaxBufSize > 1024*127) {
1645 		CIFSMaxBufSize = 1024 * 127;
1646 	} else {
1647 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1648 	}
1649 /*
1650 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1651 		 CIFSMaxBufSize, CIFSMaxBufSize);
1652 */
1653 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1654 					    CIFSMaxBufSize + max_hdr_size, 0,
1655 					    SLAB_HWCACHE_ALIGN, 0,
1656 					    CIFSMaxBufSize + max_hdr_size,
1657 					    NULL);
1658 	if (cifs_req_cachep == NULL)
1659 		return -ENOMEM;
1660 
1661 	if (cifs_min_rcv < 1)
1662 		cifs_min_rcv = 1;
1663 	else if (cifs_min_rcv > 64) {
1664 		cifs_min_rcv = 64;
1665 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1666 	}
1667 
1668 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1669 						  cifs_req_cachep);
1670 
1671 	if (cifs_req_poolp == NULL) {
1672 		kmem_cache_destroy(cifs_req_cachep);
1673 		return -ENOMEM;
1674 	}
1675 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1676 	almost all handle based requests (but not write response, nor is it
1677 	sufficient for path based requests).  A smaller size would have
1678 	been more efficient (compacting multiple slab items on one 4k page)
1679 	for the case in which debug was on, but this larger size allows
1680 	more SMBs to use small buffer alloc and is still much more
1681 	efficient to alloc 1 per page off the slab compared to 17K (5page)
1682 	alloc of large cifs buffers even when page debugging is on */
1683 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1684 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1685 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1686 	if (cifs_sm_req_cachep == NULL) {
1687 		mempool_destroy(cifs_req_poolp);
1688 		kmem_cache_destroy(cifs_req_cachep);
1689 		return -ENOMEM;
1690 	}
1691 
1692 	if (cifs_min_small < 2)
1693 		cifs_min_small = 2;
1694 	else if (cifs_min_small > 256) {
1695 		cifs_min_small = 256;
1696 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1697 	}
1698 
1699 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1700 						     cifs_sm_req_cachep);
1701 
1702 	if (cifs_sm_req_poolp == NULL) {
1703 		mempool_destroy(cifs_req_poolp);
1704 		kmem_cache_destroy(cifs_req_cachep);
1705 		kmem_cache_destroy(cifs_sm_req_cachep);
1706 		return -ENOMEM;
1707 	}
1708 
1709 	return 0;
1710 }
1711 
1712 static void
1713 cifs_destroy_request_bufs(void)
1714 {
1715 	mempool_destroy(cifs_req_poolp);
1716 	kmem_cache_destroy(cifs_req_cachep);
1717 	mempool_destroy(cifs_sm_req_poolp);
1718 	kmem_cache_destroy(cifs_sm_req_cachep);
1719 }
1720 
1721 static int init_mids(void)
1722 {
1723 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1724 					    sizeof(struct mid_q_entry), 0,
1725 					    SLAB_HWCACHE_ALIGN, NULL);
1726 	if (cifs_mid_cachep == NULL)
1727 		return -ENOMEM;
1728 
1729 	/* 3 is a reasonable minimum number of simultaneous operations */
1730 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1731 	if (cifs_mid_poolp == NULL) {
1732 		kmem_cache_destroy(cifs_mid_cachep);
1733 		return -ENOMEM;
1734 	}
1735 
1736 	return 0;
1737 }
1738 
/* Tear down the mid pool, then its backing cache (pool must go first so
 * its preallocated entries are returned to a still-live cache). */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1744 
/*
 * Create the caches and mempools for netfs I/O requests and
 * subrequests.  Returns 0 on success or -ENOMEM, unwinding any partial
 * setup via the goto chain below.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	/* preload 100 request objects to survive memory pressure */
	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_destroy(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1778 
/* Tear down the netfs I/O pools and caches (each pool exits before its
 * backing cache is destroyed). */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1786 
/*
 * Module load: initialize global counters and locks, clamp module
 * parameters, create workqueues, slab caches and upcalls, then register
 * the "cifs" and "smb3" filesystem types.  Any failure unwinds all
 * prior setup via the labelled error path at the bottom, in reverse
 * order of construction.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	/* Clamp the cifs_max_pending module parameter to a sane range */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_serverclose_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/* Error unwind: each label undoes the step initialized just above it */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
1973 
/*
 * Module unload: the mirror image of init_cifs(), unregistering the
 * filesystem types first and then tearing down upcalls, caches and
 * workqueues.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2003 
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft module dependencies: crypto algorithms and charset support used
 * for SMB signing/encryption and name conversion.  NOTE(review): these
 * entries lack the usual "pre: " prefix seen with MODULE_SOFTDEP —
 * confirm userspace tooling honours them in this form.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2023