xref: /linux/fs/smb/client/cifsfs.c (revision eb01fe7abbe2d0b38824d2a93fdb4cc3eaf2ccc1)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the range should be to 119, which
56  * limits maximum year to 2099. But this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61 
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 unsigned int sign_CIFS_PDUs = 1;
74 
75 /*
76  * Global transaction id (XID) information
77  */
78 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
80 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
82 
83 /*
84  *  Global counters, updated atomically
85  */
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
92 
93 atomic_t mid_count;
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
99 #endif /* STATS2 */
100 struct list_head	cifs_tcp_ses_list;
101 spinlock_t		cifs_tcp_ses_lock;
102 static const struct super_operations cifs_super_ops;
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 				 "for CIFS requests. "
107 				 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 				"1 to 64");
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 				 "Range: 2 to 256");
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 				   "CIFS/SMB1 dialect (N/A for SMB3) "
120 				   "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 #ifdef CONFIG_CIFS_STATS2
126 unsigned int slow_rsp_threshold = 1;
127 module_param(slow_rsp_threshold, uint, 0644);
128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
129 				   "before logging that a response is delayed. "
130 				   "Default: 1 (if set to 0 disables msg).");
131 #endif /* STATS2 */
132 
133 module_param(enable_oplocks, bool, 0644);
134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
135 
136 module_param(enable_gcm_256, bool, 0644);
137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
138 
139 module_param(require_gcm_256, bool, 0644);
140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
141 
142 module_param(enable_negotiate_signing, bool, 0644);
143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
144 
145 module_param(disable_legacy_dialects, bool, 0644);
146 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
147 				  "helpful to restrict the ability to "
148 				  "override the default dialects (SMB2.1, "
149 				  "SMB3 and SMB3.02) on mount with old "
150 				  "dialects (CIFS/SMB1 and SMB2) since "
151 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
152 				  " and less secure. Default: n/N/0");
153 
154 extern mempool_t *cifs_sm_req_poolp;
155 extern mempool_t *cifs_req_poolp;
156 extern mempool_t *cifs_mid_poolp;
157 
158 struct workqueue_struct	*cifsiod_wq;
159 struct workqueue_struct	*decrypt_wq;
160 struct workqueue_struct	*fileinfo_put_wq;
161 struct workqueue_struct	*cifsoplockd_wq;
162 struct workqueue_struct	*deferredclose_wq;
163 __u32 cifs_lock_secret;
164 
165 /*
166  * Bumps refcount for cifs super block.
167  * Note that it should only be called if a reference to the VFS super block is
168  * already held, e.g. in the context of open-type syscalls. Otherwise it can race with
169  * atomic_dec_and_test in deactivate_locked_super.
170  */
171 void
172 cifs_sb_active(struct super_block *sb)
173 {
174 	struct cifs_sb_info *server = CIFS_SB(sb);
175 
176 	if (atomic_inc_return(&server->active) == 1)
177 		atomic_inc(&sb->s_active);
178 }
179 
180 void
181 cifs_sb_deactive(struct super_block *sb)
182 {
183 	struct cifs_sb_info *server = CIFS_SB(sb);
184 
185 	if (atomic_dec_and_test(&server->active))
186 		deactivate_super(sb);
187 }
188 
189 static int
190 cifs_read_super(struct super_block *sb)
191 {
192 	struct inode *inode;
193 	struct cifs_sb_info *cifs_sb;
194 	struct cifs_tcon *tcon;
195 	struct timespec64 ts;
196 	int rc = 0;
197 
198 	cifs_sb = CIFS_SB(sb);
199 	tcon = cifs_sb_master_tcon(cifs_sb);
200 
201 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
202 		sb->s_flags |= SB_POSIXACL;
203 
204 	if (tcon->snapshot_time)
205 		sb->s_flags |= SB_RDONLY;
206 
207 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
208 		sb->s_maxbytes = MAX_LFS_FILESIZE;
209 	else
210 		sb->s_maxbytes = MAX_NON_LFS;
211 
212 	/*
213 	 * Some very old servers, like DOS and OS/2, used 2 second granularity
214 	 * (while all current servers use 100ns granularity - see MS-DTYP),
215 	 * but 1 second is the maximum granularity allowed by the VFS, so set
216 	 * the time granularity to 1 second for old servers and to 100ns for
217 	 * everything else (current servers).
218 	 */
219 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
220 	    ((tcon->ses->capabilities &
221 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
222 	    !tcon->unix_ext) {
223 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
224 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
225 		sb->s_time_min = ts.tv_sec;
226 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
227 				    cpu_to_le16(SMB_TIME_MAX), 0);
228 		sb->s_time_max = ts.tv_sec;
229 	} else {
230 		/*
231 		 * Almost every server, including all SMB2+, uses DCE TIME
232 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
233 		 */
234 		sb->s_time_gran = 100;
235 		ts = cifs_NTtimeToUnix(0);
236 		sb->s_time_min = ts.tv_sec;
237 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
238 		sb->s_time_max = ts.tv_sec;
239 	}
240 
241 	sb->s_magic = CIFS_SUPER_MAGIC;
242 	sb->s_op = &cifs_super_ops;
243 	sb->s_xattr = cifs_xattr_handlers;
244 	rc = super_setup_bdi(sb);
245 	if (rc)
246 		goto out_no_root;
247 	/* tune readahead according to rsize if readahead size not set on mount */
248 	if (cifs_sb->ctx->rsize == 0)
249 		cifs_sb->ctx->rsize =
250 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
251 	if (cifs_sb->ctx->rasize)
252 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
253 	else
254 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
255 
256 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
257 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
258 	inode = cifs_root_iget(sb);
259 
260 	if (IS_ERR(inode)) {
261 		rc = PTR_ERR(inode);
262 		goto out_no_root;
263 	}
264 
265 	if (tcon->nocase)
266 		sb->s_d_op = &cifs_ci_dentry_ops;
267 	else
268 		sb->s_d_op = &cifs_dentry_ops;
269 
270 	sb->s_root = d_make_root(inode);
271 	if (!sb->s_root) {
272 		rc = -ENOMEM;
273 		goto out_no_root;
274 	}
275 
276 #ifdef CONFIG_CIFS_NFSD_EXPORT
277 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
278 		cifs_dbg(FYI, "export ops supported\n");
279 		sb->s_export_op = &cifs_export_ops;
280 	}
281 #endif /* CONFIG_CIFS_NFSD_EXPORT */
282 
283 	return 0;
284 
285 out_no_root:
286 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
287 	return rc;
288 }
289 
290 static void cifs_kill_sb(struct super_block *sb)
291 {
292 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
293 
294 	/*
295 	 * We need to release all dentries for the cached directories
296 	 * before we kill the sb.
297 	 */
298 	if (cifs_sb->root) {
299 		close_all_cached_dirs(cifs_sb);
300 
301 		/* finally release root dentry */
302 		dput(cifs_sb->root);
303 		cifs_sb->root = NULL;
304 	}
305 
306 	kill_anon_super(sb);
307 	cifs_umount(cifs_sb);
308 }
309 
310 static int
311 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
312 {
313 	struct super_block *sb = dentry->d_sb;
314 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
315 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
316 	struct TCP_Server_Info *server = tcon->ses->server;
317 	unsigned int xid;
318 	int rc = 0;
319 
320 	xid = get_xid();
321 
322 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
323 		buf->f_namelen =
324 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
325 	else
326 		buf->f_namelen = PATH_MAX;
327 
328 	buf->f_fsid.val[0] = tcon->vol_serial_number;
329 	/* we are using part of the create time for more randomness, see man statfs */
330 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
331 
332 	buf->f_files = 0;	/* undefined */
333 	buf->f_ffree = 0;	/* unlimited */
334 
335 	if (server->ops->queryfs)
336 		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
337 
338 	free_xid(xid);
339 	return rc;
340 }
341 
342 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
343 {
344 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
345 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
346 	struct TCP_Server_Info *server = tcon->ses->server;
347 
348 	if (server->ops->fallocate)
349 		return server->ops->fallocate(file, tcon, mode, off, len);
350 
351 	return -EOPNOTSUPP;
352 }
353 
354 static int cifs_permission(struct mnt_idmap *idmap,
355 			   struct inode *inode, int mask)
356 {
357 	struct cifs_sb_info *cifs_sb;
358 
359 	cifs_sb = CIFS_SB(inode->i_sb);
360 
361 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
362 		if ((mask & MAY_EXEC) && !execute_ok(inode))
363 			return -EACCES;
364 		else
365 			return 0;
366 	} else /* file mode might have been restricted at mount time
367 		on the client (above and beyond ACL on servers) for
368 		servers which do not support setting and viewing mode bits,
369 		so allowing client to check permissions is useful */
370 		return generic_permission(&nop_mnt_idmap, inode, mask);
371 }
372 
373 static struct kmem_cache *cifs_inode_cachep;
374 static struct kmem_cache *cifs_req_cachep;
375 static struct kmem_cache *cifs_mid_cachep;
376 static struct kmem_cache *cifs_sm_req_cachep;
377 mempool_t *cifs_sm_req_poolp;
378 mempool_t *cifs_req_poolp;
379 mempool_t *cifs_mid_poolp;
380 
381 static struct inode *
382 cifs_alloc_inode(struct super_block *sb)
383 {
384 	struct cifsInodeInfo *cifs_inode;
385 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
386 	if (!cifs_inode)
387 		return NULL;
388 	cifs_inode->cifsAttrs = 0x20;	/* default */
389 	cifs_inode->time = 0;
390 	/*
391 	 * Until the file is open and we have gotten oplock info back from the
392 	 * server, can not assume caching of file data or metadata.
393 	 */
394 	cifs_set_oplock_level(cifs_inode, 0);
395 	cifs_inode->flags = 0;
396 	spin_lock_init(&cifs_inode->writers_lock);
397 	cifs_inode->writers = 0;
398 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
399 	cifs_inode->netfs.remote_i_size = 0;
400 	cifs_inode->uniqueid = 0;
401 	cifs_inode->createtime = 0;
402 	cifs_inode->epoch = 0;
403 	spin_lock_init(&cifs_inode->open_file_lock);
404 	generate_random_uuid(cifs_inode->lease_key);
405 	cifs_inode->symlink_target = NULL;
406 
407 	/*
408 	 * Can not set i_flags here - they get immediately overwritten to zero
409 	 * by the VFS.
410 	 */
411 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
412 	INIT_LIST_HEAD(&cifs_inode->openFileList);
413 	INIT_LIST_HEAD(&cifs_inode->llist);
414 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
415 	spin_lock_init(&cifs_inode->deferred_lock);
416 	return &cifs_inode->netfs.inode;
417 }
418 
419 static void
420 cifs_free_inode(struct inode *inode)
421 {
422 	struct cifsInodeInfo *cinode = CIFS_I(inode);
423 
424 	if (S_ISLNK(inode->i_mode))
425 		kfree(cinode->symlink_target);
426 	kmem_cache_free(cifs_inode_cachep, cinode);
427 }
428 
429 static void
430 cifs_evict_inode(struct inode *inode)
431 {
432 	truncate_inode_pages_final(&inode->i_data);
433 	if (inode->i_state & I_PINNING_NETFS_WB)
434 		cifs_fscache_unuse_inode_cookie(inode, true);
435 	cifs_fscache_release_inode_cookie(inode);
436 	clear_inode(inode);
437 }
438 
439 static void
440 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
441 {
442 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
443 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
444 
445 	seq_puts(s, ",addr=");
446 
447 	switch (server->dstaddr.ss_family) {
448 	case AF_INET:
449 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
450 		break;
451 	case AF_INET6:
452 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
453 		if (sa6->sin6_scope_id)
454 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
455 		break;
456 	default:
457 		seq_puts(s, "(unknown)");
458 	}
459 	if (server->rdma)
460 		seq_puts(s, ",rdma");
461 }
462 
463 static void
464 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
465 {
466 	if (ses->sectype == Unspecified) {
467 		if (ses->user_name == NULL)
468 			seq_puts(s, ",sec=none");
469 		return;
470 	}
471 
472 	seq_puts(s, ",sec=");
473 
474 	switch (ses->sectype) {
475 	case NTLMv2:
476 		seq_puts(s, "ntlmv2");
477 		break;
478 	case Kerberos:
479 		seq_puts(s, "krb5");
480 		break;
481 	case RawNTLMSSP:
482 		seq_puts(s, "ntlmssp");
483 		break;
484 	default:
485 		/* shouldn't ever happen */
486 		seq_puts(s, "unknown");
487 		break;
488 	}
489 
490 	if (ses->sign)
491 		seq_puts(s, "i");
492 
493 	if (ses->sectype == Kerberos)
494 		seq_printf(s, ",cruid=%u",
495 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
496 }
497 
498 static void
499 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
500 {
501 	seq_puts(s, ",cache=");
502 
503 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
504 		seq_puts(s, "strict");
505 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
506 		seq_puts(s, "none");
507 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
508 		seq_puts(s, "singleclient"); /* assume only one client access */
509 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
510 		seq_puts(s, "ro"); /* read only caching assumed */
511 	else
512 		seq_puts(s, "loose");
513 }
514 
515 /*
516  * cifs_show_devname() is used so we show the mount device name with correct
517  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
518  */
519 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
520 {
521 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
522 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
523 
524 	if (devname == NULL)
525 		seq_puts(m, "none");
526 	else {
527 		convert_delimiter(devname, '/');
528 		/* escape all spaces in share names */
529 		seq_escape(m, devname, " \t");
530 		kfree(devname);
531 	}
532 	return 0;
533 }
534 
535 /*
536  * cifs_show_options() is for displaying mount options in /proc/mounts.
537  * Not all settable options are displayed but most of the important
538  * ones are.
539  */
540 static int
541 cifs_show_options(struct seq_file *s, struct dentry *root)
542 {
543 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
544 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
545 	struct sockaddr *srcaddr;
546 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
547 
548 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
549 	cifs_show_security(s, tcon->ses);
550 	cifs_show_cache_flavor(s, cifs_sb);
551 
552 	if (tcon->no_lease)
553 		seq_puts(s, ",nolease");
554 	if (cifs_sb->ctx->multiuser)
555 		seq_puts(s, ",multiuser");
556 	else if (tcon->ses->user_name)
557 		seq_show_option(s, "username", tcon->ses->user_name);
558 
559 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
560 		seq_show_option(s, "domain", tcon->ses->domainName);
561 
562 	if (srcaddr->sa_family != AF_UNSPEC) {
563 		struct sockaddr_in *saddr4;
564 		struct sockaddr_in6 *saddr6;
565 		saddr4 = (struct sockaddr_in *)srcaddr;
566 		saddr6 = (struct sockaddr_in6 *)srcaddr;
567 		if (srcaddr->sa_family == AF_INET6)
568 			seq_printf(s, ",srcaddr=%pI6c",
569 				   &saddr6->sin6_addr);
570 		else if (srcaddr->sa_family == AF_INET)
571 			seq_printf(s, ",srcaddr=%pI4",
572 				   &saddr4->sin_addr.s_addr);
573 		else
574 			seq_printf(s, ",srcaddr=BAD-AF:%i",
575 				   (int)(srcaddr->sa_family));
576 	}
577 
578 	seq_printf(s, ",uid=%u",
579 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
580 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
581 		seq_puts(s, ",forceuid");
582 	else
583 		seq_puts(s, ",noforceuid");
584 
585 	seq_printf(s, ",gid=%u",
586 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
587 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
588 		seq_puts(s, ",forcegid");
589 	else
590 		seq_puts(s, ",noforcegid");
591 
592 	cifs_show_address(s, tcon->ses->server);
593 
594 	if (!tcon->unix_ext)
595 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
596 					   cifs_sb->ctx->file_mode,
597 					   cifs_sb->ctx->dir_mode);
598 	if (cifs_sb->ctx->iocharset)
599 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
600 	if (tcon->seal)
601 		seq_puts(s, ",seal");
602 	else if (tcon->ses->server->ignore_signature)
603 		seq_puts(s, ",signloosely");
604 	if (tcon->nocase)
605 		seq_puts(s, ",nocase");
606 	if (tcon->nodelete)
607 		seq_puts(s, ",nodelete");
608 	if (cifs_sb->ctx->no_sparse)
609 		seq_puts(s, ",nosparse");
610 	if (tcon->local_lease)
611 		seq_puts(s, ",locallease");
612 	if (tcon->retry)
613 		seq_puts(s, ",hard");
614 	else
615 		seq_puts(s, ",soft");
616 	if (tcon->use_persistent)
617 		seq_puts(s, ",persistenthandles");
618 	else if (tcon->use_resilient)
619 		seq_puts(s, ",resilienthandles");
620 	if (tcon->posix_extensions)
621 		seq_puts(s, ",posix");
622 	else if (tcon->unix_ext)
623 		seq_puts(s, ",unix");
624 	else
625 		seq_puts(s, ",nounix");
626 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
627 		seq_puts(s, ",nodfs");
628 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
629 		seq_puts(s, ",posixpaths");
630 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
631 		seq_puts(s, ",setuids");
632 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
633 		seq_puts(s, ",idsfromsid");
634 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
635 		seq_puts(s, ",serverino");
636 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
637 		seq_puts(s, ",rwpidforward");
638 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
639 		seq_puts(s, ",forcemand");
640 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
641 		seq_puts(s, ",nouser_xattr");
642 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
643 		seq_puts(s, ",mapchars");
644 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
645 		seq_puts(s, ",mapposix");
646 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
647 		seq_puts(s, ",sfu");
648 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
649 		seq_puts(s, ",nobrl");
650 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
651 		seq_puts(s, ",nohandlecache");
652 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
653 		seq_puts(s, ",modefromsid");
654 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
655 		seq_puts(s, ",cifsacl");
656 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
657 		seq_puts(s, ",dynperm");
658 	if (root->d_sb->s_flags & SB_POSIXACL)
659 		seq_puts(s, ",acl");
660 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
661 		seq_puts(s, ",mfsymlinks");
662 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
663 		seq_puts(s, ",fsc");
664 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
665 		seq_puts(s, ",nostrictsync");
666 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
667 		seq_puts(s, ",noperm");
668 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
669 		seq_printf(s, ",backupuid=%u",
670 			   from_kuid_munged(&init_user_ns,
671 					    cifs_sb->ctx->backupuid));
672 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
673 		seq_printf(s, ",backupgid=%u",
674 			   from_kgid_munged(&init_user_ns,
675 					    cifs_sb->ctx->backupgid));
676 	seq_show_option(s, "reparse",
677 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
678 
679 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
680 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
681 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
682 	if (cifs_sb->ctx->rasize)
683 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
684 	if (tcon->ses->server->min_offload)
685 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
686 	if (tcon->ses->server->retrans)
687 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
688 	seq_printf(s, ",echo_interval=%lu",
689 			tcon->ses->server->echo_interval / HZ);
690 
691 	/* Only display the following if overridden on mount */
692 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
693 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
694 	if (tcon->ses->server->tcp_nodelay)
695 		seq_puts(s, ",tcpnodelay");
696 	if (tcon->ses->server->noautotune)
697 		seq_puts(s, ",noautotune");
698 	if (tcon->ses->server->noblocksnd)
699 		seq_puts(s, ",noblocksend");
700 	if (tcon->ses->server->nosharesock)
701 		seq_puts(s, ",nosharesock");
702 
703 	if (tcon->snapshot_time)
704 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
705 	if (tcon->handle_timeout)
706 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
707 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
708 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
709 
710 	/*
711 	 * Display file and directory attribute timeout in seconds.
712 	 * If the file and directory attribute timeouts are the same then
713 	 * actimeo was likely specified on mount.
714 	 */
715 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
716 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
717 	else {
718 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
719 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
720 	}
721 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
722 
723 	if (tcon->ses->chan_max > 1)
724 		seq_printf(s, ",multichannel,max_channels=%zu",
725 			   tcon->ses->chan_max);
726 
727 	if (tcon->use_witness)
728 		seq_puts(s, ",witness");
729 
730 	return 0;
731 }
732 
733 static void cifs_umount_begin(struct super_block *sb)
734 {
735 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
736 	struct cifs_tcon *tcon;
737 
738 	if (cifs_sb == NULL)
739 		return;
740 
741 	tcon = cifs_sb_master_tcon(cifs_sb);
742 
743 	spin_lock(&cifs_tcp_ses_lock);
744 	spin_lock(&tcon->tc_lock);
745 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
746 		/* we have other mounts to same share or we have
747 		   already tried to umount this and woken up
748 		   all waiting network requests, nothing to do */
749 		spin_unlock(&tcon->tc_lock);
750 		spin_unlock(&cifs_tcp_ses_lock);
751 		return;
752 	}
753 	/*
754 	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
755 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
756 	 */
757 	spin_unlock(&tcon->tc_lock);
758 	spin_unlock(&cifs_tcp_ses_lock);
759 
760 	cifs_close_all_deferred_files(tcon);
761 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
762 	/* cancel_notify_requests(tcon); */
763 	if (tcon->ses && tcon->ses->server) {
764 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
765 		wake_up_all(&tcon->ses->server->request_q);
766 		wake_up_all(&tcon->ses->server->response_q);
767 		msleep(1); /* yield */
768 		/* we have to kick the requests once more */
769 		wake_up_all(&tcon->ses->server->response_q);
770 		msleep(1);
771 	}
772 
773 	return;
774 }
775 
776 static int cifs_freeze(struct super_block *sb)
777 {
778 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
779 	struct cifs_tcon *tcon;
780 
781 	if (cifs_sb == NULL)
782 		return 0;
783 
784 	tcon = cifs_sb_master_tcon(cifs_sb);
785 
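	/* close any handles kept open by deferred close now, rather than leaving that work pending across the freeze */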
786 	cifs_close_all_deferred_files(tcon);
787 	return 0;
788 }
789 
790 #ifdef CONFIG_CIFS_STATS2
791 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
792 {
793 	/* BB FIXME */
794 	return 0;
795 }
796 #endif
797 
798 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
799 {
800 	return netfs_unpin_writeback(inode, wbc);
801 }
802 
803 static int cifs_drop_inode(struct inode *inode)
804 {
805 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
806 
807 	/* no serverino => unconditional eviction */
808 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
809 		generic_drop_inode(inode);
810 }
811 
812 static const struct super_operations cifs_super_ops = {
813 	.statfs = cifs_statfs,
814 	.alloc_inode = cifs_alloc_inode,
815 	.write_inode	= cifs_write_inode,
816 	.free_inode = cifs_free_inode,
817 	.drop_inode	= cifs_drop_inode,
818 	.evict_inode	= cifs_evict_inode,
819 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
820 	.show_devname   = cifs_show_devname,
821 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
822 	function unless later we add lazy close of inodes or unless the
823 	kernel forgets to call us with the same number of releases (closes)
824 	as opens */
825 	.show_options = cifs_show_options,
826 	.umount_begin   = cifs_umount_begin,
827 	.freeze_fs      = cifs_freeze,
828 #ifdef CONFIG_CIFS_STATS2
829 	.show_stats = cifs_show_stats,
830 #endif
831 };
832 
833 /*
834  * Get root dentry from superblock according to prefix path mount option.
835  * Return dentry with refcount + 1 on success and NULL otherwise.
836  */
837 static struct dentry *
838 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
839 {
840 	struct dentry *dentry;
841 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
842 	char *full_path = NULL;
843 	char *s, *p;
844 	char sep;
845 
846 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
847 		return dget(sb->s_root);
848 
849 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
850 				cifs_sb_master_tcon(cifs_sb), 0);
851 	if (full_path == NULL)
852 		return ERR_PTR(-ENOMEM);
853 
854 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
855 
856 	sep = CIFS_DIR_SEP(cifs_sb);
857 	dentry = dget(sb->s_root);
858 	s = full_path;
859 
860 	do {
861 		struct inode *dir = d_inode(dentry);
862 		struct dentry *child;
863 
864 		if (!S_ISDIR(dir->i_mode)) {
865 			dput(dentry);
866 			dentry = ERR_PTR(-ENOTDIR);
867 			break;
868 		}
869 
870 		/* skip separators */
871 		while (*s == sep)
872 			s++;
873 		if (!*s)
874 			break;
875 		p = s++;
876 		/* next separator */
877 		while (*s && *s != sep)
878 			s++;
879 
880 		child = lookup_positive_unlocked(p, dentry, s - p);
881 		dput(dentry);
882 		dentry = child;
883 	} while (!IS_ERR(dentry));
884 	kfree(full_path);
885 	return dentry;
886 }
887 
888 static int cifs_set_super(struct super_block *sb, void *data)
889 {
890 	struct cifs_mnt_data *mnt_data = data;
891 	sb->s_fs_info = mnt_data->cifs_sb;
892 	return set_anon_super(sb, NULL);
893 }
894 
895 struct dentry *
896 cifs_smb3_do_mount(struct file_system_type *fs_type,
897 	      int flags, struct smb3_fs_context *old_ctx)
898 {
899 	struct cifs_mnt_data mnt_data;
900 	struct cifs_sb_info *cifs_sb;
901 	struct super_block *sb;
902 	struct dentry *root;
903 	int rc;
904 
905 	if (cifsFYI) {
906 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
907 			 old_ctx->source, flags);
908 	} else {
909 		cifs_info("Attempting to mount %s\n", old_ctx->source);
910 	}
911 
912 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
913 	if (!cifs_sb)
914 		return ERR_PTR(-ENOMEM);
915 
916 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
917 	if (!cifs_sb->ctx) {
918 		root = ERR_PTR(-ENOMEM);
919 		goto out;
920 	}
921 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
922 	if (rc) {
923 		root = ERR_PTR(rc);
924 		goto out;
925 	}
926 
927 	rc = cifs_setup_cifs_sb(cifs_sb);
928 	if (rc) {
929 		root = ERR_PTR(rc);
930 		goto out;
931 	}
932 
933 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
934 	if (rc) {
935 		if (!(flags & SB_SILENT))
936 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
937 				 rc);
938 		root = ERR_PTR(rc);
939 		goto out;
940 	}
941 
942 	mnt_data.ctx = cifs_sb->ctx;
943 	mnt_data.cifs_sb = cifs_sb;
944 	mnt_data.flags = flags;
945 
946 	/* BB should we make this contingent on mount parm? */
947 	flags |= SB_NODIRATIME | SB_NOATIME;
948 
949 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
950 	if (IS_ERR(sb)) {
951 		cifs_umount(cifs_sb);
952 		return ERR_CAST(sb);
953 	}
954 
955 	if (sb->s_root) {
956 		cifs_dbg(FYI, "Use existing superblock\n");
957 		cifs_umount(cifs_sb);
958 		cifs_sb = NULL;
959 	} else {
960 		rc = cifs_read_super(sb);
961 		if (rc) {
962 			root = ERR_PTR(rc);
963 			goto out_super;
964 		}
965 
966 		sb->s_flags |= SB_ACTIVE;
967 	}
968 
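	/*
	 * If an existing superblock was reused above, the cifs_sb we built was
	 * already released, so fall back to the caller's fs_context to resolve
	 * the root (and any prefix path).
	 */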
969 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
970 	if (IS_ERR(root))
971 		goto out_super;
972 
973 	if (cifs_sb)
974 		cifs_sb->root = dget(root);
975 
976 	cifs_dbg(FYI, "dentry root is: %p\n", root);
977 	return root;
978 
979 out_super:
980 	deactivate_locked_super(sb);
981 	return root;
982 out:
983 	kfree(cifs_sb->prepath);
984 	smb3_cleanup_fs_context(cifs_sb->ctx);
985 	kfree(cifs_sb);
986 	return root;
987 }
988 
989 
990 static ssize_t
991 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
992 {
993 	ssize_t rc;
994 	struct inode *inode = file_inode(iocb->ki_filp);
995 
996 	if (iocb->ki_flags & IOCB_DIRECT)
997 		return cifs_user_readv(iocb, iter);
998 
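	/* for cached reads, drop any pagecache contents marked stale before serving data from them */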
999 	rc = cifs_revalidate_mapping(inode);
1000 	if (rc)
1001 		return rc;
1002 
1003 	return generic_file_read_iter(iocb, iter);
1004 }
1005 
1006 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1007 {
1008 	struct inode *inode = file_inode(iocb->ki_filp);
1009 	struct cifsInodeInfo *cinode = CIFS_I(inode);
1010 	ssize_t written;
1011 	int rc;
1012 
1013 	if (iocb->ki_filp->f_flags & O_DIRECT) {
1014 		written = cifs_user_writev(iocb, from);
1015 		if (written > 0 && CIFS_CACHE_READ(cinode)) {
1016 			cifs_zap_mapping(inode);
1017 			cifs_dbg(FYI,
1018 				 "Set no oplock for inode=%p after a write operation\n",
1019 				 inode);
1020 			cinode->oplock = 0;
1021 		}
1022 		return written;
1023 	}
1024 
1025 	written = cifs_get_writer(cinode);
1026 	if (written)
1027 		return written;
1028 
1029 	written = generic_file_write_iter(iocb, from);
1030 
1031 	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1032 		goto out;
1033 
1034 	rc = filemap_fdatawrite(inode->i_mapping);
1035 	if (rc)
1036 		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1037 			 rc, inode);
1038 
1039 out:
1040 	cifs_put_writer(cinode);
1041 	return written;
1042 }
1043 
1044 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1045 {
1046 	struct cifsFileInfo *cfile = file->private_data;
1047 	struct cifs_tcon *tcon;
1048 
1049 	/*
1050 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1051 	 * the cached file length
1052 	 */
1053 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1054 		int rc;
1055 		struct inode *inode = file_inode(file);
1056 
1057 		/*
1058 		 * We need to be sure that all dirty pages are written and the
1059 		 * server has the newest file length.
1060 		 */
1061 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1062 		    inode->i_mapping->nrpages != 0) {
1063 			rc = filemap_fdatawait(inode->i_mapping);
1064 			if (rc) {
1065 				mapping_set_error(inode->i_mapping, rc);
1066 				return rc;
1067 			}
1068 		}
1069 		/*
1070 		 * Some applications poll for the file length in this strange
1071 		 * way so we must seek to end on non-oplocked files by
1072 		 * setting the revalidate time to zero.
1073 		 */
1074 		CIFS_I(inode)->time = 0;
1075 
1076 		rc = cifs_revalidate_file_attr(file);
1077 		if (rc < 0)
1078 			return (loff_t)rc;
1079 	}
1080 	if (cfile && cfile->tlink) {
1081 		tcon = tlink_tcon(cfile->tlink);
1082 		if (tcon->ses->server->ops->llseek)
1083 			return tcon->ses->server->ops->llseek(file, tcon,
1084 							      offset, whence);
1085 	}
1086 	return generic_file_llseek(file, offset, whence);
1087 }
1088 
1089 static int
1090 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1091 {
1092 	/*
1093 	 * Note that this is called by vfs setlease with i_lock held to
1094 	 * protect *lease from going away.
1095 	 */
1096 	struct inode *inode = file_inode(file);
1097 	struct cifsFileInfo *cfile = file->private_data;
1098 
1099 	/* Check if file is oplocked if this is request for new lease */
1100 	if (arg == F_UNLCK ||
1101 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1102 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1103 		return generic_setlease(file, arg, lease, priv);
1104 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1105 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1106 		/*
1107 		 * If the server claims to support oplock on this file, then we
1108 		 * still need to check oplock even if the local_lease mount
1109 		 * option is set, but there are servers which do not support
1110 		 * oplock for which this mount option may be useful if the user
1111 		 * knows that the file won't be changed on the server by anyone
1112 		 * else.
1113 		 */
1114 		return generic_setlease(file, arg, lease, priv);
1115 	else
1116 		return -EAGAIN;
1117 }
1118 
1119 struct file_system_type cifs_fs_type = {
1120 	.owner = THIS_MODULE,
1121 	.name = "cifs",
1122 	.init_fs_context = smb3_init_fs_context,
1123 	.parameters = smb3_fs_parameters,
1124 	.kill_sb = cifs_kill_sb,
1125 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1126 };
1127 MODULE_ALIAS_FS("cifs");
1128 
1129 struct file_system_type smb3_fs_type = {
1130 	.owner = THIS_MODULE,
1131 	.name = "smb3",
1132 	.init_fs_context = smb3_init_fs_context,
1133 	.parameters = smb3_fs_parameters,
1134 	.kill_sb = cifs_kill_sb,
1135 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1136 };
1137 MODULE_ALIAS_FS("smb3");
1138 MODULE_ALIAS("smb3");
1139 
1140 const struct inode_operations cifs_dir_inode_ops = {
1141 	.create = cifs_create,
1142 	.atomic_open = cifs_atomic_open,
1143 	.lookup = cifs_lookup,
1144 	.getattr = cifs_getattr,
1145 	.unlink = cifs_unlink,
1146 	.link = cifs_hardlink,
1147 	.mkdir = cifs_mkdir,
1148 	.rmdir = cifs_rmdir,
1149 	.rename = cifs_rename2,
1150 	.permission = cifs_permission,
1151 	.setattr = cifs_setattr,
1152 	.symlink = cifs_symlink,
1153 	.mknod   = cifs_mknod,
1154 	.listxattr = cifs_listxattr,
1155 	.get_acl = cifs_get_acl,
1156 	.set_acl = cifs_set_acl,
1157 };
1158 
1159 const struct inode_operations cifs_file_inode_ops = {
1160 	.setattr = cifs_setattr,
1161 	.getattr = cifs_getattr,
1162 	.permission = cifs_permission,
1163 	.listxattr = cifs_listxattr,
1164 	.fiemap = cifs_fiemap,
1165 	.get_acl = cifs_get_acl,
1166 	.set_acl = cifs_set_acl,
1167 };
1168 
1169 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1170 			    struct delayed_call *done)
1171 {
1172 	char *target_path;
1173 
1174 	if (!dentry)
1175 		return ERR_PTR(-ECHILD);
1176 
1177 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1178 	if (!target_path)
1179 		return ERR_PTR(-ENOMEM);
1180 
1181 	spin_lock(&inode->i_lock);
1182 	if (likely(CIFS_I(inode)->symlink_target)) {
1183 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1184 	} else {
1185 		kfree(target_path);
1186 		target_path = ERR_PTR(-EOPNOTSUPP);
1187 	}
1188 	spin_unlock(&inode->i_lock);
1189 
1190 	if (!IS_ERR(target_path))
1191 		set_delayed_call(done, kfree_link, target_path);
1192 
1193 	return target_path;
1194 }
1195 
1196 const struct inode_operations cifs_symlink_inode_ops = {
1197 	.get_link = cifs_get_link,
1198 	.setattr = cifs_setattr,
1199 	.permission = cifs_permission,
1200 	.listxattr = cifs_listxattr,
1201 };
1202 
1203 /*
1204  * Advance the EOF marker to after the source range.
1205  */
1206 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1207 				struct cifs_tcon *src_tcon,
1208 				unsigned int xid, loff_t src_end)
1209 {
1210 	struct cifsFileInfo *writeable_srcfile;
1211 	int rc = -EINVAL;
1212 
1213 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1214 	if (writeable_srcfile) {
1215 		if (src_tcon->ses->server->ops->set_file_size)
1216 			rc = src_tcon->ses->server->ops->set_file_size(
1217 				xid, src_tcon, writeable_srcfile,
1218 				src_inode->i_size, true /* no need to set sparse */);
1219 		else
1220 			rc = -ENOSYS;
1221 		cifsFileInfo_put(writeable_srcfile);
1222 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1223 	}
1224 
1225 	if (rc < 0)
1226 		goto set_failed;
1227 
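	/* the server-side EOF was advanced; keep the local netfs size and the fscache cookie in sync */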
1228 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1229 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1230 	return 0;
1231 
1232 set_failed:
1233 	return filemap_write_and_wait(src_inode->i_mapping);
1234 }
1235 
1236 /*
1237  * Flush out either the folio that overlaps the beginning of a range in which
1238  * pos resides or the folio that overlaps the end of a range unless that folio
1239  * is entirely within the range we're going to invalidate.  We extend the flush
1240  * bounds to encompass the folio.
1241  */
1242 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1243 			    bool first)
1244 {
1245 	struct folio *folio;
1246 	unsigned long long fpos, fend;
1247 	pgoff_t index = pos / PAGE_SIZE;
1248 	size_t size;
1249 	int rc = 0;
1250 
1251 	folio = filemap_get_folio(inode->i_mapping, index);
1252 	if (IS_ERR(folio))
1253 		return 0;
1254 
1255 	size = folio_size(folio);
1256 	fpos = folio_pos(folio);
1257 	fend = fpos + size - 1;
1258 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1259 	*_fend   = max_t(unsigned long long, *_fend, fend);
1260 	if ((first && pos == fpos) || (!first && pos == fend))
1261 		goto out;
1262 
1263 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1264 out:
1265 	folio_put(folio);
1266 	return rc;
1267 }
1268 
1269 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1270 		struct file *dst_file, loff_t destoff, loff_t len,
1271 		unsigned int remap_flags)
1272 {
1273 	struct inode *src_inode = file_inode(src_file);
1274 	struct inode *target_inode = file_inode(dst_file);
1275 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1276 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1277 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1278 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1279 	struct cifs_tcon *target_tcon, *src_tcon;
1280 	unsigned long long destend, fstart, fend, new_size;
1281 	unsigned int xid;
1282 	int rc;
1283 
1284 	if (remap_flags & REMAP_FILE_DEDUP)
1285 		return -EOPNOTSUPP;
1286 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1287 		return -EINVAL;
1288 
1289 	cifs_dbg(FYI, "clone range\n");
1290 
1291 	xid = get_xid();
1292 
1293 	if (!smb_file_src || !smb_file_target) {
1294 		rc = -EBADF;
1295 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1296 		goto out;
1297 	}
1298 
1299 	src_tcon = tlink_tcon(smb_file_src->tlink);
1300 	target_tcon = tlink_tcon(smb_file_target->tlink);
1301 
1302 	/*
1303 	 * Note: the cifs case is easier than btrfs since the server is responsible
1304 	 * for checking for proper open modes and file type, and if it wants the
1305 	 * server could even support copying a range where source = target
1306 	 */
1307 	lock_two_nondirectories(target_inode, src_inode);
1308 
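	/* a zero length means clone from off through to the end of the source file */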
1309 	if (len == 0)
1310 		len = src_inode->i_size - off;
1311 
1312 	cifs_dbg(FYI, "clone range\n");
1313 
1314 	/* Flush the source buffer */
1315 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1316 					  off + len - 1);
1317 	if (rc)
1318 		goto unlock;
1319 
1320 	/* The server-side copy will fail if the source crosses the EOF marker.
1321 	 * Advance the EOF marker after the flush above to the end of the range
1322 	 * if it's short of that.
1323 	 */
1324 	if (src_cifsi->netfs.remote_i_size < off + len) {
1325 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1326 		if (rc < 0)
1327 			goto unlock;
1328 	}
1329 
1330 	new_size = destoff + len;
1331 	destend = destoff + len - 1;
1332 
1333 	/* Flush the folios at either end of the destination range to prevent
1334 	 * accidental loss of dirty data outside of the range.
1335 	 */
1336 	fstart = destoff;
1337 	fend = destend;
1338 
1339 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1340 	if (rc)
1341 		goto unlock;
1342 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1343 	if (rc)
1344 		goto unlock;
1345 
1346 	/* Discard all the folios that overlap the destination region. */
1347 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1348 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1349 
1350 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1351 			   i_size_read(target_inode), 0);
1352 
1353 	rc = -EOPNOTSUPP;
1354 	if (target_tcon->ses->server->ops->duplicate_extents) {
1355 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1356 			smb_file_src, smb_file_target, off, len, destoff);
1357 		if (rc == 0 && new_size > i_size_read(target_inode)) {
1358 			truncate_setsize(target_inode, new_size);
1359 			netfs_resize_file(&target_cifsi->netfs, new_size, true);
1360 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1361 					      new_size);
1362 		}
1363 	}
1364 
1365 	/* force revalidate of size and timestamps of target file now
1366 	   that target is updated on the server */
1367 	CIFS_I(target_inode)->time = 0;
1368 unlock:
1369 	/* although unlocking in the reverse order from locking is not
1370 	   strictly necessary here it is a little cleaner to be consistent */
1371 	unlock_two_nondirectories(src_inode, target_inode);
1372 out:
1373 	free_xid(xid);
1374 	return rc < 0 ? rc : len;
1375 }
1376 
1377 ssize_t cifs_file_copychunk_range(unsigned int xid,
1378 				struct file *src_file, loff_t off,
1379 				struct file *dst_file, loff_t destoff,
1380 				size_t len, unsigned int flags)
1381 {
1382 	struct inode *src_inode = file_inode(src_file);
1383 	struct inode *target_inode = file_inode(dst_file);
1384 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1385 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1386 	struct cifsFileInfo *smb_file_src;
1387 	struct cifsFileInfo *smb_file_target;
1388 	struct cifs_tcon *src_tcon;
1389 	struct cifs_tcon *target_tcon;
1390 	unsigned long long destend, fstart, fend;
1391 	ssize_t rc;
1392 
1393 	cifs_dbg(FYI, "copychunk range\n");
1394 
1395 	if (!src_file->private_data || !dst_file->private_data) {
1396 		rc = -EBADF;
1397 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1398 		goto out;
1399 	}
1400 
1401 	rc = -EXDEV;
1402 	smb_file_target = dst_file->private_data;
1403 	smb_file_src = src_file->private_data;
1404 	src_tcon = tlink_tcon(smb_file_src->tlink);
1405 	target_tcon = tlink_tcon(smb_file_target->tlink);
1406 
1407 	if (src_tcon->ses != target_tcon->ses) {
1408 		cifs_dbg(VFS, "source and target of copy not on same server\n");
1409 		goto out;
1410 	}
1411 
1412 	rc = -EOPNOTSUPP;
1413 	if (!target_tcon->ses->server->ops->copychunk_range)
1414 		goto out;
1415 
1416 	/*
1417 	 * Note: the cifs case is easier than btrfs since the server is responsible
1418 	 * for checking for proper open modes and file type, and if it wants the
1419 	 * server could even support copying a range where source = target
1420 	 */
1421 	lock_two_nondirectories(target_inode, src_inode);
1422 
1423 	cifs_dbg(FYI, "about to flush pages\n");
1424 
1425 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1426 					  off + len - 1);
1427 	if (rc)
1428 		goto unlock;
1429 
1430 	/* The server-side copy will fail if the source crosses the EOF marker.
1431 	 * Advance the EOF marker after the flush above to the end of the range
1432 	 * if it's short of that.
1433 	 */
1434 	if (src_cifsi->netfs.remote_i_size < off + len) {
1435 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1436 		if (rc < 0)
1437 			goto unlock;
1438 	}
1439 
1440 	destend = destoff + len - 1;
1441 
1442 	/* Flush the folios at either end of the destination range to prevent
1443 	 * accidental loss of dirty data outside of the range.
1444 	 */
1445 	fstart = destoff;
1446 	fend = destend;
1447 
1448 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1449 	if (rc)
1450 		goto unlock;
1451 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1452 	if (rc)
1453 		goto unlock;
1454 
1455 	/* Discard all the folios that overlap the destination region. */
1456 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1457 
1458 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1459 			   i_size_read(target_inode), 0);
1460 
1461 	rc = file_modified(dst_file);
1462 	if (!rc) {
1463 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1464 			smb_file_src, smb_file_target, off, len, destoff);
1465 		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1466 			truncate_setsize(target_inode, destoff + rc);
1467 			netfs_resize_file(&target_cifsi->netfs,
1468 					  i_size_read(target_inode), true);
1469 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1470 					      i_size_read(target_inode));
1471 		}
1472 		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1473 			target_cifsi->netfs.zero_point = destoff + rc;
1474 	}
1475 
1476 	file_accessed(src_file);
1477 
1478 	/* force revalidate of size and timestamps of target file now
1479 	 * that target is updated on the server
1480 	 */
1481 	CIFS_I(target_inode)->time = 0;
1482 
1483 unlock:
1484 	/* although unlocking in the reverse order from locking is not
1485 	 * strictly necessary here it is a little cleaner to be consistent
1486 	 */
1487 	unlock_two_nondirectories(src_inode, target_inode);
1488 
1489 out:
1490 	return rc;
1491 }
1492 
1493 /*
1494  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1495  * is a dummy operation.
1496  */
1497 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1498 {
1499 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1500 		 file, datasync);
1501 
1502 	return 0;
1503 }
1504 
1505 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1506 				struct file *dst_file, loff_t destoff,
1507 				size_t len, unsigned int flags)
1508 {
1509 	unsigned int xid = get_xid();
1510 	ssize_t rc;
1511 	struct cifsFileInfo *cfile = dst_file->private_data;
1512 
1513 	if (cfile->swapfile) {
1514 		rc = -EOPNOTSUPP;
1515 		free_xid(xid);
1516 		return rc;
1517 	}
1518 
1519 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1520 					len, flags);
1521 	free_xid(xid);
1522 
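	/* no server-side copy possible (unsupported or not the same server): fall back to a generic splice-based copy */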
1523 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1524 		rc = splice_copy_file_range(src_file, off, dst_file,
1525 					    destoff, len);
1526 	return rc;
1527 }
1528 
1529 const struct file_operations cifs_file_ops = {
1530 	.read_iter = cifs_loose_read_iter,
1531 	.write_iter = cifs_file_write_iter,
1532 	.open = cifs_open,
1533 	.release = cifs_close,
1534 	.lock = cifs_lock,
1535 	.flock = cifs_flock,
1536 	.fsync = cifs_fsync,
1537 	.flush = cifs_flush,
1538 	.mmap  = cifs_file_mmap,
1539 	.splice_read = filemap_splice_read,
1540 	.splice_write = iter_file_splice_write,
1541 	.llseek = cifs_llseek,
1542 	.unlocked_ioctl	= cifs_ioctl,
1543 	.copy_file_range = cifs_copy_file_range,
1544 	.remap_file_range = cifs_remap_file_range,
1545 	.setlease = cifs_setlease,
1546 	.fallocate = cifs_fallocate,
1547 };
1548 
1549 const struct file_operations cifs_file_strict_ops = {
1550 	.read_iter = cifs_strict_readv,
1551 	.write_iter = cifs_strict_writev,
1552 	.open = cifs_open,
1553 	.release = cifs_close,
1554 	.lock = cifs_lock,
1555 	.flock = cifs_flock,
1556 	.fsync = cifs_strict_fsync,
1557 	.flush = cifs_flush,
1558 	.mmap = cifs_file_strict_mmap,
1559 	.splice_read = filemap_splice_read,
1560 	.splice_write = iter_file_splice_write,
1561 	.llseek = cifs_llseek,
1562 	.unlocked_ioctl	= cifs_ioctl,
1563 	.copy_file_range = cifs_copy_file_range,
1564 	.remap_file_range = cifs_remap_file_range,
1565 	.setlease = cifs_setlease,
1566 	.fallocate = cifs_fallocate,
1567 };
1568 
1569 const struct file_operations cifs_file_direct_ops = {
1570 	.read_iter = cifs_direct_readv,
1571 	.write_iter = cifs_direct_writev,
1572 	.open = cifs_open,
1573 	.release = cifs_close,
1574 	.lock = cifs_lock,
1575 	.flock = cifs_flock,
1576 	.fsync = cifs_fsync,
1577 	.flush = cifs_flush,
1578 	.mmap = cifs_file_mmap,
1579 	.splice_read = copy_splice_read,
1580 	.splice_write = iter_file_splice_write,
1581 	.unlocked_ioctl  = cifs_ioctl,
1582 	.copy_file_range = cifs_copy_file_range,
1583 	.remap_file_range = cifs_remap_file_range,
1584 	.llseek = cifs_llseek,
1585 	.setlease = cifs_setlease,
1586 	.fallocate = cifs_fallocate,
1587 };
1588 
1589 const struct file_operations cifs_file_nobrl_ops = {
1590 	.read_iter = cifs_loose_read_iter,
1591 	.write_iter = cifs_file_write_iter,
1592 	.open = cifs_open,
1593 	.release = cifs_close,
1594 	.fsync = cifs_fsync,
1595 	.flush = cifs_flush,
1596 	.mmap  = cifs_file_mmap,
1597 	.splice_read = filemap_splice_read,
1598 	.splice_write = iter_file_splice_write,
1599 	.llseek = cifs_llseek,
1600 	.unlocked_ioctl	= cifs_ioctl,
1601 	.copy_file_range = cifs_copy_file_range,
1602 	.remap_file_range = cifs_remap_file_range,
1603 	.setlease = cifs_setlease,
1604 	.fallocate = cifs_fallocate,
1605 };
1606 
1607 const struct file_operations cifs_file_strict_nobrl_ops = {
1608 	.read_iter = cifs_strict_readv,
1609 	.write_iter = cifs_strict_writev,
1610 	.open = cifs_open,
1611 	.release = cifs_close,
1612 	.fsync = cifs_strict_fsync,
1613 	.flush = cifs_flush,
1614 	.mmap = cifs_file_strict_mmap,
1615 	.splice_read = filemap_splice_read,
1616 	.splice_write = iter_file_splice_write,
1617 	.llseek = cifs_llseek,
1618 	.unlocked_ioctl	= cifs_ioctl,
1619 	.copy_file_range = cifs_copy_file_range,
1620 	.remap_file_range = cifs_remap_file_range,
1621 	.setlease = cifs_setlease,
1622 	.fallocate = cifs_fallocate,
1623 };
1624 
1625 const struct file_operations cifs_file_direct_nobrl_ops = {
1626 	.read_iter = cifs_direct_readv,
1627 	.write_iter = cifs_direct_writev,
1628 	.open = cifs_open,
1629 	.release = cifs_close,
1630 	.fsync = cifs_fsync,
1631 	.flush = cifs_flush,
1632 	.mmap = cifs_file_mmap,
1633 	.splice_read = copy_splice_read,
1634 	.splice_write = iter_file_splice_write,
1635 	.unlocked_ioctl  = cifs_ioctl,
1636 	.copy_file_range = cifs_copy_file_range,
1637 	.remap_file_range = cifs_remap_file_range,
1638 	.llseek = cifs_llseek,
1639 	.setlease = cifs_setlease,
1640 	.fallocate = cifs_fallocate,
1641 };
1642 
1643 const struct file_operations cifs_dir_ops = {
1644 	.iterate_shared = cifs_readdir,
1645 	.release = cifs_closedir,
1646 	.read    = generic_read_dir,
1647 	.unlocked_ioctl  = cifs_ioctl,
1648 	.copy_file_range = cifs_copy_file_range,
1649 	.remap_file_range = cifs_remap_file_range,
1650 	.llseek = generic_file_llseek,
1651 	.fsync = cifs_dir_fsync,
1652 };
1653 
1654 static void
1655 cifs_init_once(void *inode)
1656 {
1657 	struct cifsInodeInfo *cifsi = inode;
1658 
1659 	inode_init_once(&cifsi->netfs.inode);
1660 	init_rwsem(&cifsi->lock_sem);
1661 }
1662 
1663 static int __init
1664 cifs_init_inodecache(void)
1665 {
1666 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1667 					      sizeof(struct cifsInodeInfo),
1668 					      0, (SLAB_RECLAIM_ACCOUNT|
1669 						SLAB_ACCOUNT),
1670 					      cifs_init_once);
1671 	if (cifs_inode_cachep == NULL)
1672 		return -ENOMEM;
1673 
1674 	return 0;
1675 }
1676 
1677 static void
1678 cifs_destroy_inodecache(void)
1679 {
1680 	/*
1681 	 * Make sure all delayed rcu free inodes are flushed before we
1682 	 * destroy cache.
1683 	 */
1684 	rcu_barrier();
1685 	kmem_cache_destroy(cifs_inode_cachep);
1686 }
1687 
1688 static int
1689 cifs_init_request_bufs(void)
1690 {
1691 	/*
1692 	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1693 	 * allocate some more bytes for CIFS.
1694 	 */
1695 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1696 
1697 	if (CIFSMaxBufSize < 8192) {
1698 	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1699 	Unicode path name has to fit in any SMB/CIFS path based frames */
1700 		CIFSMaxBufSize = 8192;
1701 	} else if (CIFSMaxBufSize > 1024*127) {
1702 		CIFSMaxBufSize = 1024 * 127;
1703 	} else {
1704 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1705 	}
1706 /*
1707 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1708 		 CIFSMaxBufSize, CIFSMaxBufSize);
1709 */
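	/* the whole request buffer may be copied to/from userspace, so whitelist the full object for hardened usercopy */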
1710 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1711 					    CIFSMaxBufSize + max_hdr_size, 0,
1712 					    SLAB_HWCACHE_ALIGN, 0,
1713 					    CIFSMaxBufSize + max_hdr_size,
1714 					    NULL);
1715 	if (cifs_req_cachep == NULL)
1716 		return -ENOMEM;
1717 
1718 	if (cifs_min_rcv < 1)
1719 		cifs_min_rcv = 1;
1720 	else if (cifs_min_rcv > 64) {
1721 		cifs_min_rcv = 64;
1722 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1723 	}
1724 
1725 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1726 						  cifs_req_cachep);
1727 
1728 	if (cifs_req_poolp == NULL) {
1729 		kmem_cache_destroy(cifs_req_cachep);
1730 		return -ENOMEM;
1731 	}
1732 	/*
1733 	 * MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1734 	 * almost all handle-based requests (but not write responses, nor path-based
1735 	 * requests).  A smaller size would pack more slab objects into each 4k page
1736 	 * and so be more efficient when slab debugging is on, but this size lets
1737 	 * more SMBs use the small buffer allocator and is still far cheaper than a
1738 	 * 17K (5 page) large-buffer allocation, even with page debugging on.
1739 	 */
1740 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1741 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1742 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1743 	if (cifs_sm_req_cachep == NULL) {
1744 		mempool_destroy(cifs_req_poolp);
1745 		kmem_cache_destroy(cifs_req_cachep);
1746 		return -ENOMEM;
1747 	}
1748 
1749 	if (cifs_min_small < 2)
1750 		cifs_min_small = 2;
1751 	else if (cifs_min_small > 256) {
1752 		cifs_min_small = 256;
1753 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1754 	}
1755 
1756 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1757 						     cifs_sm_req_cachep);
1758 
1759 	if (cifs_sm_req_poolp == NULL) {
1760 		mempool_destroy(cifs_req_poolp);
1761 		kmem_cache_destroy(cifs_req_cachep);
1762 		kmem_cache_destroy(cifs_sm_req_cachep);
1763 		return -ENOMEM;
1764 	}
1765 
1766 	return 0;
1767 }
1768 
1769 static void
1770 cifs_destroy_request_bufs(void)
1771 {
1772 	mempool_destroy(cifs_req_poolp);
1773 	kmem_cache_destroy(cifs_req_cachep);
1774 	mempool_destroy(cifs_sm_req_poolp);
1775 	kmem_cache_destroy(cifs_sm_req_cachep);
1776 }
1777 
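/*
 * Each mid_q_entry tracks one in-flight request/response pair, keyed by
 * its multiplex/message id (MID), until the matching response arrives.
 */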
1778 static int init_mids(void)
1779 {
1780 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1781 					    sizeof(struct mid_q_entry), 0,
1782 					    SLAB_HWCACHE_ALIGN, NULL);
1783 	if (cifs_mid_cachep == NULL)
1784 		return -ENOMEM;
1785 
1786 	/* 3 is a reasonable minimum number of simultaneous operations */
1787 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1788 	if (cifs_mid_poolp == NULL) {
1789 		kmem_cache_destroy(cifs_mid_cachep);
1790 		return -ENOMEM;
1791 	}
1792 
1793 	return 0;
1794 }
1795 
1796 static void destroy_mids(void)
1797 {
1798 	mempool_destroy(cifs_mid_poolp);
1799 	kmem_cache_destroy(cifs_mid_cachep);
1800 }
1801 
1802 static int __init
1803 init_cifs(void)
1804 {
1805 	int rc = 0;

1806 	cifs_proc_init();
1807 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1808 	/*
1809 	 * Initialize global counters
1810 	 */
1811 	atomic_set(&sesInfoAllocCount, 0);
1812 	atomic_set(&tconInfoAllocCount, 0);
1813 	atomic_set(&tcpSesNextId, 0);
1814 	atomic_set(&tcpSesAllocCount, 0);
1815 	atomic_set(&tcpSesReconnectCount, 0);
1816 	atomic_set(&tconInfoReconnectCount, 0);
1817 
1818 	atomic_set(&buf_alloc_count, 0);
1819 	atomic_set(&small_buf_alloc_count, 0);
1820 #ifdef CONFIG_CIFS_STATS2
1821 	atomic_set(&total_buf_alloc_count, 0);
1822 	atomic_set(&total_small_buf_alloc_count, 0);
1823 	if (slow_rsp_threshold < 1)
1824 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1825 	else if (slow_rsp_threshold > 32767)
1826 		cifs_dbg(VFS,
1827 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1828 #endif /* CONFIG_CIFS_STATS2 */
1829 
1830 	atomic_set(&mid_count, 0);
1831 	GlobalCurrentXid = 0;
1832 	GlobalTotalActiveXid = 0;
1833 	GlobalMaxActiveXid = 0;
1834 	spin_lock_init(&cifs_tcp_ses_lock);
1835 	spin_lock_init(&GlobalMid_Lock);
1836 
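	/*
	 * Per-boot random value mixed into the hash of lock owners so that
	 * raw kernel pointers are not exposed to the server.
	 */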
1837 	cifs_lock_secret = get_random_u32();
1838 
1839 	if (cifs_max_pending < 2) {
1840 		cifs_max_pending = 2;
1841 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1842 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1843 		cifs_max_pending = CIFS_MAX_REQ;
1844 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1845 			 CIFS_MAX_REQ);
1846 	}
1847 
1848 	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1849 	if (dir_cache_timeout > 65000) {
1850 		dir_cache_timeout = 65000;
1851 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1852 	}
1853 
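	/*
	 * WQ_MEM_RECLAIM guarantees a rescuer thread so queued work can make
	 * progress even under memory pressure; WQ_FREEZABLE lets the queue be
	 * frozen across suspend.  A max_active of 0 selects the default limit.
	 */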
1854 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1855 	if (!cifsiod_wq) {
1856 		rc = -ENOMEM;
1857 		goto out_clean_proc;
1858 	}
1859 
1860 	/*
1861 	 * Consider setting max_active to a nonzero limit in future, perhaps
1862 	 * min(num_of_cores - 1, 3), so that we don't launch too many worker
1863 	 * threads; Documentation/core-api/workqueue.rst recommends leaving it 0.
1864 	 */
1865 
1866 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1867 	decrypt_wq = alloc_workqueue("smb3decryptd",
1868 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1869 	if (!decrypt_wq) {
1870 		rc = -ENOMEM;
1871 		goto out_destroy_cifsiod_wq;
1872 	}
1873 
1874 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1875 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1876 	if (!fileinfo_put_wq) {
1877 		rc = -ENOMEM;
1878 		goto out_destroy_decrypt_wq;
1879 	}
1880 
1881 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1882 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1883 	if (!cifsoplockd_wq) {
1884 		rc = -ENOMEM;
1885 		goto out_destroy_fileinfo_put_wq;
1886 	}
1887 
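	/*
	 * Work queue for deferred handle closes: a handle may be held open
	 * briefly after userspace closes the file so a quick re-open can
	 * reuse it.
	 */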
1888 	deferredclose_wq = alloc_workqueue("deferredclose",
1889 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1890 	if (!deferredclose_wq) {
1891 		rc = -ENOMEM;
1892 		goto out_destroy_cifsoplockd_wq;
1893 	}
1894 
1895 	rc = cifs_init_inodecache();
1896 	if (rc)
1897 		goto out_destroy_deferredclose_wq;
1898 
1899 	rc = init_mids();
1900 	if (rc)
1901 		goto out_destroy_inodecache;
1902 
1903 	rc = cifs_init_request_bufs();
1904 	if (rc)
1905 		goto out_destroy_mids;
1906 
1907 #ifdef CONFIG_CIFS_DFS_UPCALL
1908 	rc = dfs_cache_init();
1909 	if (rc)
1910 		goto out_destroy_request_bufs;
1911 #endif /* CONFIG_CIFS_DFS_UPCALL */
1912 #ifdef CONFIG_CIFS_UPCALL
1913 	rc = init_cifs_spnego();
1914 	if (rc)
1915 		goto out_destroy_dfs_cache;
1916 #endif /* CONFIG_CIFS_UPCALL */
1917 #ifdef CONFIG_CIFS_SWN_UPCALL
1918 	rc = cifs_genl_init();
1919 	if (rc)
1920 		goto out_register_key_type;
1921 #endif /* CONFIG_CIFS_SWN_UPCALL */
1922 
1923 	rc = init_cifs_idmap();
1924 	if (rc)
1925 		goto out_cifs_swn_init;
1926 
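	/*
	 * Register both filesystem type names: mounts may use -t cifs or
	 * -t smb3 (the latter is meant to reject the old SMB1 dialect).
	 */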
1927 	rc = register_filesystem(&cifs_fs_type);
1928 	if (rc)
1929 		goto out_init_cifs_idmap;
1930 
1931 	rc = register_filesystem(&smb3_fs_type);
1932 	if (rc) {
1933 		unregister_filesystem(&cifs_fs_type);
1934 		goto out_init_cifs_idmap;
1935 	}
1936 
1937 	return 0;
1938 
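/*
 * Error unwinding: each label below undoes the initialisation step just
 * above the corresponding goto, so execution falls through and releases
 * everything set up before the failure.
 */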
1939 out_init_cifs_idmap:
1940 	exit_cifs_idmap();
1941 out_cifs_swn_init:
1942 #ifdef CONFIG_CIFS_SWN_UPCALL
1943 	cifs_genl_exit();
1944 out_register_key_type:
1945 #endif
1946 #ifdef CONFIG_CIFS_UPCALL
1947 	exit_cifs_spnego();
1948 out_destroy_dfs_cache:
1949 #endif
1950 #ifdef CONFIG_CIFS_DFS_UPCALL
1951 	dfs_cache_destroy();
1952 out_destroy_request_bufs:
1953 #endif
1954 	cifs_destroy_request_bufs();
1955 out_destroy_mids:
1956 	destroy_mids();
1957 out_destroy_inodecache:
1958 	cifs_destroy_inodecache();
1959 out_destroy_deferredclose_wq:
1960 	destroy_workqueue(deferredclose_wq);
1961 out_destroy_cifsoplockd_wq:
1962 	destroy_workqueue(cifsoplockd_wq);
1963 out_destroy_fileinfo_put_wq:
1964 	destroy_workqueue(fileinfo_put_wq);
1965 out_destroy_decrypt_wq:
1966 	destroy_workqueue(decrypt_wq);
1967 out_destroy_cifsiod_wq:
1968 	destroy_workqueue(cifsiod_wq);
1969 out_clean_proc:
1970 	cifs_proc_clean();
1971 	return rc;
1972 }
1973 
1974 static void __exit
1975 exit_cifs(void)
1976 {
1977 	cifs_dbg(NOISY, "exit_smb3\n");
1978 	unregister_filesystem(&cifs_fs_type);
1979 	unregister_filesystem(&smb3_fs_type);
1980 	cifs_release_automount_timer();
1981 	exit_cifs_idmap();
1982 #ifdef CONFIG_CIFS_SWN_UPCALL
1983 	cifs_genl_exit();
1984 #endif
1985 #ifdef CONFIG_CIFS_UPCALL
1986 	exit_cifs_spnego();
1987 #endif
1988 #ifdef CONFIG_CIFS_DFS_UPCALL
1989 	dfs_cache_destroy();
1990 #endif
1991 	cifs_destroy_request_bufs();
1992 	destroy_mids();
1993 	cifs_destroy_inodecache();
1994 	destroy_workqueue(deferredclose_wq);
1995 	destroy_workqueue(cifsoplockd_wq);
1996 	destroy_workqueue(decrypt_wq);
1997 	destroy_workqueue(fileinfo_put_wq);
1998 	destroy_workqueue(cifsiod_wq);
1999 	cifs_proc_clean();
2000 }
2001 
2002 MODULE_AUTHOR("Steve French");
2003 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2004 MODULE_DESCRIPTION
2005 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2006 	"also older servers complying with the SNIA CIFS Specification)");
2007 MODULE_VERSION(CIFS_VERSION);
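/*
 * Soft dependencies on the crypto and NLS modules used for signing and
 * SMB3 encryption, presumably so initramfs generators include them
 * alongside cifs.ko.
 */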
2008 MODULE_SOFTDEP("ecb");
2009 MODULE_SOFTDEP("hmac");
2010 MODULE_SOFTDEP("md5");
2011 MODULE_SOFTDEP("nls");
2012 MODULE_SOFTDEP("aes");
2013 MODULE_SOFTDEP("cmac");
2014 MODULE_SOFTDEP("sha256");
2015 MODULE_SOFTDEP("sha512");
2016 MODULE_SOFTDEP("aead2");
2017 MODULE_SOFTDEP("ccm");
2018 MODULE_SOFTDEP("gcm");
2019 module_init(init_cifs)
2020 module_exit(exit_cifs)
2021