xref: /linux/fs/smb/client/cifsfs.c (revision 881f1bb5e25c8982ed963b2d319fc0fc732e55db)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the range should be to 119, which
56  * limits maximum year to 2099. But this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61 
/* Debug/behavior knobs; several are exported as module parameters below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
/* List of all TCP (server) sessions; guarded by cifs_tcp_ses_lock */
struct list_head	cifs_tcp_ses_list;
spinlock_t		cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* Workqueues; names indicate their users (iod, decrypt, oplock, closes) */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
__u32 cifs_lock_secret;
161 
/*
 * Bumps refcount for cifs super block.
 * Note that it should only be called if a reference to the VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 	struct cifs_sb_info *server = CIFS_SB(sb);
172 
173 	if (atomic_inc_return(&server->active) == 1)
174 		atomic_inc(&sb->s_active);
175 }
176 
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 	struct cifs_sb_info *server = CIFS_SB(sb);
181 
182 	if (atomic_dec_and_test(&server->active))
183 		deactivate_super(sb);
184 }
185 
/*
 * Fill in a freshly-created super_block for a new cifs mount: flags,
 * time-stamp granularity/range, bdi/readahead tuning, block size, root
 * inode and root dentry.  Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* A snapshot mount is a view of the past, so force it read-only. */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* Pick case-insensitive dentry ops when the share is "nocase". */
	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
286 
/*
 * Tear down the super block on unmount: release cached-directory dentries
 * and the root dentry first (they would otherwise keep the sb busy), then
 * run generic teardown and free the cifs-private mount state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
306 
/*
 * statfs(2) handler.  Name length and fsid come from tree-connect-time
 * info cached in the tcon; free/total block counts are filled in by the
 * dialect-specific queryfs op when the server supports one.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	/* Remote FS does not expose these, so report "undefined/unlimited". */
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}
338 
339 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
340 {
341 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
342 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
343 	struct TCP_Server_Info *server = tcon->ses->server;
344 
345 	if (server->ops->fallocate)
346 		return server->ops->fallocate(file, tcon, mode, off, len);
347 
348 	return -EOPNOTSUPP;
349 }
350 
351 static int cifs_permission(struct mnt_idmap *idmap,
352 			   struct inode *inode, int mask)
353 {
354 	struct cifs_sb_info *cifs_sb;
355 
356 	cifs_sb = CIFS_SB(inode->i_sb);
357 
358 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
359 		if ((mask & MAY_EXEC) && !execute_ok(inode))
360 			return -EACCES;
361 		else
362 			return 0;
363 	} else /* file mode might have been restricted at mount time
364 		on the client (above and beyond ACL on servers) for
365 		servers which do not support setting and viewing mode bits,
366 		so allowing client to check permissions is useful */
367 		return generic_permission(&nop_mnt_idmap, inode, mask);
368 }
369 
/* Slab caches for the main cifs object types (inodes, request bufs, mids) */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
/* Mempools backing the above so critical allocations can make progress */
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
377 
/*
 * Allocate and initialize a cifs inode from the slab cache.  Returns the
 * embedded VFS inode, or NULL on allocation failure.  Fields not listed
 * here are initialized by the VFS / inode_init_once path.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* Random lease key per inode for SMB2+ lease requests */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
415 
416 static void
417 cifs_free_inode(struct inode *inode)
418 {
419 	struct cifsInodeInfo *cinode = CIFS_I(inode);
420 
421 	if (S_ISLNK(inode->i_mode))
422 		kfree(cinode->symlink_target);
423 	kmem_cache_free(cifs_inode_cachep, cinode);
424 }
425 
/*
 * Final inode teardown: drop any cached pages, release the fscache cookie
 * (unusing it first if it was pinning writeback), then clear the inode.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
435 
436 static void
437 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
438 {
439 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
440 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
441 
442 	seq_puts(s, ",addr=");
443 
444 	switch (server->dstaddr.ss_family) {
445 	case AF_INET:
446 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
447 		break;
448 	case AF_INET6:
449 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
450 		if (sa6->sin6_scope_id)
451 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
452 		break;
453 	default:
454 		seq_puts(s, "(unknown)");
455 	}
456 	if (server->rdma)
457 		seq_puts(s, ",rdma");
458 }
459 
460 static void
461 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
462 {
463 	if (ses->sectype == Unspecified) {
464 		if (ses->user_name == NULL)
465 			seq_puts(s, ",sec=none");
466 		return;
467 	}
468 
469 	seq_puts(s, ",sec=");
470 
471 	switch (ses->sectype) {
472 	case NTLMv2:
473 		seq_puts(s, "ntlmv2");
474 		break;
475 	case Kerberos:
476 		seq_puts(s, "krb5");
477 		break;
478 	case RawNTLMSSP:
479 		seq_puts(s, "ntlmssp");
480 		break;
481 	default:
482 		/* shouldn't ever happen */
483 		seq_puts(s, "unknown");
484 		break;
485 	}
486 
487 	if (ses->sign)
488 		seq_puts(s, "i");
489 
490 	if (ses->sectype == Kerberos)
491 		seq_printf(s, ",cruid=%u",
492 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
493 }
494 
495 static void
496 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
497 {
498 	seq_puts(s, ",cache=");
499 
500 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
501 		seq_puts(s, "strict");
502 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
503 		seq_puts(s, "none");
504 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
505 		seq_puts(s, "singleclient"); /* assume only one client access */
506 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
507 		seq_puts(s, "ro"); /* read only caching assumed */
508 	else
509 		seq_puts(s, "loose");
510 }
511 
512 /*
513  * cifs_show_devname() is used so we show the mount device name with correct
514  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
515  */
516 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
517 {
518 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
519 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
520 
521 	if (devname == NULL)
522 		seq_puts(m, "none");
523 	else {
524 		convert_delimiter(devname, '/');
525 		/* escape all spaces in share names */
526 		seq_escape(m, devname, " \t");
527 		kfree(devname);
528 	}
529 	return 0;
530 }
531 
532 /*
533  * cifs_show_options() is for displaying mount options in /proc/mounts.
534  * Not all settable options are displayed but most of the important
535  * ones are.
536  */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	/* multiuser mounts carry per-user creds, so no single username */
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* Only show srcaddr if one was explicitly bound (family set) */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
729 
/*
 * Called for "umount -f": flush deferred closes and wake any threads
 * blocked waiting for server responses so the forced unmount can make
 * progress.  Does nothing if the share is mounted elsewhere too.
 * Lock order: cifs_tcp_ses_lock before tcon->tc_lock.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
772 
773 static int cifs_freeze(struct super_block *sb)
774 {
775 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
776 	struct cifs_tcon *tcon;
777 
778 	if (cifs_sb == NULL)
779 		return 0;
780 
781 	tcon = cifs_sb_master_tcon(cifs_sb);
782 
783 	cifs_close_all_deferred_files(tcon);
784 	return 0;
785 }
786 
#ifdef CONFIG_CIFS_STATS2
/* super_operations.show_stats hook; stats display not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
794 
/* Writeback hook: delegate to netfs to unpin pinned writeback state */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
799 
800 static int cifs_drop_inode(struct inode *inode)
801 {
802 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
803 
804 	/* no serverino => unconditional eviction */
805 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
806 		generic_drop_inode(inode);
807 }
808 
/* VFS super_operations table for cifs mounts */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
829 
830 /*
831  * Get root dentry from superblock according to prefix path mount option.
832  * Return dentry with refcount + 1 on success and NULL otherwise.
833  */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* With a prefix path the sb root already points at the right place */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* Walk the path one component at a time from the sb root */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* lookup_positive_unlocked returns the child with its own ref */
		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
883 }
884 
885 static int cifs_set_super(struct super_block *sb, void *data)
886 {
887 	struct cifs_mnt_data *mnt_data = data;
888 	sb->s_fs_info = mnt_data->cifs_sb;
889 	return set_anon_super(sb, NULL);
890 }
891 
/*
 * Mount entry point shared by the cifs and smb3 filesystem types.
 * Builds a cifs_sb from the fs context, connects to the server, then
 * finds (or creates) a matching super block and returns its root dentry.
 * Ownership note: once sget() succeeds, cifs_sb belongs to the sb (or is
 * discarded via cifs_umount when an existing sb is reused).
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	/* Take a private copy of the fs context for this mount */
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		/* sget matched an existing sb; drop our duplicate mount state */
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL when reusing an sb, so fall back to old_ctx */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
985 
986 
987 static ssize_t
988 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
989 {
990 	ssize_t rc;
991 	struct inode *inode = file_inode(iocb->ki_filp);
992 
993 	if (iocb->ki_flags & IOCB_DIRECT)
994 		return cifs_user_readv(iocb, iter);
995 
996 	rc = cifs_revalidate_mapping(inode);
997 	if (rc)
998 		return rc;
999 
1000 	return generic_file_read_iter(iocb, iter);
1001 }
1002 
/*
 * Write path for non-strict mounts.  O_DIRECT writes go straight to the
 * server and invalidate any read-cached pages; buffered writes go through
 * the page cache and are flushed immediately unless we hold a write cache
 * (oplock/lease) on the inode.
 */
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			/*
			 * The direct write bypassed the page cache, so any
			 * cached data is now stale: zap the mapping and drop
			 * the oplock so later reads go back to the server.
			 */
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	/*
	 * NOTE(review): cifs_get_writer() appears to gate writes against an
	 * in-progress oplock break; nonzero return fails the write — confirm
	 * against its definition.
	 */
	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	/* Holding a write cache: the dirty pages may stay local for now */
	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	/* No write cache: start writeback to the server right away */
	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}
1040 
/*
 * llseek for CIFS files.  SEEK_SET/SEEK_CUR need no server interaction;
 * other whence values require the cached file length to be revalidated
 * first, and are delegated to the server ops' llseek when available
 * (e.g. so SEEK_DATA/SEEK_HOLE can be answered by the server).
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* Let the server compute the offset when the dialect supports it */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1085 
1086 static int
1087 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1088 {
1089 	/*
1090 	 * Note that this is called by vfs setlease with i_lock held to
1091 	 * protect *lease from going away.
1092 	 */
1093 	struct inode *inode = file_inode(file);
1094 	struct cifsFileInfo *cfile = file->private_data;
1095 
1096 	/* Check if file is oplocked if this is request for new lease */
1097 	if (arg == F_UNLCK ||
1098 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1099 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1100 		return generic_setlease(file, arg, lease, priv);
1101 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1102 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1103 		/*
1104 		 * If the server claims to support oplock on this file, then we
1105 		 * still need to check oplock even if the local_lease mount
1106 		 * option is set, but there are servers which do not support
1107 		 * oplock for which this mount option may be useful if the user
1108 		 * knows that the file won't be changed on the server by anyone
1109 		 * else.
1110 		 */
1111 		return generic_setlease(file, arg, lease, priv);
1112 	else
1113 		return -EAGAIN;
1114 }
1115 
/*
 * Filesystem type registered as "cifs"; identical to smb3_fs_type below
 * apart from the name.
 */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1125 
/*
 * Filesystem type registered as "smb3"; shares all operations with
 * cifs_fs_type above.
 */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1136 
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1155 
/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1165 
1166 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1167 			    struct delayed_call *done)
1168 {
1169 	char *target_path;
1170 
1171 	if (!dentry)
1172 		return ERR_PTR(-ECHILD);
1173 
1174 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1175 	if (!target_path)
1176 		return ERR_PTR(-ENOMEM);
1177 
1178 	spin_lock(&inode->i_lock);
1179 	if (likely(CIFS_I(inode)->symlink_target)) {
1180 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1181 	} else {
1182 		kfree(target_path);
1183 		target_path = ERR_PTR(-EOPNOTSUPP);
1184 	}
1185 	spin_unlock(&inode->i_lock);
1186 
1187 	if (!IS_ERR(target_path))
1188 		set_delayed_call(done, kfree_link, target_path);
1189 
1190 	return target_path;
1191 }
1192 
/* Inode operations for symlinks */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1199 
1200 /*
1201  * Advance the EOF marker to after the source range.
1202  */
1203 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1204 				struct cifs_tcon *src_tcon,
1205 				unsigned int xid, loff_t src_end)
1206 {
1207 	struct cifsFileInfo *writeable_srcfile;
1208 	int rc = -EINVAL;
1209 
1210 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1211 	if (writeable_srcfile) {
1212 		if (src_tcon->ses->server->ops->set_file_size)
1213 			rc = src_tcon->ses->server->ops->set_file_size(
1214 				xid, src_tcon, writeable_srcfile,
1215 				src_inode->i_size, true /* no need to set sparse */);
1216 		else
1217 			rc = -ENOSYS;
1218 		cifsFileInfo_put(writeable_srcfile);
1219 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1220 	}
1221 
1222 	if (rc < 0)
1223 		goto set_failed;
1224 
1225 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1226 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1227 	return 0;
1228 
1229 set_failed:
1230 	return filemap_write_and_wait(src_inode->i_mapping);
1231 }
1232 
1233 /*
1234  * Flush out either the folio that overlaps the beginning of a range in which
1235  * pos resides or the folio that overlaps the end of a range unless that folio
1236  * is entirely within the range we're going to invalidate.  We extend the flush
1237  * bounds to encompass the folio.
1238  */
1239 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1240 			    bool first)
1241 {
1242 	struct folio *folio;
1243 	unsigned long long fpos, fend;
1244 	pgoff_t index = pos / PAGE_SIZE;
1245 	size_t size;
1246 	int rc = 0;
1247 
1248 	folio = filemap_get_folio(inode->i_mapping, index);
1249 	if (IS_ERR(folio))
1250 		return 0;
1251 
1252 	size = folio_size(folio);
1253 	fpos = folio_pos(folio);
1254 	fend = fpos + size - 1;
1255 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1256 	*_fend   = max_t(unsigned long long, *_fend, fend);
1257 	if ((first && pos == fpos) || (!first && pos == fend))
1258 		goto out;
1259 
1260 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1261 out:
1262 	folio_put(folio);
1263 	return rc;
1264 }
1265 
/*
 * ->remap_file_range() implementation: clone @len bytes of @src_file at
 * @off into @dst_file at @destoff using the server's duplicate_extents
 * operation.  Deduplication (REMAP_FILE_DEDUP) is not supported.
 * Returns the number of bytes cloned, or a negative error.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "clone from off through EOF" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Grow the target if the clone extended past its old EOF */
		if (rc == 0 && new_size > i_size_read(target_inode)) {
			truncate_setsize(target_inode, new_size);
			netfs_resize_file(&target_cifsi->netfs, new_size, true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1373 
/*
 * Server-side copy of @len bytes from @src_file at @off into @dst_file at
 * @destoff via the server ops' copychunk_range.  Both files must be on the
 * same SMB session.  Returns bytes copied or a negative error (-EXDEV /
 * -EOPNOTSUPP let the caller fall back to a generic copy).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* Grow the target if the copy extended past its old EOF */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1489 
1490 /*
1491  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1492  * is a dummy operation.
1493  */
1494 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1495 {
1496 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1497 		 file, datasync);
1498 
1499 	return 0;
1500 }
1501 
1502 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1503 				struct file *dst_file, loff_t destoff,
1504 				size_t len, unsigned int flags)
1505 {
1506 	unsigned int xid = get_xid();
1507 	ssize_t rc;
1508 	struct cifsFileInfo *cfile = dst_file->private_data;
1509 
1510 	if (cfile->swapfile) {
1511 		rc = -EOPNOTSUPP;
1512 		free_xid(xid);
1513 		return rc;
1514 	}
1515 
1516 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1517 					len, flags);
1518 	free_xid(xid);
1519 
1520 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1521 		rc = splice_copy_file_range(src_file, off, dst_file,
1522 					    destoff, len);
1523 	return rc;
1524 }
1525 
/* File operations: default (loose) caching, byte-range locking enabled */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1545 
/* File operations: strict cache coherency ("strictcache" mount option) */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1565 
/* File operations: uncached I/O ("cache=none" style mounts) */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1585 
/* As cifs_file_ops but without byte-range lock support ("nobrl" mounts) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1603 
/* As cifs_file_strict_ops but without byte-range lock support */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1621 
/* As cifs_file_direct_ops but without byte-range lock support */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1639 
/* File operations for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1650 
/*
 * Slab constructor for cifsInodeInfo objects: runs once per slab object,
 * not on every allocation, so only once-only initialization belongs here.
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1659 
1660 static int __init
1661 cifs_init_inodecache(void)
1662 {
1663 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1664 					      sizeof(struct cifsInodeInfo),
1665 					      0, (SLAB_RECLAIM_ACCOUNT|
1666 						SLAB_ACCOUNT),
1667 					      cifs_init_once);
1668 	if (cifs_inode_cachep == NULL)
1669 		return -ENOMEM;
1670 
1671 	return 0;
1672 }
1673 
/* Tear down the cifsInodeInfo slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1684 
1685 static int
1686 cifs_init_request_bufs(void)
1687 {
1688 	/*
1689 	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1690 	 * allocate some more bytes for CIFS.
1691 	 */
1692 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1693 
1694 	if (CIFSMaxBufSize < 8192) {
1695 	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1696 	Unicode path name has to fit in any SMB/CIFS path based frames */
1697 		CIFSMaxBufSize = 8192;
1698 	} else if (CIFSMaxBufSize > 1024*127) {
1699 		CIFSMaxBufSize = 1024 * 127;
1700 	} else {
1701 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1702 	}
1703 /*
1704 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1705 		 CIFSMaxBufSize, CIFSMaxBufSize);
1706 */
1707 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1708 					    CIFSMaxBufSize + max_hdr_size, 0,
1709 					    SLAB_HWCACHE_ALIGN, 0,
1710 					    CIFSMaxBufSize + max_hdr_size,
1711 					    NULL);
1712 	if (cifs_req_cachep == NULL)
1713 		return -ENOMEM;
1714 
1715 	if (cifs_min_rcv < 1)
1716 		cifs_min_rcv = 1;
1717 	else if (cifs_min_rcv > 64) {
1718 		cifs_min_rcv = 64;
1719 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1720 	}
1721 
1722 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1723 						  cifs_req_cachep);
1724 
1725 	if (cifs_req_poolp == NULL) {
1726 		kmem_cache_destroy(cifs_req_cachep);
1727 		return -ENOMEM;
1728 	}
1729 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1730 	almost all handle based requests (but not write response, nor is it
1731 	sufficient for path based requests).  A smaller size would have
1732 	been more efficient (compacting multiple slab items on one 4k page)
1733 	for the case in which debug was on, but this larger size allows
1734 	more SMBs to use small buffer alloc and is still much more
1735 	efficient to alloc 1 per page off the slab compared to 17K (5page)
1736 	alloc of large cifs buffers even when page debugging is on */
1737 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1738 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1739 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1740 	if (cifs_sm_req_cachep == NULL) {
1741 		mempool_destroy(cifs_req_poolp);
1742 		kmem_cache_destroy(cifs_req_cachep);
1743 		return -ENOMEM;
1744 	}
1745 
1746 	if (cifs_min_small < 2)
1747 		cifs_min_small = 2;
1748 	else if (cifs_min_small > 256) {
1749 		cifs_min_small = 256;
1750 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1751 	}
1752 
1753 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1754 						     cifs_sm_req_cachep);
1755 
1756 	if (cifs_sm_req_poolp == NULL) {
1757 		mempool_destroy(cifs_req_poolp);
1758 		kmem_cache_destroy(cifs_req_cachep);
1759 		kmem_cache_destroy(cifs_sm_req_cachep);
1760 		return -ENOMEM;
1761 	}
1762 
1763 	return 0;
1764 }
1765 
/*
 * Destroy the request-buffer mempools and their backing slab caches.
 * Each pool is destroyed before its cache, since the pool holds
 * preallocated objects from that cache.
 */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1774 
1775 static int init_mids(void)
1776 {
1777 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1778 					    sizeof(struct mid_q_entry), 0,
1779 					    SLAB_HWCACHE_ALIGN, NULL);
1780 	if (cifs_mid_cachep == NULL)
1781 		return -ENOMEM;
1782 
1783 	/* 3 is a reasonable minimum number of simultaneous operations */
1784 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1785 	if (cifs_mid_poolp == NULL) {
1786 		kmem_cache_destroy(cifs_mid_cachep);
1787 		return -ENOMEM;
1788 	}
1789 
1790 	return 0;
1791 }
1792 
/* Destroy the mid mempool, then its backing slab cache. */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1798 
1799 static int __init
1800 init_cifs(void)
1801 {
1802 	int rc = 0;
1803 	cifs_proc_init();
1804 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1805 /*
1806  *  Initialize Global counters
1807  */
1808 	atomic_set(&sesInfoAllocCount, 0);
1809 	atomic_set(&tconInfoAllocCount, 0);
1810 	atomic_set(&tcpSesNextId, 0);
1811 	atomic_set(&tcpSesAllocCount, 0);
1812 	atomic_set(&tcpSesReconnectCount, 0);
1813 	atomic_set(&tconInfoReconnectCount, 0);
1814 
1815 	atomic_set(&buf_alloc_count, 0);
1816 	atomic_set(&small_buf_alloc_count, 0);
1817 #ifdef CONFIG_CIFS_STATS2
1818 	atomic_set(&total_buf_alloc_count, 0);
1819 	atomic_set(&total_small_buf_alloc_count, 0);
1820 	if (slow_rsp_threshold < 1)
1821 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1822 	else if (slow_rsp_threshold > 32767)
1823 		cifs_dbg(VFS,
1824 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1825 #endif /* CONFIG_CIFS_STATS2 */
1826 
1827 	atomic_set(&mid_count, 0);
1828 	GlobalCurrentXid = 0;
1829 	GlobalTotalActiveXid = 0;
1830 	GlobalMaxActiveXid = 0;
1831 	spin_lock_init(&cifs_tcp_ses_lock);
1832 	spin_lock_init(&GlobalMid_Lock);
1833 
1834 	cifs_lock_secret = get_random_u32();
1835 
1836 	if (cifs_max_pending < 2) {
1837 		cifs_max_pending = 2;
1838 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1839 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1840 		cifs_max_pending = CIFS_MAX_REQ;
1841 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1842 			 CIFS_MAX_REQ);
1843 	}
1844 
1845 	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1846 	if (dir_cache_timeout > 65000) {
1847 		dir_cache_timeout = 65000;
1848 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1849 	}
1850 
1851 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1852 	if (!cifsiod_wq) {
1853 		rc = -ENOMEM;
1854 		goto out_clean_proc;
1855 	}
1856 
1857 	/*
1858 	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1859 	 * so that we don't launch too many worker threads but
1860 	 * Documentation/core-api/workqueue.rst recommends setting it to 0
1861 	 */
1862 
1863 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1864 	decrypt_wq = alloc_workqueue("smb3decryptd",
1865 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1866 	if (!decrypt_wq) {
1867 		rc = -ENOMEM;
1868 		goto out_destroy_cifsiod_wq;
1869 	}
1870 
1871 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1872 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1873 	if (!fileinfo_put_wq) {
1874 		rc = -ENOMEM;
1875 		goto out_destroy_decrypt_wq;
1876 	}
1877 
1878 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1879 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1880 	if (!cifsoplockd_wq) {
1881 		rc = -ENOMEM;
1882 		goto out_destroy_fileinfo_put_wq;
1883 	}
1884 
1885 	deferredclose_wq = alloc_workqueue("deferredclose",
1886 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1887 	if (!deferredclose_wq) {
1888 		rc = -ENOMEM;
1889 		goto out_destroy_cifsoplockd_wq;
1890 	}
1891 
1892 	serverclose_wq = alloc_workqueue("serverclose",
1893 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1894 	if (!serverclose_wq) {
1895 		rc = -ENOMEM;
1896 		goto out_destroy_serverclose_wq;
1897 	}
1898 
1899 	rc = cifs_init_inodecache();
1900 	if (rc)
1901 		goto out_destroy_deferredclose_wq;
1902 
1903 	rc = init_mids();
1904 	if (rc)
1905 		goto out_destroy_inodecache;
1906 
1907 	rc = cifs_init_request_bufs();
1908 	if (rc)
1909 		goto out_destroy_mids;
1910 
1911 #ifdef CONFIG_CIFS_DFS_UPCALL
1912 	rc = dfs_cache_init();
1913 	if (rc)
1914 		goto out_destroy_request_bufs;
1915 #endif /* CONFIG_CIFS_DFS_UPCALL */
1916 #ifdef CONFIG_CIFS_UPCALL
1917 	rc = init_cifs_spnego();
1918 	if (rc)
1919 		goto out_destroy_dfs_cache;
1920 #endif /* CONFIG_CIFS_UPCALL */
1921 #ifdef CONFIG_CIFS_SWN_UPCALL
1922 	rc = cifs_genl_init();
1923 	if (rc)
1924 		goto out_register_key_type;
1925 #endif /* CONFIG_CIFS_SWN_UPCALL */
1926 
1927 	rc = init_cifs_idmap();
1928 	if (rc)
1929 		goto out_cifs_swn_init;
1930 
1931 	rc = register_filesystem(&cifs_fs_type);
1932 	if (rc)
1933 		goto out_init_cifs_idmap;
1934 
1935 	rc = register_filesystem(&smb3_fs_type);
1936 	if (rc) {
1937 		unregister_filesystem(&cifs_fs_type);
1938 		goto out_init_cifs_idmap;
1939 	}
1940 
1941 	return 0;
1942 
1943 out_init_cifs_idmap:
1944 	exit_cifs_idmap();
1945 out_cifs_swn_init:
1946 #ifdef CONFIG_CIFS_SWN_UPCALL
1947 	cifs_genl_exit();
1948 out_register_key_type:
1949 #endif
1950 #ifdef CONFIG_CIFS_UPCALL
1951 	exit_cifs_spnego();
1952 out_destroy_dfs_cache:
1953 #endif
1954 #ifdef CONFIG_CIFS_DFS_UPCALL
1955 	dfs_cache_destroy();
1956 out_destroy_request_bufs:
1957 #endif
1958 	cifs_destroy_request_bufs();
1959 out_destroy_mids:
1960 	destroy_mids();
1961 out_destroy_inodecache:
1962 	cifs_destroy_inodecache();
1963 out_destroy_deferredclose_wq:
1964 	destroy_workqueue(deferredclose_wq);
1965 out_destroy_cifsoplockd_wq:
1966 	destroy_workqueue(cifsoplockd_wq);
1967 out_destroy_fileinfo_put_wq:
1968 	destroy_workqueue(fileinfo_put_wq);
1969 out_destroy_decrypt_wq:
1970 	destroy_workqueue(decrypt_wq);
1971 out_destroy_cifsiod_wq:
1972 	destroy_workqueue(cifsiod_wq);
1973 out_destroy_serverclose_wq:
1974 	destroy_workqueue(serverclose_wq);
1975 out_clean_proc:
1976 	cifs_proc_clean();
1977 	return rc;
1978 }
1979 
/*
 * Module exit: tear down in roughly the reverse order of init_cifs() —
 * unregister the filesystem types first so no new mounts can start, then
 * the upcall/keyring machinery, caches and workqueues, and finally the
 * /proc entries.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2008 
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/* Crypto and charset helper modules this client may need at runtime */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2028