xref: /linux/fs/smb/client/cifsfs.c (revision c71f8fb4dc911022748a378b16aad1cc9b43aad8)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (i.e. something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the year field should only go up to 119,
56  * which would limit the maximum year to 2099, but this range has not been verified.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
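/*
 * Worked example of the packing above (illustrative): SMB_DATE_MAX =
 * (127 << 9) | (12 << 5) | 31 encodes years-since-1980 = 127, month = 12,
 * day = 31, i.e. 2107-12-31, while SMB_TIME_MAX = (23 << 11) | (59 << 5) | 29
 * encodes 23:59 plus 29 two-second units, i.e. 23:59:58, since DOS times
 * only have 2-second resolution.
 */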
61 
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 unsigned int sign_CIFS_PDUs = 1;
74 
75 /*
76  * Global transaction id (XID) information
77  */
78 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
80 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
82 
83 /*
84  *  Global counters, updated atomically
85  */
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
92 
93 atomic_t mid_count;
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
99 #endif /* STATS2 */
100 struct list_head	cifs_tcp_ses_list;
101 spinlock_t		cifs_tcp_ses_lock;
102 static const struct super_operations cifs_super_ops;
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 				 "for CIFS requests. "
107 				 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 				"1 to 64");
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 				 "Range: 2 to 256");
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 				   "CIFS/SMB1 dialect (N/A for SMB3) "
120 				   "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 #ifdef CONFIG_CIFS_STATS2
126 unsigned int slow_rsp_threshold = 1;
127 module_param(slow_rsp_threshold, uint, 0644);
128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
129 				   "before logging that a response is delayed. "
130 				   "Default: 1 (if set to 0 disables msg).");
131 #endif /* STATS2 */
132 
133 module_param(enable_oplocks, bool, 0644);
134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
135 
136 module_param(enable_gcm_256, bool, 0644);
137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");
138 
139 module_param(require_gcm_256, bool, 0644);
140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
141 
142 module_param(enable_negotiate_signing, bool, 0644);
143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
144 
145 module_param(disable_legacy_dialects, bool, 0644);
146 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
147 				  "helpful to restrict the ability to "
148 				  "override the default dialects (SMB2.1, "
149 				  "SMB3 and SMB3.02) on mount with old "
150 				  "dialects (CIFS/SMB1 and SMB2) since "
151 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
152 				  " and less secure. Default: n/N/0");
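/*
 * The knobs above are ordinary module parameters (illustrative usage):
 * load-time values can be given as e.g.
 *
 *	modprobe cifs enable_oplocks=0 CIFSMaxBufSize=65536
 *
 * and the parameters registered with mode 0644 (such as enable_oplocks or
 * dir_cache_timeout) can also be changed at runtime through
 * /sys/module/cifs/parameters/.
 */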
153 
154 struct workqueue_struct	*cifsiod_wq;
155 struct workqueue_struct	*decrypt_wq;
156 struct workqueue_struct	*fileinfo_put_wq;
157 struct workqueue_struct	*cifsoplockd_wq;
158 struct workqueue_struct	*deferredclose_wq;
159 struct workqueue_struct	*serverclose_wq;
160 __u32 cifs_lock_secret;
161 
162 /*
163  * Bumps refcount for cifs super block.
164  * Note that it should be only called if a reference to VFS super block is
165  * already held, e.g. in open-type syscalls context. Otherwise it can race with
166  * atomic_dec_and_test in deactivate_locked_super.
167  */
168 void
169 cifs_sb_active(struct super_block *sb)
170 {
171 	struct cifs_sb_info *server = CIFS_SB(sb);
172 
173 	if (atomic_inc_return(&server->active) == 1)
174 		atomic_inc(&sb->s_active);
175 }
176 
177 void
178 cifs_sb_deactive(struct super_block *sb)
179 {
180 	struct cifs_sb_info *server = CIFS_SB(sb);
181 
182 	if (atomic_dec_and_test(&server->active))
183 		deactivate_super(sb);
184 }
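
/*
 * Illustrative pairing (sketch): a caller that already holds a VFS reference
 * to the superblock brackets long-running work with these helpers, e.g.
 *
 *	cifs_sb_active(inode->i_sb);
 *	... work that must not outlive the mount ...
 *	cifs_sb_deactive(inode->i_sb);
 *
 * so the superblock is not torn down until the matching deactive call.
 */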
185 
186 static int
187 cifs_read_super(struct super_block *sb)
188 {
189 	struct inode *inode;
190 	struct cifs_sb_info *cifs_sb;
191 	struct cifs_tcon *tcon;
192 	struct timespec64 ts;
193 	int rc = 0;
194 
195 	cifs_sb = CIFS_SB(sb);
196 	tcon = cifs_sb_master_tcon(cifs_sb);
197 
198 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
199 		sb->s_flags |= SB_POSIXACL;
200 
201 	if (tcon->snapshot_time)
202 		sb->s_flags |= SB_RDONLY;
203 
204 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
205 		sb->s_maxbytes = MAX_LFS_FILESIZE;
206 	else
207 		sb->s_maxbytes = MAX_NON_LFS;
208 
209 	/*
210 	 * Some very old servers, like DOS and OS/2, used 2 second granularity
211 	 * (while all current servers use 100ns granularity - see MS-DTYP),
212 	 * but 1 second is the coarsest granularity allowed by the VFS,
213 	 * so for old servers set the time granularity to 1 second, while for
214 	 * everything else (current servers) set it to 100ns.
215 	 */
216 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
217 	    ((tcon->ses->capabilities &
218 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
219 	    !tcon->unix_ext) {
220 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
221 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
222 		sb->s_time_min = ts.tv_sec;
223 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
224 				    cpu_to_le16(SMB_TIME_MAX), 0);
225 		sb->s_time_max = ts.tv_sec;
226 	} else {
227 		/*
228 		 * Almost every server, including all SMB2+, uses DCE TIME
229 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
230 		 */
231 		sb->s_time_gran = 100;
232 		ts = cifs_NTtimeToUnix(0);
233 		sb->s_time_min = ts.tv_sec;
234 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
235 		sb->s_time_max = ts.tv_sec;
236 	}
237 
238 	sb->s_magic = CIFS_SUPER_MAGIC;
239 	sb->s_op = &cifs_super_ops;
240 	sb->s_xattr = cifs_xattr_handlers;
241 	rc = super_setup_bdi(sb);
242 	if (rc)
243 		goto out_no_root;
244 	/* tune readahead according to rsize if readahead size not set on mount */
245 	if (cifs_sb->ctx->rsize == 0)
246 		cifs_sb->ctx->rsize =
247 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
248 	if (cifs_sb->ctx->rasize)
249 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
250 	else
251 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
252 
253 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
254 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
255 	inode = cifs_root_iget(sb);
256 
257 	if (IS_ERR(inode)) {
258 		rc = PTR_ERR(inode);
259 		goto out_no_root;
260 	}
261 
262 	if (tcon->nocase)
263 		sb->s_d_op = &cifs_ci_dentry_ops;
264 	else
265 		sb->s_d_op = &cifs_dentry_ops;
266 
267 	sb->s_root = d_make_root(inode);
268 	if (!sb->s_root) {
269 		rc = -ENOMEM;
270 		goto out_no_root;
271 	}
272 
273 #ifdef CONFIG_CIFS_NFSD_EXPORT
274 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
275 		cifs_dbg(FYI, "export ops supported\n");
276 		sb->s_export_op = &cifs_export_ops;
277 	}
278 #endif /* CONFIG_CIFS_NFSD_EXPORT */
279 
280 	return 0;
281 
282 out_no_root:
283 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
284 	return rc;
285 }
286 
287 static void cifs_kill_sb(struct super_block *sb)
288 {
289 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
290 
291 	/*
292 	 * We need to release all dentries for the cached directories
293 	 * before we kill the sb.
294 	 */
295 	if (cifs_sb->root) {
296 		close_all_cached_dirs(cifs_sb);
297 
298 		/* finally release root dentry */
299 		dput(cifs_sb->root);
300 		cifs_sb->root = NULL;
301 	}
302 
303 	kill_anon_super(sb);
304 	cifs_umount(cifs_sb);
305 }
306 
307 static int
308 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
309 {
310 	struct super_block *sb = dentry->d_sb;
311 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
312 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
313 	struct TCP_Server_Info *server = tcon->ses->server;
314 	unsigned int xid;
315 	int rc = 0;
316 	const char *full_path;
317 	void *page;
318 
319 	xid = get_xid();
320 	page = alloc_dentry_path();
321 
322 	full_path = build_path_from_dentry(dentry, page);
323 	if (IS_ERR(full_path)) {
324 		rc = PTR_ERR(full_path);
325 		goto statfs_out;
326 	}
327 
328 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
329 		buf->f_namelen =
330 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
331 	else
332 		buf->f_namelen = PATH_MAX;
333 
334 	buf->f_fsid.val[0] = tcon->vol_serial_number;
335 	/* we use part of the create time for more randomness, see man statfs */
336 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
337 
338 	buf->f_files = 0;	/* undefined */
339 	buf->f_ffree = 0;	/* unlimited */
340 
341 	if (server->ops->queryfs)
342 		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
343 
344 statfs_out:
345 	free_dentry_path(page);
346 	free_xid(xid);
347 	return rc;
348 }
349 
350 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
351 {
352 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
353 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
354 	struct TCP_Server_Info *server = tcon->ses->server;
355 
356 	if (server->ops->fallocate)
357 		return server->ops->fallocate(file, tcon, mode, off, len);
358 
359 	return -EOPNOTSUPP;
360 }
361 
362 static int cifs_permission(struct mnt_idmap *idmap,
363 			   struct inode *inode, int mask)
364 {
365 	struct cifs_sb_info *cifs_sb;
366 
367 	cifs_sb = CIFS_SB(inode->i_sb);
368 
369 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
370 		if ((mask & MAY_EXEC) && !execute_ok(inode))
371 			return -EACCES;
372 		else
373 			return 0;
374 	} else /* file mode might have been restricted at mount time
375 		on the client (above and beyond the ACL on the server) for
376 		servers which do not support setting and viewing mode bits,
377 		so allowing the client to check permissions is useful */
378 		return generic_permission(&nop_mnt_idmap, inode, mask);
379 }
380 
381 static struct kmem_cache *cifs_inode_cachep;
382 static struct kmem_cache *cifs_req_cachep;
383 static struct kmem_cache *cifs_mid_cachep;
384 static struct kmem_cache *cifs_sm_req_cachep;
385 static struct kmem_cache *cifs_io_request_cachep;
386 static struct kmem_cache *cifs_io_subrequest_cachep;
387 mempool_t *cifs_sm_req_poolp;
388 mempool_t *cifs_req_poolp;
389 mempool_t *cifs_mid_poolp;
390 mempool_t cifs_io_request_pool;
391 mempool_t cifs_io_subrequest_pool;
392 
393 static struct inode *
394 cifs_alloc_inode(struct super_block *sb)
395 {
396 	struct cifsInodeInfo *cifs_inode;
397 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
398 	if (!cifs_inode)
399 		return NULL;
400 	cifs_inode->cifsAttrs = 0x20;	/* default */
401 	cifs_inode->time = 0;
402 	/*
403 	 * Until the file is open and we have gotten oplock info back from the
404 	 * server, we cannot assume caching of file data or metadata.
405 	 */
406 	cifs_set_oplock_level(cifs_inode, 0);
407 	cifs_inode->lease_granted = false;
408 	cifs_inode->flags = 0;
409 	spin_lock_init(&cifs_inode->writers_lock);
410 	cifs_inode->writers = 0;
411 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
412 	cifs_inode->netfs.remote_i_size = 0;
413 	cifs_inode->uniqueid = 0;
414 	cifs_inode->createtime = 0;
415 	cifs_inode->epoch = 0;
416 	spin_lock_init(&cifs_inode->open_file_lock);
417 	generate_random_uuid(cifs_inode->lease_key);
418 	cifs_inode->symlink_target = NULL;
419 
420 	/*
421 	 * Can not set i_flags here - they get immediately overwritten to zero
422 	 * by the VFS.
423 	 */
424 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
425 	INIT_LIST_HEAD(&cifs_inode->openFileList);
426 	INIT_LIST_HEAD(&cifs_inode->llist);
427 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
428 	spin_lock_init(&cifs_inode->deferred_lock);
429 	return &cifs_inode->netfs.inode;
430 }
431 
432 static void
433 cifs_free_inode(struct inode *inode)
434 {
435 	struct cifsInodeInfo *cinode = CIFS_I(inode);
436 
437 	if (S_ISLNK(inode->i_mode))
438 		kfree(cinode->symlink_target);
439 	kmem_cache_free(cifs_inode_cachep, cinode);
440 }
441 
442 static void
443 cifs_evict_inode(struct inode *inode)
444 {
445 	netfs_wait_for_outstanding_io(inode);
446 	truncate_inode_pages_final(&inode->i_data);
447 	if (inode->i_state & I_PINNING_NETFS_WB)
448 		cifs_fscache_unuse_inode_cookie(inode, true);
449 	cifs_fscache_release_inode_cookie(inode);
450 	clear_inode(inode);
451 }
452 
453 static void
454 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
455 {
456 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
457 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
458 
459 	seq_puts(s, ",addr=");
460 
461 	switch (server->dstaddr.ss_family) {
462 	case AF_INET:
463 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
464 		break;
465 	case AF_INET6:
466 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
467 		if (sa6->sin6_scope_id)
468 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
469 		break;
470 	default:
471 		seq_puts(s, "(unknown)");
472 	}
473 	if (server->rdma)
474 		seq_puts(s, ",rdma");
475 }
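
/*
 * Example output (illustrative): for an IPv4 server this emits
 * ",addr=192.168.1.10", and for a link-local IPv6 server with scope id 2
 * something like ",addr=fe80::1%2", with ",rdma" appended when SMB Direct
 * is in use.
 */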
476 
477 static void
478 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
479 {
480 	if (ses->sectype == Unspecified) {
481 		if (ses->user_name == NULL)
482 			seq_puts(s, ",sec=none");
483 		return;
484 	}
485 
486 	seq_puts(s, ",sec=");
487 
488 	switch (ses->sectype) {
489 	case NTLMv2:
490 		seq_puts(s, "ntlmv2");
491 		break;
492 	case Kerberos:
493 		seq_puts(s, "krb5");
494 		break;
495 	case RawNTLMSSP:
496 		seq_puts(s, "ntlmssp");
497 		break;
498 	default:
499 		/* shouldn't ever happen */
500 		seq_puts(s, "unknown");
501 		break;
502 	}
503 
504 	if (ses->sign)
505 		seq_puts(s, "i");
506 
507 	if (ses->sectype == Kerberos)
508 		seq_printf(s, ",cruid=%u",
509 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
510 }
511 
512 static void
513 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
514 {
515 	seq_puts(s, ",cache=");
516 
517 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
518 		seq_puts(s, "strict");
519 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
520 		seq_puts(s, "none");
521 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
522 		seq_puts(s, "singleclient"); /* assume only one client access */
523 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
524 		seq_puts(s, "ro"); /* read only caching assumed */
525 	else
526 		seq_puts(s, "loose");
527 }
528 
529 /*
530  * cifs_show_devname() is used to show the mount device name in the correct
531  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
532  */
533 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
534 {
535 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
536 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
537 
538 	if (devname == NULL)
539 		seq_puts(m, "none");
540 	else {
541 		convert_delimiter(devname, '/');
542 		/* escape all spaces in share names */
543 		seq_escape(m, devname, " \t");
544 		kfree(devname);
545 	}
546 	return 0;
547 }
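
/*
 * For example (illustrative): a source of "\\server\share name" appears in
 * /proc/mounts as "//server/share\040name" - convert_delimiter() flips the
 * backslashes and seq_escape() octal-escapes spaces and tabs in the share
 * name.
 */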
548 
549 /*
550  * cifs_show_options() is for displaying mount options in /proc/mounts.
551  * Not all settable options are displayed but most of the important
552  * ones are.
553  */
554 static int
555 cifs_show_options(struct seq_file *s, struct dentry *root)
556 {
557 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
558 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
559 	struct sockaddr *srcaddr;
560 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
561 
562 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
563 	cifs_show_security(s, tcon->ses);
564 	cifs_show_cache_flavor(s, cifs_sb);
565 
566 	if (tcon->no_lease)
567 		seq_puts(s, ",nolease");
568 	if (cifs_sb->ctx->multiuser)
569 		seq_puts(s, ",multiuser");
570 	else if (tcon->ses->user_name)
571 		seq_show_option(s, "username", tcon->ses->user_name);
572 
573 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
574 		seq_show_option(s, "domain", tcon->ses->domainName);
575 
576 	if (srcaddr->sa_family != AF_UNSPEC) {
577 		struct sockaddr_in *saddr4;
578 		struct sockaddr_in6 *saddr6;
579 		saddr4 = (struct sockaddr_in *)srcaddr;
580 		saddr6 = (struct sockaddr_in6 *)srcaddr;
581 		if (srcaddr->sa_family == AF_INET6)
582 			seq_printf(s, ",srcaddr=%pI6c",
583 				   &saddr6->sin6_addr);
584 		else if (srcaddr->sa_family == AF_INET)
585 			seq_printf(s, ",srcaddr=%pI4",
586 				   &saddr4->sin_addr.s_addr);
587 		else
588 			seq_printf(s, ",srcaddr=BAD-AF:%i",
589 				   (int)(srcaddr->sa_family));
590 	}
591 
592 	seq_printf(s, ",uid=%u",
593 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
594 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
595 		seq_puts(s, ",forceuid");
596 	else
597 		seq_puts(s, ",noforceuid");
598 
599 	seq_printf(s, ",gid=%u",
600 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
601 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
602 		seq_puts(s, ",forcegid");
603 	else
604 		seq_puts(s, ",noforcegid");
605 
606 	cifs_show_address(s, tcon->ses->server);
607 
608 	if (!tcon->unix_ext)
609 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
610 					   cifs_sb->ctx->file_mode,
611 					   cifs_sb->ctx->dir_mode);
612 	if (cifs_sb->ctx->iocharset)
613 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
614 	if (tcon->seal)
615 		seq_puts(s, ",seal");
616 	else if (tcon->ses->server->ignore_signature)
617 		seq_puts(s, ",signloosely");
618 	if (tcon->nocase)
619 		seq_puts(s, ",nocase");
620 	if (tcon->nodelete)
621 		seq_puts(s, ",nodelete");
622 	if (cifs_sb->ctx->no_sparse)
623 		seq_puts(s, ",nosparse");
624 	if (tcon->local_lease)
625 		seq_puts(s, ",locallease");
626 	if (tcon->retry)
627 		seq_puts(s, ",hard");
628 	else
629 		seq_puts(s, ",soft");
630 	if (tcon->use_persistent)
631 		seq_puts(s, ",persistenthandles");
632 	else if (tcon->use_resilient)
633 		seq_puts(s, ",resilienthandles");
634 	if (tcon->posix_extensions)
635 		seq_puts(s, ",posix");
636 	else if (tcon->unix_ext)
637 		seq_puts(s, ",unix");
638 	else
639 		seq_puts(s, ",nounix");
640 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
641 		seq_puts(s, ",nodfs");
642 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
643 		seq_puts(s, ",posixpaths");
644 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
645 		seq_puts(s, ",setuids");
646 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
647 		seq_puts(s, ",idsfromsid");
648 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
649 		seq_puts(s, ",serverino");
650 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
651 		seq_puts(s, ",rwpidforward");
652 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
653 		seq_puts(s, ",forcemand");
654 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
655 		seq_puts(s, ",nouser_xattr");
656 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
657 		seq_puts(s, ",mapchars");
658 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
659 		seq_puts(s, ",mapposix");
660 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
661 		seq_puts(s, ",sfu");
662 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
663 		seq_puts(s, ",nobrl");
664 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
665 		seq_puts(s, ",nohandlecache");
666 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
667 		seq_puts(s, ",modefromsid");
668 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
669 		seq_puts(s, ",cifsacl");
670 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
671 		seq_puts(s, ",dynperm");
672 	if (root->d_sb->s_flags & SB_POSIXACL)
673 		seq_puts(s, ",acl");
674 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
675 		seq_puts(s, ",mfsymlinks");
676 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
677 		seq_puts(s, ",fsc");
678 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
679 		seq_puts(s, ",nostrictsync");
680 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
681 		seq_puts(s, ",noperm");
682 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
683 		seq_printf(s, ",backupuid=%u",
684 			   from_kuid_munged(&init_user_ns,
685 					    cifs_sb->ctx->backupuid));
686 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
687 		seq_printf(s, ",backupgid=%u",
688 			   from_kgid_munged(&init_user_ns,
689 					    cifs_sb->ctx->backupgid));
690 	seq_show_option(s, "reparse",
691 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
692 
693 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
694 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
695 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
696 	if (cifs_sb->ctx->rasize)
697 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
698 	if (tcon->ses->server->min_offload)
699 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
700 	if (tcon->ses->server->retrans)
701 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
702 	seq_printf(s, ",echo_interval=%lu",
703 			tcon->ses->server->echo_interval / HZ);
704 
705 	/* Only display the following if overridden on mount */
706 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
707 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
708 	if (tcon->ses->server->tcp_nodelay)
709 		seq_puts(s, ",tcpnodelay");
710 	if (tcon->ses->server->noautotune)
711 		seq_puts(s, ",noautotune");
712 	if (tcon->ses->server->noblocksnd)
713 		seq_puts(s, ",noblocksend");
714 	if (tcon->ses->server->nosharesock)
715 		seq_puts(s, ",nosharesock");
716 
717 	if (tcon->snapshot_time)
718 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
719 	if (tcon->handle_timeout)
720 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
721 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
722 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
723 
724 	/*
725 	 * Display the file and directory attribute timeouts in seconds.
726 	 * If the file and directory attribute timeouts are the same then
727 	 * actimeo was likely specified on mount.
728 	 */
729 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
730 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
731 	else {
732 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
733 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
734 	}
735 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
736 
737 	if (tcon->ses->chan_max > 1)
738 		seq_printf(s, ",multichannel,max_channels=%zu",
739 			   tcon->ses->chan_max);
740 
741 	if (tcon->use_witness)
742 		seq_puts(s, ",witness");
743 
744 	return 0;
745 }
746 
747 static void cifs_umount_begin(struct super_block *sb)
748 {
749 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
750 	struct cifs_tcon *tcon;
751 
752 	if (cifs_sb == NULL)
753 		return;
754 
755 	tcon = cifs_sb_master_tcon(cifs_sb);
756 
757 	spin_lock(&cifs_tcp_ses_lock);
758 	spin_lock(&tcon->tc_lock);
759 	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
760 			    netfs_trace_tcon_ref_see_umount);
761 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
762 		/* we have other mounts to the same share, or we have
763 		   already tried to umount this and woken up
764 		   all waiting network requests; nothing to do */
765 		spin_unlock(&tcon->tc_lock);
766 		spin_unlock(&cifs_tcp_ses_lock);
767 		return;
768 	}
769 	/*
770 	 * cannot set tcon->status to TID_EXITING yet since we don't know if umount -f will
771 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before the tdis request is sent
772 	 */
773 	spin_unlock(&tcon->tc_lock);
774 	spin_unlock(&cifs_tcp_ses_lock);
775 
776 	cifs_close_all_deferred_files(tcon);
777 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
778 	/* cancel_notify_requests(tcon); */
779 	if (tcon->ses && tcon->ses->server) {
780 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
781 		wake_up_all(&tcon->ses->server->request_q);
782 		wake_up_all(&tcon->ses->server->response_q);
783 		msleep(1); /* yield */
784 		/* we have to kick the requests once more */
785 		wake_up_all(&tcon->ses->server->response_q);
786 		msleep(1);
787 	}
788 
789 	return;
790 }
791 
792 static int cifs_freeze(struct super_block *sb)
793 {
794 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
795 	struct cifs_tcon *tcon;
796 
797 	if (cifs_sb == NULL)
798 		return 0;
799 
800 	tcon = cifs_sb_master_tcon(cifs_sb);
801 
802 	cifs_close_all_deferred_files(tcon);
803 	return 0;
804 }
805 
806 #ifdef CONFIG_CIFS_STATS2
807 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
808 {
809 	/* BB FIXME */
810 	return 0;
811 }
812 #endif
813 
814 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
815 {
816 	return netfs_unpin_writeback(inode, wbc);
817 }
818 
819 static int cifs_drop_inode(struct inode *inode)
820 {
821 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
822 
823 	/* no serverino => unconditional eviction */
824 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
825 		generic_drop_inode(inode);
826 }
827 
828 static const struct super_operations cifs_super_ops = {
829 	.statfs = cifs_statfs,
830 	.alloc_inode = cifs_alloc_inode,
831 	.write_inode	= cifs_write_inode,
832 	.free_inode = cifs_free_inode,
833 	.drop_inode	= cifs_drop_inode,
834 	.evict_inode	= cifs_evict_inode,
835 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
836 	.show_devname   = cifs_show_devname,
837 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
838 	function unless later we add lazy close of inodes or unless the
839 	kernel forgets to call us with the same number of releases (closes)
840 	as opens */
841 	.show_options = cifs_show_options,
842 	.umount_begin   = cifs_umount_begin,
843 	.freeze_fs      = cifs_freeze,
844 #ifdef CONFIG_CIFS_STATS2
845 	.show_stats = cifs_show_stats,
846 #endif
847 };
848 
849 /*
850  * Get root dentry from superblock according to prefix path mount option.
851  * Return dentry with refcount + 1 on success and NULL otherwise.
852  */
853 static struct dentry *
854 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
855 {
856 	struct dentry *dentry;
857 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
858 	char *full_path = NULL;
859 	char *s, *p;
860 	char sep;
861 
862 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
863 		return dget(sb->s_root);
864 
865 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
866 				cifs_sb_master_tcon(cifs_sb), 0);
867 	if (full_path == NULL)
868 		return ERR_PTR(-ENOMEM);
869 
870 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
871 
872 	sep = CIFS_DIR_SEP(cifs_sb);
873 	dentry = dget(sb->s_root);
874 	s = full_path;
875 
876 	do {
877 		struct inode *dir = d_inode(dentry);
878 		struct dentry *child;
879 
880 		if (!S_ISDIR(dir->i_mode)) {
881 			dput(dentry);
882 			dentry = ERR_PTR(-ENOTDIR);
883 			break;
884 		}
885 
886 		/* skip separators */
887 		while (*s == sep)
888 			s++;
889 		if (!*s)
890 			break;
891 		p = s++;
892 		/* next separator */
893 		while (*s && *s != sep)
894 			s++;
895 
896 		child = lookup_positive_unlocked(p, dentry, s - p);
897 		dput(dentry);
898 		dentry = child;
899 	} while (!IS_ERR(dentry));
900 	kfree(full_path);
901 	return dentry;
902 }
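
/*
 * Illustrative walk (sketch): for a mount of //server/share/a/b,
 * cifs_build_path_to_root() yields the prefix path to the root, and the loop
 * above looks up "a" under sb->s_root and then "b" under "a", returning the
 * dentry for "b" with an extra reference, or an ERR_PTR if a component is
 * missing or is not a directory.
 */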
903 
904 static int cifs_set_super(struct super_block *sb, void *data)
905 {
906 	struct cifs_mnt_data *mnt_data = data;
907 	sb->s_fs_info = mnt_data->cifs_sb;
908 	return set_anon_super(sb, NULL);
909 }
910 
911 struct dentry *
912 cifs_smb3_do_mount(struct file_system_type *fs_type,
913 	      int flags, struct smb3_fs_context *old_ctx)
914 {
915 	struct cifs_mnt_data mnt_data;
916 	struct cifs_sb_info *cifs_sb;
917 	struct super_block *sb;
918 	struct dentry *root;
919 	int rc;
920 
921 	if (cifsFYI) {
922 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
923 			 old_ctx->source, flags);
924 	} else {
925 		cifs_info("Attempting to mount %s\n", old_ctx->source);
926 	}
927 
928 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
929 	if (!cifs_sb)
930 		return ERR_PTR(-ENOMEM);
931 
932 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
933 	if (!cifs_sb->ctx) {
934 		root = ERR_PTR(-ENOMEM);
935 		goto out;
936 	}
937 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
938 	if (rc) {
939 		root = ERR_PTR(rc);
940 		goto out;
941 	}
942 
943 	rc = cifs_setup_cifs_sb(cifs_sb);
944 	if (rc) {
945 		root = ERR_PTR(rc);
946 		goto out;
947 	}
948 
949 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
950 	if (rc) {
951 		if (!(flags & SB_SILENT))
952 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
953 				 rc);
954 		root = ERR_PTR(rc);
955 		goto out;
956 	}
957 
958 	mnt_data.ctx = cifs_sb->ctx;
959 	mnt_data.cifs_sb = cifs_sb;
960 	mnt_data.flags = flags;
961 
962 	/* BB should we make this contingent on mount parm? */
963 	flags |= SB_NODIRATIME | SB_NOATIME;
964 
965 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
966 	if (IS_ERR(sb)) {
967 		cifs_umount(cifs_sb);
968 		return ERR_CAST(sb);
969 	}
970 
971 	if (sb->s_root) {
972 		cifs_dbg(FYI, "Use existing superblock\n");
973 		cifs_umount(cifs_sb);
974 		cifs_sb = NULL;
975 	} else {
976 		rc = cifs_read_super(sb);
977 		if (rc) {
978 			root = ERR_PTR(rc);
979 			goto out_super;
980 		}
981 
982 		sb->s_flags |= SB_ACTIVE;
983 	}
984 
985 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
986 	if (IS_ERR(root))
987 		goto out_super;
988 
989 	if (cifs_sb)
990 		cifs_sb->root = dget(root);
991 
992 	cifs_dbg(FYI, "dentry root is: %p\n", root);
993 	return root;
994 
995 out_super:
996 	deactivate_locked_super(sb);
997 	return root;
998 out:
999 	kfree(cifs_sb->prepath);
1000 	smb3_cleanup_fs_context(cifs_sb->ctx);
1001 	kfree(cifs_sb);
1002 	return root;
1003 }
1004 
1005 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1006 {
1007 	struct cifsFileInfo *cfile = file->private_data;
1008 	struct cifs_tcon *tcon;
1009 
1010 	/*
1011 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1012 	 * the cached file length
1013 	 */
1014 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1015 		int rc;
1016 		struct inode *inode = file_inode(file);
1017 
1018 		/*
1019 		 * We need to be sure that all dirty pages are written and the
1020 		 * server has the newest file length.
1021 		 */
1022 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1023 		    inode->i_mapping->nrpages != 0) {
1024 			rc = filemap_fdatawait(inode->i_mapping);
1025 			if (rc) {
1026 				mapping_set_error(inode->i_mapping, rc);
1027 				return rc;
1028 			}
1029 		}
1030 		/*
1031 		 * Some applications poll for the file length in this strange
1032 		 * way so we must seek to end on non-oplocked files by
1033 		 * setting the revalidate time to zero.
1034 		 */
1035 		CIFS_I(inode)->time = 0;
1036 
1037 		rc = cifs_revalidate_file_attr(file);
1038 		if (rc < 0)
1039 			return (loff_t)rc;
1040 	}
1041 	if (cfile && cfile->tlink) {
1042 		tcon = tlink_tcon(cfile->tlink);
1043 		if (tcon->ses->server->ops->llseek)
1044 			return tcon->ses->server->ops->llseek(file, tcon,
1045 							      offset, whence);
1046 	}
1047 	return generic_file_llseek(file, offset, whence);
1048 }
1049 
1050 static int
1051 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1052 {
1053 	/*
1054 	 * Note that this is called by vfs setlease with i_lock held to
1055 	 * protect *lease from going away.
1056 	 */
1057 	struct inode *inode = file_inode(file);
1058 	struct cifsFileInfo *cfile = file->private_data;
1059 
1060 	/* Check if file is oplocked if this is request for new lease */
1061 	if (arg == F_UNLCK ||
1062 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1063 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1064 		return generic_setlease(file, arg, lease, priv);
1065 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1066 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1067 		/*
1068 		 * If the server claims to support oplock on this file, then we
1069 		 * still need to check oplock even if the local_lease mount
1070 		 * option is set, but there are servers which do not support
1071 		 * oplock for which this mount option may be useful if the user
1072 		 * knows that the file won't be changed on the server by anyone
1073 		 * else.
1074 		 */
1075 		return generic_setlease(file, arg, lease, priv);
1076 	else
1077 		return -EAGAIN;
1078 }
1079 
1080 struct file_system_type cifs_fs_type = {
1081 	.owner = THIS_MODULE,
1082 	.name = "cifs",
1083 	.init_fs_context = smb3_init_fs_context,
1084 	.parameters = smb3_fs_parameters,
1085 	.kill_sb = cifs_kill_sb,
1086 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1087 };
1088 MODULE_ALIAS_FS("cifs");
1089 
1090 struct file_system_type smb3_fs_type = {
1091 	.owner = THIS_MODULE,
1092 	.name = "smb3",
1093 	.init_fs_context = smb3_init_fs_context,
1094 	.parameters = smb3_fs_parameters,
1095 	.kill_sb = cifs_kill_sb,
1096 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1097 };
1098 MODULE_ALIAS_FS("smb3");
1099 MODULE_ALIAS("smb3");
1100 
1101 const struct inode_operations cifs_dir_inode_ops = {
1102 	.create = cifs_create,
1103 	.atomic_open = cifs_atomic_open,
1104 	.lookup = cifs_lookup,
1105 	.getattr = cifs_getattr,
1106 	.unlink = cifs_unlink,
1107 	.link = cifs_hardlink,
1108 	.mkdir = cifs_mkdir,
1109 	.rmdir = cifs_rmdir,
1110 	.rename = cifs_rename2,
1111 	.permission = cifs_permission,
1112 	.setattr = cifs_setattr,
1113 	.symlink = cifs_symlink,
1114 	.mknod   = cifs_mknod,
1115 	.listxattr = cifs_listxattr,
1116 	.get_acl = cifs_get_acl,
1117 	.set_acl = cifs_set_acl,
1118 };
1119 
1120 const struct inode_operations cifs_file_inode_ops = {
1121 	.setattr = cifs_setattr,
1122 	.getattr = cifs_getattr,
1123 	.permission = cifs_permission,
1124 	.listxattr = cifs_listxattr,
1125 	.fiemap = cifs_fiemap,
1126 	.get_acl = cifs_get_acl,
1127 	.set_acl = cifs_set_acl,
1128 };
1129 
1130 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1131 			    struct delayed_call *done)
1132 {
1133 	char *target_path;
1134 
1135 	if (!dentry)
1136 		return ERR_PTR(-ECHILD);
1137 
1138 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1139 	if (!target_path)
1140 		return ERR_PTR(-ENOMEM);
1141 
1142 	spin_lock(&inode->i_lock);
1143 	if (likely(CIFS_I(inode)->symlink_target)) {
1144 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1145 	} else {
1146 		kfree(target_path);
1147 		target_path = ERR_PTR(-EOPNOTSUPP);
1148 	}
1149 	spin_unlock(&inode->i_lock);
1150 
1151 	if (!IS_ERR(target_path))
1152 		set_delayed_call(done, kfree_link, target_path);
1153 
1154 	return target_path;
1155 }
1156 
1157 const struct inode_operations cifs_symlink_inode_ops = {
1158 	.get_link = cifs_get_link,
1159 	.setattr = cifs_setattr,
1160 	.permission = cifs_permission,
1161 	.listxattr = cifs_listxattr,
1162 };
1163 
1164 /*
1165  * Advance the EOF marker to after the source range.
1166  */
1167 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1168 				struct cifs_tcon *src_tcon,
1169 				unsigned int xid, loff_t src_end)
1170 {
1171 	struct cifsFileInfo *writeable_srcfile;
1172 	int rc = -EINVAL;
1173 
1174 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1175 	if (writeable_srcfile) {
1176 		if (src_tcon->ses->server->ops->set_file_size)
1177 			rc = src_tcon->ses->server->ops->set_file_size(
1178 				xid, src_tcon, writeable_srcfile,
1179 				src_inode->i_size, true /* no need to set sparse */);
1180 		else
1181 			rc = -ENOSYS;
1182 		cifsFileInfo_put(writeable_srcfile);
1183 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1184 	}
1185 
1186 	if (rc < 0)
1187 		goto set_failed;
1188 
1189 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1190 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1191 	return 0;
1192 
1193 set_failed:
1194 	return filemap_write_and_wait(src_inode->i_mapping);
1195 }
1196 
1197 /*
1198  * Flush out either the folio that overlaps the beginning of a range in which
1199  * pos resides or the folio that overlaps the end of a range unless that folio
1200  * is entirely within the range we're going to invalidate.  We extend the flush
1201  * bounds to encompass the folio.
1202  */
1203 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1204 			    bool first)
1205 {
1206 	struct folio *folio;
1207 	unsigned long long fpos, fend;
1208 	pgoff_t index = pos / PAGE_SIZE;
1209 	size_t size;
1210 	int rc = 0;
1211 
1212 	folio = filemap_get_folio(inode->i_mapping, index);
1213 	if (IS_ERR(folio))
1214 		return 0;
1215 
1216 	size = folio_size(folio);
1217 	fpos = folio_pos(folio);
1218 	fend = fpos + size - 1;
1219 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1220 	*_fend   = max_t(unsigned long long, *_fend, fend);
1221 	if ((first && pos == fpos) || (!first && pos == fend))
1222 		goto out;
1223 
1224 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1225 out:
1226 	folio_put(folio);
1227 	return rc;
1228 }
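
/*
 * Example (illustrative, assuming 4K folios): if the range to be invalidated
 * starts at file offset 0x1100, the folio covering 0x1000-0x1fff extends
 * outside the range, so it is written back and waited on first; had the range
 * started exactly at 0x1000, the boundary check above would skip the flush
 * for that folio.
 */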
1229 
1230 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1231 		struct file *dst_file, loff_t destoff, loff_t len,
1232 		unsigned int remap_flags)
1233 {
1234 	struct inode *src_inode = file_inode(src_file);
1235 	struct inode *target_inode = file_inode(dst_file);
1236 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1237 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1238 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1239 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1240 	struct cifs_tcon *target_tcon, *src_tcon;
1241 	unsigned long long destend, fstart, fend, old_size, new_size;
1242 	unsigned int xid;
1243 	int rc;
1244 
1245 	if (remap_flags & REMAP_FILE_DEDUP)
1246 		return -EOPNOTSUPP;
1247 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1248 		return -EINVAL;
1249 
1250 	cifs_dbg(FYI, "clone range\n");
1251 
1252 	xid = get_xid();
1253 
1254 	if (!smb_file_src || !smb_file_target) {
1255 		rc = -EBADF;
1256 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1257 		goto out;
1258 	}
1259 
1260 	src_tcon = tlink_tcon(smb_file_src->tlink);
1261 	target_tcon = tlink_tcon(smb_file_target->tlink);
1262 
1263 	/*
1264 	 * Note: the cifs case is easier than btrfs since the server is responsible
1265 	 * for checking for proper open modes and file type, and if it wants the
1266 	 * server could even support copying a range where source = target
1267 	 */
1268 	lock_two_nondirectories(target_inode, src_inode);
1269 
1270 	if (len == 0)
1271 		len = src_inode->i_size - off;
1272 
1273 	cifs_dbg(FYI, "clone range\n");
1274 
1275 	/* Flush the source buffer */
1276 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1277 					  off + len - 1);
1278 	if (rc)
1279 		goto unlock;
1280 
1281 	/* The server-side copy will fail if the source crosses the EOF marker.
1282 	 * Advance the EOF marker after the flush above to the end of the range
1283 	 * if it's short of that.
1284 	 */
1285 	if (src_cifsi->netfs.remote_i_size < off + len) {
1286 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1287 		if (rc < 0)
1288 			goto unlock;
1289 	}
1290 
1291 	new_size = destoff + len;
1292 	destend = destoff + len - 1;
1293 
1294 	/* Flush the folios at either end of the destination range to prevent
1295 	 * accidental loss of dirty data outside of the range.
1296 	 */
1297 	fstart = destoff;
1298 	fend = destend;
1299 
1300 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1301 	if (rc)
1302 		goto unlock;
1303 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1304 	if (rc)
1305 		goto unlock;
1306 	if (fend > target_cifsi->netfs.zero_point)
1307 		target_cifsi->netfs.zero_point = fend + 1;
1308 	old_size = target_cifsi->netfs.remote_i_size;
1309 
1310 	/* Discard all the folios that overlap the destination region. */
1311 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1312 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1313 
1314 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1315 			   i_size_read(target_inode), 0);
1316 
1317 	rc = -EOPNOTSUPP;
1318 	if (target_tcon->ses->server->ops->duplicate_extents) {
1319 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1320 			smb_file_src, smb_file_target, off, len, destoff);
1321 		if (rc == 0 && new_size > old_size) {
1322 			truncate_setsize(target_inode, new_size);
1323 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1324 					      new_size);
1325 		}
1326 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1327 			target_cifsi->netfs.zero_point = new_size;
1328 	}
1329 
1330 	/* force revalidate of size and timestamps of target file now
1331 	   that target is updated on the server */
1332 	CIFS_I(target_inode)->time = 0;
1333 unlock:
1334 	/* although unlocking in the reverse order from locking is not
1335 	   strictly necessary here it is a little cleaner to be consistent */
1336 	unlock_two_nondirectories(src_inode, target_inode);
1337 out:
1338 	free_xid(xid);
1339 	return rc < 0 ? rc : len;
1340 }
1341 
1342 ssize_t cifs_file_copychunk_range(unsigned int xid,
1343 				struct file *src_file, loff_t off,
1344 				struct file *dst_file, loff_t destoff,
1345 				size_t len, unsigned int flags)
1346 {
1347 	struct inode *src_inode = file_inode(src_file);
1348 	struct inode *target_inode = file_inode(dst_file);
1349 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1350 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1351 	struct cifsFileInfo *smb_file_src;
1352 	struct cifsFileInfo *smb_file_target;
1353 	struct cifs_tcon *src_tcon;
1354 	struct cifs_tcon *target_tcon;
1355 	ssize_t rc;
1356 
1357 	cifs_dbg(FYI, "copychunk range\n");
1358 
1359 	if (!src_file->private_data || !dst_file->private_data) {
1360 		rc = -EBADF;
1361 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1362 		goto out;
1363 	}
1364 
1365 	rc = -EXDEV;
1366 	smb_file_target = dst_file->private_data;
1367 	smb_file_src = src_file->private_data;
1368 	src_tcon = tlink_tcon(smb_file_src->tlink);
1369 	target_tcon = tlink_tcon(smb_file_target->tlink);
1370 
1371 	if (src_tcon->ses != target_tcon->ses) {
1372 		cifs_dbg(FYI, "source and target of copy not on same server\n");
1373 		goto out;
1374 	}
1375 
1376 	rc = -EOPNOTSUPP;
1377 	if (!target_tcon->ses->server->ops->copychunk_range)
1378 		goto out;
1379 
1380 	/*
1381 	 * Note: the cifs case is easier than btrfs since the server is responsible
1382 	 * for checking for proper open modes and file type, and if it wants the
1383 	 * server could even support copying a range where source = target
1384 	 */
1385 	lock_two_nondirectories(target_inode, src_inode);
1386 
1387 	cifs_dbg(FYI, "about to flush pages\n");
1388 
1389 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1390 					  off + len - 1);
1391 	if (rc)
1392 		goto unlock;
1393 
1394 	/* The server-side copy will fail if the source crosses the EOF marker.
1395 	 * Advance the EOF marker after the flush above to the end of the range
1396 	 * if it's short of that.
1397 	 */
1398 	if (src_cifsi->netfs.remote_i_size < off + len) {
1399 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1400 		if (rc < 0)
1401 			goto unlock;
1402 	}
1403 
1404 	/* Flush and invalidate all the folios in the destination region.  If
1405 	 * the copy was successful, then some of the flush is extra overhead,
1406 	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
1407 	 */
1408 	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
1409 	if (rc)
1410 		goto unlock;
1411 
1412 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1413 			   i_size_read(target_inode), 0);
1414 
1415 	rc = file_modified(dst_file);
1416 	if (!rc) {
1417 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1418 			smb_file_src, smb_file_target, off, len, destoff);
1419 		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1420 			truncate_setsize(target_inode, destoff + rc);
1421 			netfs_resize_file(&target_cifsi->netfs,
1422 					  i_size_read(target_inode), true);
1423 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1424 					      i_size_read(target_inode));
1425 		}
1426 		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1427 			target_cifsi->netfs.zero_point = destoff + rc;
1428 	}
1429 
1430 	file_accessed(src_file);
1431 
1432 	/* force revalidate of size and timestamps of target file now
1433 	 * that target is updated on the server
1434 	 */
1435 	CIFS_I(target_inode)->time = 0;
1436 
1437 unlock:
1438 	/* although unlocking in the reverse order from locking is not
1439 	 * strictly necessary here it is a little cleaner to be consistent
1440 	 */
1441 	unlock_two_nondirectories(src_inode, target_inode);
1442 
1443 out:
1444 	return rc;
1445 }
1446 
1447 /*
1448  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1449  * is a dummy operation.
1450  */
1451 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1452 {
1453 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1454 		 file, datasync);
1455 
1456 	return 0;
1457 }
1458 
1459 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1460 				struct file *dst_file, loff_t destoff,
1461 				size_t len, unsigned int flags)
1462 {
1463 	unsigned int xid = get_xid();
1464 	ssize_t rc;
1465 	struct cifsFileInfo *cfile = dst_file->private_data;
1466 
1467 	if (cfile->swapfile) {
1468 		rc = -EOPNOTSUPP;
1469 		free_xid(xid);
1470 		return rc;
1471 	}
1472 
1473 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1474 					len, flags);
1475 	free_xid(xid);
1476 
1477 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1478 		rc = splice_copy_file_range(src_file, off, dst_file,
1479 					    destoff, len);
1480 	return rc;
1481 }
1482 
1483 const struct file_operations cifs_file_ops = {
1484 	.read_iter = cifs_loose_read_iter,
1485 	.write_iter = cifs_file_write_iter,
1486 	.open = cifs_open,
1487 	.release = cifs_close,
1488 	.lock = cifs_lock,
1489 	.flock = cifs_flock,
1490 	.fsync = cifs_fsync,
1491 	.flush = cifs_flush,
1492 	.mmap  = cifs_file_mmap,
1493 	.splice_read = filemap_splice_read,
1494 	.splice_write = iter_file_splice_write,
1495 	.llseek = cifs_llseek,
1496 	.unlocked_ioctl	= cifs_ioctl,
1497 	.copy_file_range = cifs_copy_file_range,
1498 	.remap_file_range = cifs_remap_file_range,
1499 	.setlease = cifs_setlease,
1500 	.fallocate = cifs_fallocate,
1501 };
1502 
1503 const struct file_operations cifs_file_strict_ops = {
1504 	.read_iter = cifs_strict_readv,
1505 	.write_iter = cifs_strict_writev,
1506 	.open = cifs_open,
1507 	.release = cifs_close,
1508 	.lock = cifs_lock,
1509 	.flock = cifs_flock,
1510 	.fsync = cifs_strict_fsync,
1511 	.flush = cifs_flush,
1512 	.mmap = cifs_file_strict_mmap,
1513 	.splice_read = filemap_splice_read,
1514 	.splice_write = iter_file_splice_write,
1515 	.llseek = cifs_llseek,
1516 	.unlocked_ioctl	= cifs_ioctl,
1517 	.copy_file_range = cifs_copy_file_range,
1518 	.remap_file_range = cifs_remap_file_range,
1519 	.setlease = cifs_setlease,
1520 	.fallocate = cifs_fallocate,
1521 };
1522 
1523 const struct file_operations cifs_file_direct_ops = {
1524 	.read_iter = netfs_unbuffered_read_iter,
1525 	.write_iter = netfs_file_write_iter,
1526 	.open = cifs_open,
1527 	.release = cifs_close,
1528 	.lock = cifs_lock,
1529 	.flock = cifs_flock,
1530 	.fsync = cifs_fsync,
1531 	.flush = cifs_flush,
1532 	.mmap = cifs_file_mmap,
1533 	.splice_read = copy_splice_read,
1534 	.splice_write = iter_file_splice_write,
1535 	.unlocked_ioctl  = cifs_ioctl,
1536 	.copy_file_range = cifs_copy_file_range,
1537 	.remap_file_range = cifs_remap_file_range,
1538 	.llseek = cifs_llseek,
1539 	.setlease = cifs_setlease,
1540 	.fallocate = cifs_fallocate,
1541 };
1542 
1543 const struct file_operations cifs_file_nobrl_ops = {
1544 	.read_iter = cifs_loose_read_iter,
1545 	.write_iter = cifs_file_write_iter,
1546 	.open = cifs_open,
1547 	.release = cifs_close,
1548 	.fsync = cifs_fsync,
1549 	.flush = cifs_flush,
1550 	.mmap  = cifs_file_mmap,
1551 	.splice_read = filemap_splice_read,
1552 	.splice_write = iter_file_splice_write,
1553 	.llseek = cifs_llseek,
1554 	.unlocked_ioctl	= cifs_ioctl,
1555 	.copy_file_range = cifs_copy_file_range,
1556 	.remap_file_range = cifs_remap_file_range,
1557 	.setlease = cifs_setlease,
1558 	.fallocate = cifs_fallocate,
1559 };
1560 
1561 const struct file_operations cifs_file_strict_nobrl_ops = {
1562 	.read_iter = cifs_strict_readv,
1563 	.write_iter = cifs_strict_writev,
1564 	.open = cifs_open,
1565 	.release = cifs_close,
1566 	.fsync = cifs_strict_fsync,
1567 	.flush = cifs_flush,
1568 	.mmap = cifs_file_strict_mmap,
1569 	.splice_read = filemap_splice_read,
1570 	.splice_write = iter_file_splice_write,
1571 	.llseek = cifs_llseek,
1572 	.unlocked_ioctl	= cifs_ioctl,
1573 	.copy_file_range = cifs_copy_file_range,
1574 	.remap_file_range = cifs_remap_file_range,
1575 	.setlease = cifs_setlease,
1576 	.fallocate = cifs_fallocate,
1577 };
1578 
1579 const struct file_operations cifs_file_direct_nobrl_ops = {
1580 	.read_iter = netfs_unbuffered_read_iter,
1581 	.write_iter = netfs_file_write_iter,
1582 	.open = cifs_open,
1583 	.release = cifs_close,
1584 	.fsync = cifs_fsync,
1585 	.flush = cifs_flush,
1586 	.mmap = cifs_file_mmap,
1587 	.splice_read = copy_splice_read,
1588 	.splice_write = iter_file_splice_write,
1589 	.unlocked_ioctl  = cifs_ioctl,
1590 	.copy_file_range = cifs_copy_file_range,
1591 	.remap_file_range = cifs_remap_file_range,
1592 	.llseek = cifs_llseek,
1593 	.setlease = cifs_setlease,
1594 	.fallocate = cifs_fallocate,
1595 };
1596 
1597 const struct file_operations cifs_dir_ops = {
1598 	.iterate_shared = cifs_readdir,
1599 	.release = cifs_closedir,
1600 	.read    = generic_read_dir,
1601 	.unlocked_ioctl  = cifs_ioctl,
1602 	.copy_file_range = cifs_copy_file_range,
1603 	.remap_file_range = cifs_remap_file_range,
1604 	.llseek = generic_file_llseek,
1605 	.fsync = cifs_dir_fsync,
1606 };
1607 
1608 static void
1609 cifs_init_once(void *inode)
1610 {
1611 	struct cifsInodeInfo *cifsi = inode;
1612 
1613 	inode_init_once(&cifsi->netfs.inode);
1614 	init_rwsem(&cifsi->lock_sem);
1615 }
1616 
1617 static int __init
1618 cifs_init_inodecache(void)
1619 {
1620 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1621 					      sizeof(struct cifsInodeInfo),
1622 					      0, (SLAB_RECLAIM_ACCOUNT|
1623 						SLAB_ACCOUNT),
1624 					      cifs_init_once);
1625 	if (cifs_inode_cachep == NULL)
1626 		return -ENOMEM;
1627 
1628 	return 0;
1629 }
1630 
1631 static void
1632 cifs_destroy_inodecache(void)
1633 {
1634 	/*
1635 	 * Make sure all delayed rcu free inodes are flushed before we
1636 	 * destroy cache.
1637 	 */
1638 	rcu_barrier();
1639 	kmem_cache_destroy(cifs_inode_cachep);
1640 }
1641 
1642 static int
1643 cifs_init_request_bufs(void)
1644 {
1645 	/*
1646 	 * SMB2 maximum header size is bigger than CIFS one - no problems to
1647 	 * allocate some more bytes for CIFS.
1648 	 */
1649 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1650 
1651 	if (CIFSMaxBufSize < 8192) {
1652 	/* The buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1653 	   Unicode path name has to fit in any SMB/CIFS path-based frame */
1654 		CIFSMaxBufSize = 8192;
1655 	} else if (CIFSMaxBufSize > 1024*127) {
1656 		CIFSMaxBufSize = 1024 * 127;
1657 	} else {
1658 		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1659 	}
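	/*
	 * For example, the default of 16384 is already a multiple of 512 and
	 * is left unchanged by the mask above, while a module parameter of,
	 * say, 16000 would be rounded down to 15872 (31 * 512).
	 */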
1660 /*
1661 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1662 		 CIFSMaxBufSize, CIFSMaxBufSize);
1663 */
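	/*
	 * The usercopy whitelist (offset 0, size CIFSMaxBufSize +
	 * max_hdr_size) allows the whole request buffer to be copied to and
	 * from user space when hardened usercopy checking is enabled.
	 */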
1664 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1665 					    CIFSMaxBufSize + max_hdr_size, 0,
1666 					    SLAB_HWCACHE_ALIGN, 0,
1667 					    CIFSMaxBufSize + max_hdr_size,
1668 					    NULL);
1669 	if (cifs_req_cachep == NULL)
1670 		return -ENOMEM;
1671 
1672 	if (cifs_min_rcv < 1)
1673 		cifs_min_rcv = 1;
1674 	else if (cifs_min_rcv > 64) {
1675 		cifs_min_rcv = 64;
1676 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1677 	}
1678 
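	/*
	 * The mempool keeps at least cifs_min_rcv request buffers
	 * preallocated so that receive processing can make forward progress
	 * even under memory pressure.
	 */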
1679 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1680 						  cifs_req_cachep);
1681 
1682 	if (cifs_req_poolp == NULL) {
1683 		kmem_cache_destroy(cifs_req_cachep);
1684 		return -ENOMEM;
1685 	}
1686 	/*
1687 	 * MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses
1688 	 * and almost all handle-based requests (but not for write responses,
1689 	 * nor for path-based requests).  A smaller size would pack more slab
1690 	 * objects onto one 4K page when debugging is on, but this larger size
1691 	 * lets more SMBs use the small buffer allocator and is still far
1692 	 * cheaper than the ~17K (5-page) allocation of large cifs buffers,
1693 	 * even when page debugging is enabled. */
1694 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1695 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1696 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1697 	if (cifs_sm_req_cachep == NULL) {
1698 		mempool_destroy(cifs_req_poolp);
1699 		kmem_cache_destroy(cifs_req_cachep);
1700 		return -ENOMEM;
1701 	}
1702 
1703 	if (cifs_min_small < 2)
1704 		cifs_min_small = 2;
1705 	else if (cifs_min_small > 256) {
1706 		cifs_min_small = 256;
1707 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1708 	}
1709 
1710 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1711 						     cifs_sm_req_cachep);
1712 
1713 	if (cifs_sm_req_poolp == NULL) {
1714 		mempool_destroy(cifs_req_poolp);
1715 		kmem_cache_destroy(cifs_req_cachep);
1716 		kmem_cache_destroy(cifs_sm_req_cachep);
1717 		return -ENOMEM;
1718 	}
1719 
1720 	return 0;
1721 }
1722 
1723 static void
1724 cifs_destroy_request_bufs(void)
1725 {
1726 	mempool_destroy(cifs_req_poolp);
1727 	kmem_cache_destroy(cifs_req_cachep);
1728 	mempool_destroy(cifs_sm_req_poolp);
1729 	kmem_cache_destroy(cifs_sm_req_cachep);
1730 }
1731 
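/*
 * mid_q_entry objects track in-flight requests by multiplex ID (mid)
 * while the client waits for the matching response from the server.
 */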
1732 static int init_mids(void)
1733 {
1734 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1735 					    sizeof(struct mid_q_entry), 0,
1736 					    SLAB_HWCACHE_ALIGN, NULL);
1737 	if (cifs_mid_cachep == NULL)
1738 		return -ENOMEM;
1739 
1740 	/* 3 is a reasonable minimum number of simultaneous operations */
1741 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1742 	if (cifs_mid_poolp == NULL) {
1743 		kmem_cache_destroy(cifs_mid_cachep);
1744 		return -ENOMEM;
1745 	}
1746 
1747 	return 0;
1748 }
1749 
1750 static void destroy_mids(void)
1751 {
1752 	mempool_destroy(cifs_mid_poolp);
1753 	kmem_cache_destroy(cifs_mid_cachep);
1754 }
1755 
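/*
 * Caches and mempools for the per-I/O request and subrequest objects
 * used by the netfs helper library; on failure the labels below unwind
 * only what was successfully set up.
 */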
1756 static int cifs_init_netfs(void)
1757 {
1758 	cifs_io_request_cachep =
1759 		kmem_cache_create("cifs_io_request",
1760 				  sizeof(struct cifs_io_request), 0,
1761 				  SLAB_HWCACHE_ALIGN, NULL);
1762 	if (!cifs_io_request_cachep)
1763 		goto nomem_req;
1764 
1765 	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
1766 		goto nomem_reqpool;
1767 
1768 	cifs_io_subrequest_cachep =
1769 		kmem_cache_create("cifs_io_subrequest",
1770 				  sizeof(struct cifs_io_subrequest), 0,
1771 				  SLAB_HWCACHE_ALIGN, NULL);
1772 	if (!cifs_io_subrequest_cachep)
1773 		goto nomem_subreq;
1774 
1775 	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
1776 		goto nomem_subreqpool;
1777 
1778 	return 0;
1779 
1780 nomem_subreqpool:
1781 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1782 nomem_subreq:
1783 	mempool_exit(&cifs_io_request_pool);
1784 nomem_reqpool:
1785 	kmem_cache_destroy(cifs_io_request_cachep);
1786 nomem_req:
1787 	return -ENOMEM;
1788 }
1789 
1790 static void cifs_destroy_netfs(void)
1791 {
1792 	mempool_exit(&cifs_io_subrequest_pool);
1793 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1794 	mempool_exit(&cifs_io_request_pool);
1795 	kmem_cache_destroy(cifs_io_request_cachep);
1796 }
1797 
1798 static int __init
1799 init_cifs(void)
1800 {
1801 	int rc = 0;
1802 	cifs_proc_init();
1803 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1804 	/*
1805 	 * Initialize global counters.
1806 	 */
1807 	atomic_set(&sesInfoAllocCount, 0);
1808 	atomic_set(&tconInfoAllocCount, 0);
1809 	atomic_set(&tcpSesNextId, 0);
1810 	atomic_set(&tcpSesAllocCount, 0);
1811 	atomic_set(&tcpSesReconnectCount, 0);
1812 	atomic_set(&tconInfoReconnectCount, 0);
1813 
1814 	atomic_set(&buf_alloc_count, 0);
1815 	atomic_set(&small_buf_alloc_count, 0);
1816 #ifdef CONFIG_CIFS_STATS2
1817 	atomic_set(&total_buf_alloc_count, 0);
1818 	atomic_set(&total_small_buf_alloc_count, 0);
1819 	if (slow_rsp_threshold < 1)
1820 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1821 	else if (slow_rsp_threshold > 32767)
1822 		cifs_dbg(VFS,
1823 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1824 #endif /* CONFIG_CIFS_STATS2 */
1825 
1826 	atomic_set(&mid_count, 0);
1827 	GlobalCurrentXid = 0;
1828 	GlobalTotalActiveXid = 0;
1829 	GlobalMaxActiveXid = 0;
1830 	spin_lock_init(&cifs_tcp_ses_lock);
1831 	spin_lock_init(&GlobalMid_Lock);
1832 
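	/* Random per-load value mixed into lock-owner hashes for byte-range locks */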
1833 	cifs_lock_secret = get_random_u32();
1834 
1835 	if (cifs_max_pending < 2) {
1836 		cifs_max_pending = 2;
1837 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1838 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1839 		cifs_max_pending = CIFS_MAX_REQ;
1840 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1841 			 CIFS_MAX_REQ);
1842 	}
1843 
1844 	/* Cap at roughly 18 hours; setting dir_cache_timeout to zero disables directory entry caching */
1845 	if (dir_cache_timeout > 65000) {
1846 		dir_cache_timeout = 65000;
1847 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1848 	}
1849 
1850 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1851 	if (!cifsiod_wq) {
1852 		rc = -ENOMEM;
1853 		goto out_clean_proc;
1854 	}
1855 
1856 	/*
1857 	 * Consider setting max_active != 0 in the future, perhaps to
1858 	 * min(num_of_cores - 1, 3), so that we do not launch too many worker
1859 	 * threads; Documentation/core-api/workqueue.rst recommends 0 for now.
1860 	 */
1861 
1862 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1863 	decrypt_wq = alloc_workqueue("smb3decryptd",
1864 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1865 	if (!decrypt_wq) {
1866 		rc = -ENOMEM;
1867 		goto out_destroy_cifsiod_wq;
1868 	}
1869 
1870 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1871 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1872 	if (!fileinfo_put_wq) {
1873 		rc = -ENOMEM;
1874 		goto out_destroy_decrypt_wq;
1875 	}
1876 
1877 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1878 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1879 	if (!cifsoplockd_wq) {
1880 		rc = -ENOMEM;
1881 		goto out_destroy_fileinfo_put_wq;
1882 	}
1883 
1884 	deferredclose_wq = alloc_workqueue("deferredclose",
1885 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1886 	if (!deferredclose_wq) {
1887 		rc = -ENOMEM;
1888 		goto out_destroy_cifsoplockd_wq;
1889 	}
1890 
1891 	serverclose_wq = alloc_workqueue("serverclose",
1892 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1893 	if (!serverclose_wq) {
1894 		rc = -ENOMEM;
1895 		goto out_destroy_deferredclose_wq;
1896 	}
1897 
1898 	rc = cifs_init_inodecache();
1899 	if (rc)
1900 		goto out_destroy_serverclose_wq;
1901 
1902 	rc = cifs_init_netfs();
1903 	if (rc)
1904 		goto out_destroy_inodecache;
1905 
1906 	rc = init_mids();
1907 	if (rc)
1908 		goto out_destroy_netfs;
1909 
1910 	rc = cifs_init_request_bufs();
1911 	if (rc)
1912 		goto out_destroy_mids;
1913 
1914 #ifdef CONFIG_CIFS_DFS_UPCALL
1915 	rc = dfs_cache_init();
1916 	if (rc)
1917 		goto out_destroy_request_bufs;
1918 #endif /* CONFIG_CIFS_DFS_UPCALL */
1919 #ifdef CONFIG_CIFS_UPCALL
1920 	rc = init_cifs_spnego();
1921 	if (rc)
1922 		goto out_destroy_dfs_cache;
1923 #endif /* CONFIG_CIFS_UPCALL */
1924 #ifdef CONFIG_CIFS_SWN_UPCALL
1925 	rc = cifs_genl_init();
1926 	if (rc)
1927 		goto out_register_key_type;
1928 #endif /* CONFIG_CIFS_SWN_UPCALL */
1929 
1930 	rc = init_cifs_idmap();
1931 	if (rc)
1932 		goto out_cifs_swn_init;
1933 
1934 	rc = register_filesystem(&cifs_fs_type);
1935 	if (rc)
1936 		goto out_init_cifs_idmap;
1937 
1938 	rc = register_filesystem(&smb3_fs_type);
1939 	if (rc) {
1940 		unregister_filesystem(&cifs_fs_type);
1941 		goto out_init_cifs_idmap;
1942 	}
1943 
1944 	return 0;
1945 
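	/*
	 * Error unwinding: each label releases what was set up before the
	 * failing step, in reverse order; labels inside #ifdef blocks only
	 * exist when the corresponding init step was built in.
	 */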
1946 out_init_cifs_idmap:
1947 	exit_cifs_idmap();
1948 out_cifs_swn_init:
1949 #ifdef CONFIG_CIFS_SWN_UPCALL
1950 	cifs_genl_exit();
1951 out_register_key_type:
1952 #endif
1953 #ifdef CONFIG_CIFS_UPCALL
1954 	exit_cifs_spnego();
1955 out_destroy_dfs_cache:
1956 #endif
1957 #ifdef CONFIG_CIFS_DFS_UPCALL
1958 	dfs_cache_destroy();
1959 out_destroy_request_bufs:
1960 #endif
1961 	cifs_destroy_request_bufs();
1962 out_destroy_mids:
1963 	destroy_mids();
1964 out_destroy_netfs:
1965 	cifs_destroy_netfs();
1966 out_destroy_inodecache:
1967 	cifs_destroy_inodecache();
1968 out_destroy_serverclose_wq:
1969 	destroy_workqueue(serverclose_wq);
1970 out_destroy_deferredclose_wq:
1971 	destroy_workqueue(deferredclose_wq);
1972 out_destroy_cifsoplockd_wq:
1973 	destroy_workqueue(cifsoplockd_wq);
1974 out_destroy_fileinfo_put_wq:
1975 	destroy_workqueue(fileinfo_put_wq);
1976 out_destroy_decrypt_wq:
1977 	destroy_workqueue(decrypt_wq);
1978 out_destroy_cifsiod_wq:
1979 	destroy_workqueue(cifsiod_wq);
1980 out_clean_proc:
1981 	cifs_proc_clean();
1982 	return rc;
1983 }
1984 
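/*
 * Module unload: unregister the filesystems first so no new mounts can
 * start, then tear down upcalls, caches, mempools and workqueues.
 */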
1985 static void __exit
1986 exit_cifs(void)
1987 {
1988 	cifs_dbg(NOISY, "exit_smb3\n");
1989 	unregister_filesystem(&cifs_fs_type);
1990 	unregister_filesystem(&smb3_fs_type);
1991 	cifs_release_automount_timer();
1992 	exit_cifs_idmap();
1993 #ifdef CONFIG_CIFS_SWN_UPCALL
1994 	cifs_genl_exit();
1995 #endif
1996 #ifdef CONFIG_CIFS_UPCALL
1997 	exit_cifs_spnego();
1998 #endif
1999 #ifdef CONFIG_CIFS_DFS_UPCALL
2000 	dfs_cache_destroy();
2001 #endif
2002 	cifs_destroy_request_bufs();
2003 	destroy_mids();
2004 	cifs_destroy_netfs();
2005 	cifs_destroy_inodecache();
2006 	destroy_workqueue(deferredclose_wq);
2007 	destroy_workqueue(cifsoplockd_wq);
2008 	destroy_workqueue(decrypt_wq);
2009 	destroy_workqueue(fileinfo_put_wq);
2010 	destroy_workqueue(serverclose_wq);
2011 	destroy_workqueue(cifsiod_wq);
2012 	cifs_proc_clean();
2013 }
2014 
2015 MODULE_AUTHOR("Steve French");
2016 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2017 MODULE_DESCRIPTION
2018 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2019 	"also older servers complying with the SNIA CIFS Specification)");
2020 MODULE_VERSION(CIFS_VERSION);
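/*
 * Soft dependencies below hint module tooling (e.g. initramfs
 * generators) to also include the crypto and nls modules that SMB2/3
 * signing, encryption and charset handling rely on.
 */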
2021 MODULE_SOFTDEP("ecb");
2022 MODULE_SOFTDEP("hmac");
2023 MODULE_SOFTDEP("md5");
2024 MODULE_SOFTDEP("nls");
2025 MODULE_SOFTDEP("aes");
2026 MODULE_SOFTDEP("cmac");
2027 MODULE_SOFTDEP("sha256");
2028 MODULE_SOFTDEP("sha512");
2029 MODULE_SOFTDEP("aead2");
2030 MODULE_SOFTDEP("ccm");
2031 MODULE_SOFTDEP("gcm");
2032 module_init(init_cifs)
2033 module_exit(exit_cifs)
2034