xref: /linux/fs/smb/client/cifsfs.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the year field should range only up to
56  * 119, which limits the maximum year to 2099, but this has not been verified.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
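/*
 * Worked example, assuming the standard DOS date/time bit layout (years
 * since 1980 in date bits 15-9, month in bits 8-5, day in bits 4-0; hours
 * in time bits 15-11, minutes in bits 10-5, two-second units in bits 4-0):
 *
 *   SMB_DATE_MAX = 127<<9 | 12<<5 | 31  ->  1980 + 127 = 2107, Dec 31
 *   SMB_DATE_MIN =   0<<9 |  1<<5 |  1  ->  1980, Jan 1
 *   SMB_TIME_MAX =  23<<11 | 59<<5 | 29 ->  23:59:58 (2-second resolution)
 *
 * which matches the 1980/1/1 through 2107/12/31 range noted above.
 */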
61 
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 unsigned int sign_CIFS_PDUs = 1;
74 
75 /*
76  * Global transaction id (XID) information
77  */
78 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
80 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
82 
83 /*
84  *  Global counters, updated atomically
85  */
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
92 
93 atomic_t mid_count;
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
99 #endif /* STATS2 */
100 struct list_head	cifs_tcp_ses_list;
101 spinlock_t		cifs_tcp_ses_lock;
102 static const struct super_operations cifs_super_ops;
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 				 "for CIFS requests. "
107 				 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 				"1 to 64");
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 				 "Range: 2 to 256");
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 				   "CIFS/SMB1 dialect (N/A for SMB3) "
120 				   "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 #ifdef CONFIG_CIFS_STATS2
126 unsigned int slow_rsp_threshold = 1;
127 module_param(slow_rsp_threshold, uint, 0644);
128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
129 				   "before logging that a response is delayed. "
130 				   "Default: 1 (if set to 0 disables msg).");
131 #endif /* STATS2 */
132 
133 module_param(enable_oplocks, bool, 0644);
134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
135 
136 module_param(enable_gcm_256, bool, 0644);
137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
138 
139 module_param(require_gcm_256, bool, 0644);
140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
141 
142 module_param(enable_negotiate_signing, bool, 0644);
143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
144 
145 module_param(disable_legacy_dialects, bool, 0644);
146 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
147 				  "helpful to restrict the ability to "
148 				  "override the default dialects (SMB2.1, "
149 				  "SMB3 and SMB3.02) on mount with old "
150 				  "dialects (CIFS/SMB1 and SMB2) since "
151 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
152 				  " and less secure. Default: n/N/0");
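/*
 * Illustrative usage of the parameters above (the exact paths assume cifs is
 * built as a module): parameters registered with mode 0444 are set at load
 * time, e.g.
 *
 *   modprobe cifs CIFSMaxBufSize=130048 cifs_max_pending=256
 *
 * while those registered with mode 0644 can also be changed at runtime via
 * sysfs, e.g.
 *
 *   echo 1 > /sys/module/cifs/parameters/require_gcm_256
 */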
153 
154 struct workqueue_struct	*cifsiod_wq;
155 struct workqueue_struct	*decrypt_wq;
156 struct workqueue_struct	*fileinfo_put_wq;
157 struct workqueue_struct	*cifsoplockd_wq;
158 struct workqueue_struct	*deferredclose_wq;
159 struct workqueue_struct	*serverclose_wq;
160 struct workqueue_struct	*cfid_put_wq;
161 __u32 cifs_lock_secret;
162 
163 /*
164  * Bumps refcount for cifs super block.
165  * Note that it should be only called if a reference to VFS super block is
166  * already held, e.g. in open-type syscalls context. Otherwise it can race with
167  * atomic_dec_and_test in deactivate_locked_super.
168  */
169 void
170 cifs_sb_active(struct super_block *sb)
171 {
172 	struct cifs_sb_info *server = CIFS_SB(sb);
173 
174 	if (atomic_inc_return(&server->active) == 1)
175 		atomic_inc(&sb->s_active);
176 }
177 
178 void
179 cifs_sb_deactive(struct super_block *sb)
180 {
181 	struct cifs_sb_info *server = CIFS_SB(sb);
182 
183 	if (atomic_dec_and_test(&server->active))
184 		deactivate_super(sb);
185 }
186 
187 static int
188 cifs_read_super(struct super_block *sb)
189 {
190 	struct inode *inode;
191 	struct cifs_sb_info *cifs_sb;
192 	struct cifs_tcon *tcon;
193 	struct timespec64 ts;
194 	int rc = 0;
195 
196 	cifs_sb = CIFS_SB(sb);
197 	tcon = cifs_sb_master_tcon(cifs_sb);
198 
199 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
200 		sb->s_flags |= SB_POSIXACL;
201 
202 	if (tcon->snapshot_time)
203 		sb->s_flags |= SB_RDONLY;
204 
205 	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
206 		sb->s_maxbytes = MAX_LFS_FILESIZE;
207 	else
208 		sb->s_maxbytes = MAX_NON_LFS;
209 
210 	/*
211 	 * Some very old servers, like DOS and OS/2, used 2 second granularity
212 	 * (while all current servers use 100ns granularity - see MS-DTYP),
213 	 * but 1 second is the maximum granularity allowed by the VFS, so for
214 	 * old servers set the time granularity to 1 second and for everything
215 	 * else (current servers) set it to 100ns.
216 	 */
217 	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
218 	    ((tcon->ses->capabilities &
219 	      tcon->ses->server->vals->cap_nt_find) == 0) &&
220 	    !tcon->unix_ext) {
221 		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
222 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
223 		sb->s_time_min = ts.tv_sec;
224 		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
225 				    cpu_to_le16(SMB_TIME_MAX), 0);
226 		sb->s_time_max = ts.tv_sec;
227 	} else {
228 		/*
229 		 * Almost every server, including all SMB2+, uses DCE TIME
230 		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
231 		 */
232 		sb->s_time_gran = 100;
233 		ts = cifs_NTtimeToUnix(0);
234 		sb->s_time_min = ts.tv_sec;
235 		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
236 		sb->s_time_max = ts.tv_sec;
237 	}
238 
239 	sb->s_magic = CIFS_SUPER_MAGIC;
240 	sb->s_op = &cifs_super_ops;
241 	sb->s_xattr = cifs_xattr_handlers;
242 	rc = super_setup_bdi(sb);
243 	if (rc)
244 		goto out_no_root;
245 	/* tune readahead according to rsize if readahead size not set on mount */
246 	if (cifs_sb->ctx->rsize == 0)
247 		cifs_sb->ctx->rsize =
248 			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
249 	if (cifs_sb->ctx->rasize)
250 		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
251 	else
252 		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
253 
254 	sb->s_blocksize = CIFS_MAX_MSGSIZE;
255 	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
256 	inode = cifs_root_iget(sb);
257 
258 	if (IS_ERR(inode)) {
259 		rc = PTR_ERR(inode);
260 		goto out_no_root;
261 	}
262 
263 	if (tcon->nocase)
264 		sb->s_d_op = &cifs_ci_dentry_ops;
265 	else
266 		sb->s_d_op = &cifs_dentry_ops;
267 
268 	sb->s_root = d_make_root(inode);
269 	if (!sb->s_root) {
270 		rc = -ENOMEM;
271 		goto out_no_root;
272 	}
273 
274 #ifdef CONFIG_CIFS_NFSD_EXPORT
275 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
276 		cifs_dbg(FYI, "export ops supported\n");
277 		sb->s_export_op = &cifs_export_ops;
278 	}
279 #endif /* CONFIG_CIFS_NFSD_EXPORT */
280 
281 	return 0;
282 
283 out_no_root:
284 	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
285 	return rc;
286 }
287 
288 static void cifs_kill_sb(struct super_block *sb)
289 {
290 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
291 
292 	/*
293 	 * We need to release all dentries for the cached directories
294 	 * before we kill the sb.
295 	 */
296 	if (cifs_sb->root) {
297 		close_all_cached_dirs(cifs_sb);
298 
299 		/* finally release root dentry */
300 		dput(cifs_sb->root);
301 		cifs_sb->root = NULL;
302 	}
303 
304 	kill_anon_super(sb);
305 	cifs_umount(cifs_sb);
306 }
307 
308 static int
309 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
310 {
311 	struct super_block *sb = dentry->d_sb;
312 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
313 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
314 	struct TCP_Server_Info *server = tcon->ses->server;
315 	unsigned int xid;
316 	int rc = 0;
317 	const char *full_path;
318 	void *page;
319 
320 	xid = get_xid();
321 	page = alloc_dentry_path();
322 
323 	full_path = build_path_from_dentry(dentry, page);
324 	if (IS_ERR(full_path)) {
325 		rc = PTR_ERR(full_path);
326 		goto statfs_out;
327 	}
328 
329 	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
330 		buf->f_namelen =
331 		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
332 	else
333 		buf->f_namelen = PATH_MAX;
334 
335 	buf->f_fsid.val[0] = tcon->vol_serial_number;
336 	/* we are using part of the create time for more randomness, see man statfs */
337 	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);
338 
339 	buf->f_files = 0;	/* undefined */
340 	buf->f_ffree = 0;	/* unlimited */
341 
342 	if (server->ops->queryfs)
343 		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
344 
345 statfs_out:
346 	free_dentry_path(page);
347 	free_xid(xid);
348 	return rc;
349 }
350 
351 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
352 {
353 	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
354 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
355 	struct TCP_Server_Info *server = tcon->ses->server;
356 
357 	if (server->ops->fallocate)
358 		return server->ops->fallocate(file, tcon, mode, off, len);
359 
360 	return -EOPNOTSUPP;
361 }
362 
363 static int cifs_permission(struct mnt_idmap *idmap,
364 			   struct inode *inode, int mask)
365 {
366 	struct cifs_sb_info *cifs_sb;
367 
368 	cifs_sb = CIFS_SB(inode->i_sb);
369 
370 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
371 		if ((mask & MAY_EXEC) && !execute_ok(inode))
372 			return -EACCES;
373 		else
374 			return 0;
375 	} else /* file mode might have been restricted at mount time
376 		on the client (above and beyond ACL on servers) for
377 		servers which do not support setting and viewing mode bits,
378 		so allowing client to check permissions is useful */
379 		return generic_permission(&nop_mnt_idmap, inode, mask);
380 }
381 
382 static struct kmem_cache *cifs_inode_cachep;
383 static struct kmem_cache *cifs_req_cachep;
384 static struct kmem_cache *cifs_mid_cachep;
385 static struct kmem_cache *cifs_sm_req_cachep;
386 static struct kmem_cache *cifs_io_request_cachep;
387 static struct kmem_cache *cifs_io_subrequest_cachep;
388 mempool_t *cifs_sm_req_poolp;
389 mempool_t *cifs_req_poolp;
390 mempool_t *cifs_mid_poolp;
391 mempool_t cifs_io_request_pool;
392 mempool_t cifs_io_subrequest_pool;
393 
394 static struct inode *
395 cifs_alloc_inode(struct super_block *sb)
396 {
397 	struct cifsInodeInfo *cifs_inode;
398 	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
399 	if (!cifs_inode)
400 		return NULL;
401 	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
402 	cifs_inode->time = 0;
403 	/*
404 	 * Until the file is open and we have gotten oplock info back from the
405 	 * server, we cannot assume caching of file data or metadata.
406 	 */
407 	cifs_set_oplock_level(cifs_inode, 0);
408 	cifs_inode->lease_granted = false;
409 	cifs_inode->flags = 0;
410 	spin_lock_init(&cifs_inode->writers_lock);
411 	cifs_inode->writers = 0;
412 	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
413 	cifs_inode->netfs.remote_i_size = 0;
414 	cifs_inode->uniqueid = 0;
415 	cifs_inode->createtime = 0;
416 	cifs_inode->epoch = 0;
417 	spin_lock_init(&cifs_inode->open_file_lock);
418 	generate_random_uuid(cifs_inode->lease_key);
419 	cifs_inode->symlink_target = NULL;
420 
421 	/*
422 	 * Can not set i_flags here - they get immediately overwritten to zero
423 	 * by the VFS.
424 	 */
425 	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
426 	INIT_LIST_HEAD(&cifs_inode->openFileList);
427 	INIT_LIST_HEAD(&cifs_inode->llist);
428 	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
429 	spin_lock_init(&cifs_inode->deferred_lock);
430 	return &cifs_inode->netfs.inode;
431 }
432 
433 static void
434 cifs_free_inode(struct inode *inode)
435 {
436 	struct cifsInodeInfo *cinode = CIFS_I(inode);
437 
438 	if (S_ISLNK(inode->i_mode))
439 		kfree(cinode->symlink_target);
440 	kmem_cache_free(cifs_inode_cachep, cinode);
441 }
442 
443 static void
444 cifs_evict_inode(struct inode *inode)
445 {
446 	netfs_wait_for_outstanding_io(inode);
447 	truncate_inode_pages_final(&inode->i_data);
448 	if (inode->i_state & I_PINNING_NETFS_WB)
449 		cifs_fscache_unuse_inode_cookie(inode, true);
450 	cifs_fscache_release_inode_cookie(inode);
451 	clear_inode(inode);
452 }
453 
454 static void
455 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
456 {
457 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
458 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
459 
460 	seq_puts(s, ",addr=");
461 
462 	switch (server->dstaddr.ss_family) {
463 	case AF_INET:
464 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
465 		break;
466 	case AF_INET6:
467 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
468 		if (sa6->sin6_scope_id)
469 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
470 		break;
471 	default:
472 		seq_puts(s, "(unknown)");
473 	}
474 	if (server->rdma)
475 		seq_puts(s, ",rdma");
476 }
477 
478 static void
479 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
480 {
481 	if (ses->sectype == Unspecified) {
482 		if (ses->user_name == NULL)
483 			seq_puts(s, ",sec=none");
484 		return;
485 	}
486 
487 	seq_puts(s, ",sec=");
488 
489 	switch (ses->sectype) {
490 	case NTLMv2:
491 		seq_puts(s, "ntlmv2");
492 		break;
493 	case Kerberos:
494 		seq_puts(s, "krb5");
495 		break;
496 	case RawNTLMSSP:
497 		seq_puts(s, "ntlmssp");
498 		break;
499 	default:
500 		/* shouldn't ever happen */
501 		seq_puts(s, "unknown");
502 		break;
503 	}
504 
505 	if (ses->sign)
506 		seq_puts(s, "i");
507 
508 	if (ses->sectype == Kerberos)
509 		seq_printf(s, ",cruid=%u",
510 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
511 }
512 
513 static void
514 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
515 {
516 	seq_puts(s, ",cache=");
517 
518 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
519 		seq_puts(s, "strict");
520 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
521 		seq_puts(s, "none");
522 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
523 		seq_puts(s, "singleclient"); /* assume only one client access */
524 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
525 		seq_puts(s, "ro"); /* read only caching assumed */
526 	else
527 		seq_puts(s, "loose");
528 }
529 
530 /*
531  * cifs_show_devname() is used so we show the mount device name with correct
532  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
533  */
534 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
535 {
536 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
537 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
538 
539 	if (devname == NULL)
540 		seq_puts(m, "none");
541 	else {
542 		convert_delimiter(devname, '/');
543 		/* escape all spaces in share names */
544 		seq_escape(m, devname, " \t");
545 		kfree(devname);
546 	}
547 	return 0;
548 }
549 
550 static void
551 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
552 {
553 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
554 		seq_puts(s, ",upcall_target=app");
555 		return;
556 	}
557 
558 	seq_puts(s, ",upcall_target=");
559 
560 	switch (cifs_sb->ctx->upcall_target) {
561 	case UPTARGET_APP:
562 		seq_puts(s, "app");
563 		break;
564 	case UPTARGET_MOUNT:
565 		seq_puts(s, "mount");
566 		break;
567 	default:
568 		/* shouldn't ever happen */
569 		seq_puts(s, "unknown");
570 		break;
571 	}
572 }
573 
574 /*
575  * cifs_show_options() is for displaying mount options in /proc/mounts.
576  * Not all settable options are displayed but most of the important
577  * ones are.
578  */
579 static int
580 cifs_show_options(struct seq_file *s, struct dentry *root)
581 {
582 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
583 	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
584 	struct sockaddr *srcaddr;
585 	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
586 
587 	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
588 	cifs_show_security(s, tcon->ses);
589 	cifs_show_cache_flavor(s, cifs_sb);
590 	cifs_show_upcall_target(s, cifs_sb);
591 
592 	if (tcon->no_lease)
593 		seq_puts(s, ",nolease");
594 	if (cifs_sb->ctx->multiuser)
595 		seq_puts(s, ",multiuser");
596 	else if (tcon->ses->user_name)
597 		seq_show_option(s, "username", tcon->ses->user_name);
598 
599 	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
600 		seq_show_option(s, "domain", tcon->ses->domainName);
601 
602 	if (srcaddr->sa_family != AF_UNSPEC) {
603 		struct sockaddr_in *saddr4;
604 		struct sockaddr_in6 *saddr6;
605 		saddr4 = (struct sockaddr_in *)srcaddr;
606 		saddr6 = (struct sockaddr_in6 *)srcaddr;
607 		if (srcaddr->sa_family == AF_INET6)
608 			seq_printf(s, ",srcaddr=%pI6c",
609 				   &saddr6->sin6_addr);
610 		else if (srcaddr->sa_family == AF_INET)
611 			seq_printf(s, ",srcaddr=%pI4",
612 				   &saddr4->sin_addr.s_addr);
613 		else
614 			seq_printf(s, ",srcaddr=BAD-AF:%i",
615 				   (int)(srcaddr->sa_family));
616 	}
617 
618 	seq_printf(s, ",uid=%u",
619 		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
620 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
621 		seq_puts(s, ",forceuid");
622 	else
623 		seq_puts(s, ",noforceuid");
624 
625 	seq_printf(s, ",gid=%u",
626 		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
627 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
628 		seq_puts(s, ",forcegid");
629 	else
630 		seq_puts(s, ",noforcegid");
631 
632 	cifs_show_address(s, tcon->ses->server);
633 
634 	if (!tcon->unix_ext)
635 		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
636 					   cifs_sb->ctx->file_mode,
637 					   cifs_sb->ctx->dir_mode);
638 	if (cifs_sb->ctx->iocharset)
639 		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
640 	if (tcon->seal)
641 		seq_puts(s, ",seal");
642 	else if (tcon->ses->server->ignore_signature)
643 		seq_puts(s, ",signloosely");
644 	if (tcon->nocase)
645 		seq_puts(s, ",nocase");
646 	if (tcon->nodelete)
647 		seq_puts(s, ",nodelete");
648 	if (cifs_sb->ctx->no_sparse)
649 		seq_puts(s, ",nosparse");
650 	if (tcon->local_lease)
651 		seq_puts(s, ",locallease");
652 	if (tcon->retry)
653 		seq_puts(s, ",hard");
654 	else
655 		seq_puts(s, ",soft");
656 	if (tcon->use_persistent)
657 		seq_puts(s, ",persistenthandles");
658 	else if (tcon->use_resilient)
659 		seq_puts(s, ",resilienthandles");
660 	if (tcon->posix_extensions)
661 		seq_puts(s, ",posix");
662 	else if (tcon->unix_ext)
663 		seq_puts(s, ",unix");
664 	else
665 		seq_puts(s, ",nounix");
666 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
667 		seq_puts(s, ",nodfs");
668 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
669 		seq_puts(s, ",posixpaths");
670 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
671 		seq_puts(s, ",setuids");
672 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
673 		seq_puts(s, ",idsfromsid");
674 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
675 		seq_puts(s, ",serverino");
676 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
677 		seq_puts(s, ",rwpidforward");
678 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
679 		seq_puts(s, ",forcemand");
680 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
681 		seq_puts(s, ",nouser_xattr");
682 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
683 		seq_puts(s, ",mapchars");
684 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
685 		seq_puts(s, ",mapposix");
686 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
687 		seq_puts(s, ",sfu");
688 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
689 		seq_puts(s, ",nobrl");
690 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
691 		seq_puts(s, ",nohandlecache");
692 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
693 		seq_puts(s, ",modefromsid");
694 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
695 		seq_puts(s, ",cifsacl");
696 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
697 		seq_puts(s, ",dynperm");
698 	if (root->d_sb->s_flags & SB_POSIXACL)
699 		seq_puts(s, ",acl");
700 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
701 		seq_puts(s, ",mfsymlinks");
702 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
703 		seq_puts(s, ",fsc");
704 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
705 		seq_puts(s, ",nostrictsync");
706 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
707 		seq_puts(s, ",noperm");
708 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
709 		seq_printf(s, ",backupuid=%u",
710 			   from_kuid_munged(&init_user_ns,
711 					    cifs_sb->ctx->backupuid));
712 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
713 		seq_printf(s, ",backupgid=%u",
714 			   from_kgid_munged(&init_user_ns,
715 					    cifs_sb->ctx->backupgid));
716 	seq_show_option(s, "reparse",
717 			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
718 	if (cifs_sb->ctx->nonativesocket)
719 		seq_puts(s, ",nonativesocket");
720 	else
721 		seq_puts(s, ",nativesocket");
722 	seq_show_option(s, "symlink",
723 			cifs_symlink_type_str(get_cifs_symlink_type(cifs_sb)));
724 
725 	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
726 	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
727 	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
728 	if (cifs_sb->ctx->rasize)
729 		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
730 	if (tcon->ses->server->min_offload)
731 		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
732 	if (tcon->ses->server->retrans)
733 		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
734 	seq_printf(s, ",echo_interval=%lu",
735 			tcon->ses->server->echo_interval / HZ);
736 
737 	/* Only display the following if overridden on mount */
738 	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
739 		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
740 	if (tcon->ses->server->tcp_nodelay)
741 		seq_puts(s, ",tcpnodelay");
742 	if (tcon->ses->server->noautotune)
743 		seq_puts(s, ",noautotune");
744 	if (tcon->ses->server->noblocksnd)
745 		seq_puts(s, ",noblocksend");
746 	if (tcon->ses->server->nosharesock)
747 		seq_puts(s, ",nosharesock");
748 
749 	if (tcon->snapshot_time)
750 		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
751 	if (tcon->handle_timeout)
752 		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
753 	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
754 		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
755 
756 	/*
757 	 * Display file and directory attribute timeout in seconds.
758 	 * If file and directory attribute timeout the same then actimeo
759 	 * was likely specified on mount
760 	 */
761 	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
762 		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
763 	else {
764 		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
765 		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
766 	}
767 	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
768 
769 	if (tcon->ses->chan_max > 1)
770 		seq_printf(s, ",multichannel,max_channels=%zu",
771 			   tcon->ses->chan_max);
772 
773 	if (tcon->use_witness)
774 		seq_puts(s, ",witness");
775 
776 	return 0;
777 }
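/*
 * Taken together, the helpers above build the option string shown in
 * /proc/mounts.  A typical line might look roughly like the following
 * (abridged and illustrative; the exact set of options depends on the
 * mount parameters and the server):
 *
 *   //server/share /mnt/share cifs rw,vers=3.1.1,sec=ntlmssp,cache=strict,
 *   username=user,uid=1000,gid=1000,addr=192.0.2.10,file_mode=0755,
 *   dir_mode=0755,soft,serverino,rsize=4194304,wsize=4194304,... 0 0
 */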
778 
779 static void cifs_umount_begin(struct super_block *sb)
780 {
781 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
782 	struct cifs_tcon *tcon;
783 
784 	if (cifs_sb == NULL)
785 		return;
786 
787 	tcon = cifs_sb_master_tcon(cifs_sb);
788 
789 	spin_lock(&cifs_tcp_ses_lock);
790 	spin_lock(&tcon->tc_lock);
791 	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
792 			    netfs_trace_tcon_ref_see_umount);
793 	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
794 		/* we have other mounts to same share or we have
795 		   already tried to umount this and woken up
796 		   all waiting network requests, nothing to do */
797 		spin_unlock(&tcon->tc_lock);
798 		spin_unlock(&cifs_tcp_ses_lock);
799 		return;
800 	}
801 	/*
802 	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
803 	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
804 	 */
805 	spin_unlock(&tcon->tc_lock);
806 	spin_unlock(&cifs_tcp_ses_lock);
807 
808 	cifs_close_all_deferred_files(tcon);
809 	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
810 	/* cancel_notify_requests(tcon); */
811 	if (tcon->ses && tcon->ses->server) {
812 		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
813 		wake_up_all(&tcon->ses->server->request_q);
814 		wake_up_all(&tcon->ses->server->response_q);
815 		msleep(1); /* yield */
816 		/* we have to kick the requests once more */
817 		wake_up_all(&tcon->ses->server->response_q);
818 		msleep(1);
819 	}
820 
821 	return;
822 }
823 
824 static int cifs_freeze(struct super_block *sb)
825 {
826 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
827 	struct cifs_tcon *tcon;
828 
829 	if (cifs_sb == NULL)
830 		return 0;
831 
832 	tcon = cifs_sb_master_tcon(cifs_sb);
833 
834 	cifs_close_all_deferred_files(tcon);
835 	return 0;
836 }
837 
838 #ifdef CONFIG_CIFS_STATS2
839 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
840 {
841 	/* BB FIXME */
842 	return 0;
843 }
844 #endif
845 
846 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
847 {
848 	return netfs_unpin_writeback(inode, wbc);
849 }
850 
851 static int cifs_drop_inode(struct inode *inode)
852 {
853 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
854 
855 	/* no serverino => unconditional eviction */
856 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
857 		generic_drop_inode(inode);
858 }
859 
860 static const struct super_operations cifs_super_ops = {
861 	.statfs = cifs_statfs,
862 	.alloc_inode = cifs_alloc_inode,
863 	.write_inode	= cifs_write_inode,
864 	.free_inode = cifs_free_inode,
865 	.drop_inode	= cifs_drop_inode,
866 	.evict_inode	= cifs_evict_inode,
867 /*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
868 	.show_devname   = cifs_show_devname,
869 /*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
870 	function unless later we add lazy close of inodes or unless the
871 	kernel forgets to call us with the same number of releases (closes)
872 	as opens */
873 	.show_options = cifs_show_options,
874 	.umount_begin   = cifs_umount_begin,
875 	.freeze_fs      = cifs_freeze,
876 #ifdef CONFIG_CIFS_STATS2
877 	.show_stats = cifs_show_stats,
878 #endif
879 };
880 
881 /*
882  * Get root dentry from superblock according to prefix path mount option.
883  * Return dentry with refcount + 1 on success and NULL otherwise.
884  */
885 static struct dentry *
886 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
887 {
888 	struct dentry *dentry;
889 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
890 	char *full_path = NULL;
891 	char *s, *p;
892 	char sep;
893 
894 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
895 		return dget(sb->s_root);
896 
897 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
898 				cifs_sb_master_tcon(cifs_sb), 0);
899 	if (full_path == NULL)
900 		return ERR_PTR(-ENOMEM);
901 
902 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
903 
904 	sep = CIFS_DIR_SEP(cifs_sb);
905 	dentry = dget(sb->s_root);
906 	s = full_path;
907 
908 	do {
909 		struct inode *dir = d_inode(dentry);
910 		struct dentry *child;
911 
912 		if (!S_ISDIR(dir->i_mode)) {
913 			dput(dentry);
914 			dentry = ERR_PTR(-ENOTDIR);
915 			break;
916 		}
917 
918 		/* skip separators */
919 		while (*s == sep)
920 			s++;
921 		if (!*s)
922 			break;
923 		p = s++;
924 		/* next separator */
925 		while (*s && *s != sep)
926 			s++;
927 
928 		child = lookup_positive_unlocked(p, dentry, s - p);
929 		dput(dentry);
930 		dentry = child;
931 	} while (!IS_ERR(dentry));
932 	kfree(full_path);
933 	return dentry;
934 }
935 
936 static int cifs_set_super(struct super_block *sb, void *data)
937 {
938 	struct cifs_mnt_data *mnt_data = data;
939 	sb->s_fs_info = mnt_data->cifs_sb;
940 	return set_anon_super(sb, NULL);
941 }
942 
943 struct dentry *
944 cifs_smb3_do_mount(struct file_system_type *fs_type,
945 	      int flags, struct smb3_fs_context *old_ctx)
946 {
947 	struct cifs_mnt_data mnt_data;
948 	struct cifs_sb_info *cifs_sb;
949 	struct super_block *sb;
950 	struct dentry *root;
951 	int rc;
952 
953 	if (cifsFYI) {
954 		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
955 			 old_ctx->source, flags);
956 	} else {
957 		cifs_info("Attempting to mount %s\n", old_ctx->source);
958 	}
959 
960 	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
961 	if (!cifs_sb)
962 		return ERR_PTR(-ENOMEM);
963 
964 	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
965 	if (!cifs_sb->ctx) {
966 		root = ERR_PTR(-ENOMEM);
967 		goto out;
968 	}
969 	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
970 	if (rc) {
971 		root = ERR_PTR(rc);
972 		goto out;
973 	}
974 
975 	rc = cifs_setup_cifs_sb(cifs_sb);
976 	if (rc) {
977 		root = ERR_PTR(rc);
978 		goto out;
979 	}
980 
981 	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
982 	if (rc) {
983 		if (!(flags & SB_SILENT))
984 			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
985 				 rc);
986 		root = ERR_PTR(rc);
987 		goto out;
988 	}
989 
990 	mnt_data.ctx = cifs_sb->ctx;
991 	mnt_data.cifs_sb = cifs_sb;
992 	mnt_data.flags = flags;
993 
994 	/* BB should we make this contingent on mount parm? */
995 	flags |= SB_NODIRATIME | SB_NOATIME;
996 
997 	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
998 	if (IS_ERR(sb)) {
999 		cifs_umount(cifs_sb);
1000 		return ERR_CAST(sb);
1001 	}
1002 
1003 	if (sb->s_root) {
1004 		cifs_dbg(FYI, "Use existing superblock\n");
1005 		cifs_umount(cifs_sb);
1006 		cifs_sb = NULL;
1007 	} else {
1008 		rc = cifs_read_super(sb);
1009 		if (rc) {
1010 			root = ERR_PTR(rc);
1011 			goto out_super;
1012 		}
1013 
1014 		sb->s_flags |= SB_ACTIVE;
1015 	}
1016 
1017 	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
1018 	if (IS_ERR(root))
1019 		goto out_super;
1020 
1021 	if (cifs_sb)
1022 		cifs_sb->root = dget(root);
1023 
1024 	cifs_dbg(FYI, "dentry root is: %p\n", root);
1025 	return root;
1026 
1027 out_super:
1028 	deactivate_locked_super(sb);
1029 	return root;
1030 out:
1031 	kfree(cifs_sb->prepath);
1032 	smb3_cleanup_fs_context(cifs_sb->ctx);
1033 	kfree(cifs_sb);
1034 	return root;
1035 }
1036 
1037 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1038 {
1039 	struct cifsFileInfo *cfile = file->private_data;
1040 	struct cifs_tcon *tcon;
1041 
1042 	/*
1043 	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1044 	 * the cached file length
1045 	 */
1046 	if (whence != SEEK_SET && whence != SEEK_CUR) {
1047 		int rc;
1048 		struct inode *inode = file_inode(file);
1049 
1050 		/*
1051 		 * We need to be sure that all dirty pages are written and the
1052 		 * server has the newest file length.
1053 		 */
1054 		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1055 		    inode->i_mapping->nrpages != 0) {
1056 			rc = filemap_fdatawait(inode->i_mapping);
1057 			if (rc) {
1058 				mapping_set_error(inode->i_mapping, rc);
1059 				return rc;
1060 			}
1061 		}
1062 		/*
1063 		 * Some applications poll for the file length in this strange
1064 		 * way so we must seek to end on non-oplocked files by
1065 		 * setting the revalidate time to zero.
1066 		 */
1067 		CIFS_I(inode)->time = 0;
1068 
1069 		rc = cifs_revalidate_file_attr(file);
1070 		if (rc < 0)
1071 			return (loff_t)rc;
1072 	}
1073 	if (cfile && cfile->tlink) {
1074 		tcon = tlink_tcon(cfile->tlink);
1075 		if (tcon->ses->server->ops->llseek)
1076 			return tcon->ses->server->ops->llseek(file, tcon,
1077 							      offset, whence);
1078 	}
1079 	return generic_file_llseek(file, offset, whence);
1080 }
1081 
1082 static int
1083 cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
1084 {
1085 	/*
1086 	 * Note that this is called by vfs setlease with i_lock held to
1087 	 * protect *lease from going away.
1088 	 */
1089 	struct inode *inode = file_inode(file);
1090 	struct cifsFileInfo *cfile = file->private_data;
1091 
1092 	/* Check if file is oplocked if this is request for new lease */
1093 	if (arg == F_UNLCK ||
1094 	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1095 	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1096 		return generic_setlease(file, arg, lease, priv);
1097 	else if (tlink_tcon(cfile->tlink)->local_lease &&
1098 		 !CIFS_CACHE_READ(CIFS_I(inode)))
1099 		/*
1100 		 * If the server claims to support oplock on this file, then we
1101 		 * still need to check oplock even if the local_lease mount
1102 		 * option is set, but there are servers which do not support
1103 		 * oplock for which this mount option may be useful if the user
1104 		 * knows that the file won't be changed on the server by anyone
1105 		 * else.
1106 		 */
1107 		return generic_setlease(file, arg, lease, priv);
1108 	else
1109 		return -EAGAIN;
1110 }
1111 
1112 struct file_system_type cifs_fs_type = {
1113 	.owner = THIS_MODULE,
1114 	.name = "cifs",
1115 	.init_fs_context = smb3_init_fs_context,
1116 	.parameters = smb3_fs_parameters,
1117 	.kill_sb = cifs_kill_sb,
1118 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1119 };
1120 MODULE_ALIAS_FS("cifs");
1121 
1122 struct file_system_type smb3_fs_type = {
1123 	.owner = THIS_MODULE,
1124 	.name = "smb3",
1125 	.init_fs_context = smb3_init_fs_context,
1126 	.parameters = smb3_fs_parameters,
1127 	.kill_sb = cifs_kill_sb,
1128 	.fs_flags = FS_RENAME_DOES_D_MOVE,
1129 };
1130 MODULE_ALIAS_FS("smb3");
1131 MODULE_ALIAS("smb3");
1132 
1133 const struct inode_operations cifs_dir_inode_ops = {
1134 	.create = cifs_create,
1135 	.atomic_open = cifs_atomic_open,
1136 	.lookup = cifs_lookup,
1137 	.getattr = cifs_getattr,
1138 	.unlink = cifs_unlink,
1139 	.link = cifs_hardlink,
1140 	.mkdir = cifs_mkdir,
1141 	.rmdir = cifs_rmdir,
1142 	.rename = cifs_rename2,
1143 	.permission = cifs_permission,
1144 	.setattr = cifs_setattr,
1145 	.symlink = cifs_symlink,
1146 	.mknod   = cifs_mknod,
1147 	.listxattr = cifs_listxattr,
1148 	.get_acl = cifs_get_acl,
1149 	.set_acl = cifs_set_acl,
1150 };
1151 
1152 const struct inode_operations cifs_file_inode_ops = {
1153 	.setattr = cifs_setattr,
1154 	.getattr = cifs_getattr,
1155 	.permission = cifs_permission,
1156 	.listxattr = cifs_listxattr,
1157 	.fiemap = cifs_fiemap,
1158 	.get_acl = cifs_get_acl,
1159 	.set_acl = cifs_set_acl,
1160 };
1161 
1162 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1163 			    struct delayed_call *done)
1164 {
1165 	char *target_path;
1166 
1167 	if (!dentry)
1168 		return ERR_PTR(-ECHILD);
1169 
1170 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1171 	if (!target_path)
1172 		return ERR_PTR(-ENOMEM);
1173 
1174 	spin_lock(&inode->i_lock);
1175 	if (likely(CIFS_I(inode)->symlink_target)) {
1176 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1177 	} else {
1178 		kfree(target_path);
1179 		target_path = ERR_PTR(-EOPNOTSUPP);
1180 	}
1181 	spin_unlock(&inode->i_lock);
1182 
1183 	if (!IS_ERR(target_path))
1184 		set_delayed_call(done, kfree_link, target_path);
1185 
1186 	return target_path;
1187 }
1188 
1189 const struct inode_operations cifs_symlink_inode_ops = {
1190 	.get_link = cifs_get_link,
1191 	.setattr = cifs_setattr,
1192 	.permission = cifs_permission,
1193 	.listxattr = cifs_listxattr,
1194 };
1195 
1196 /*
1197  * Advance the EOF marker to after the source range.
1198  */
1199 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1200 				struct cifs_tcon *src_tcon,
1201 				unsigned int xid, loff_t src_end)
1202 {
1203 	struct cifsFileInfo *writeable_srcfile;
1204 	int rc = -EINVAL;
1205 
1206 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1207 	if (writeable_srcfile) {
1208 		if (src_tcon->ses->server->ops->set_file_size)
1209 			rc = src_tcon->ses->server->ops->set_file_size(
1210 				xid, src_tcon, writeable_srcfile,
1211 				src_inode->i_size, true /* no need to set sparse */);
1212 		else
1213 			rc = -ENOSYS;
1214 		cifsFileInfo_put(writeable_srcfile);
1215 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1216 	}
1217 
1218 	if (rc < 0)
1219 		goto set_failed;
1220 
1221 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1222 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1223 	return 0;
1224 
1225 set_failed:
1226 	return filemap_write_and_wait(src_inode->i_mapping);
1227 }
1228 
1229 /*
1230  * Flush out either the folio that overlaps the beginning of a range in which
1231  * pos resides or the folio that overlaps the end of a range unless that folio
1232  * is entirely within the range we're going to invalidate.  We extend the flush
1233  * bounds to encompass the folio.
1234  */
1235 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1236 			    bool first)
1237 {
1238 	struct folio *folio;
1239 	unsigned long long fpos, fend;
1240 	pgoff_t index = pos / PAGE_SIZE;
1241 	size_t size;
1242 	int rc = 0;
1243 
1244 	folio = filemap_get_folio(inode->i_mapping, index);
1245 	if (IS_ERR(folio))
1246 		return 0;
1247 
1248 	size = folio_size(folio);
1249 	fpos = folio_pos(folio);
1250 	fend = fpos + size - 1;
1251 	*_fstart = min_t(unsigned long long, *_fstart, fpos);
1252 	*_fend   = max_t(unsigned long long, *_fend, fend);
1253 	if ((first && pos == fpos) || (!first && pos == fend))
1254 		goto out;
1255 
1256 	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1257 out:
1258 	folio_put(folio);
1259 	return rc;
1260 }
1261 
1262 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1263 		struct file *dst_file, loff_t destoff, loff_t len,
1264 		unsigned int remap_flags)
1265 {
1266 	struct inode *src_inode = file_inode(src_file);
1267 	struct inode *target_inode = file_inode(dst_file);
1268 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1269 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1270 	struct cifsFileInfo *smb_file_src = src_file->private_data;
1271 	struct cifsFileInfo *smb_file_target = dst_file->private_data;
1272 	struct cifs_tcon *target_tcon, *src_tcon;
1273 	unsigned long long destend, fstart, fend, old_size, new_size;
1274 	unsigned int xid;
1275 	int rc;
1276 
1277 	if (remap_flags & REMAP_FILE_DEDUP)
1278 		return -EOPNOTSUPP;
1279 	if (remap_flags & ~REMAP_FILE_ADVISORY)
1280 		return -EINVAL;
1281 
1282 	cifs_dbg(FYI, "clone range\n");
1283 
1284 	xid = get_xid();
1285 
1286 	if (!smb_file_src || !smb_file_target) {
1287 		rc = -EBADF;
1288 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1289 		goto out;
1290 	}
1291 
1292 	src_tcon = tlink_tcon(smb_file_src->tlink);
1293 	target_tcon = tlink_tcon(smb_file_target->tlink);
1294 
1295 	/*
1296 	 * Note: the cifs case is easier than btrfs since the server is responsible
1297 	 * for checking proper open modes and file type, and if it wants, the
1298 	 * server could even support copying a range where source == target.
1299 	 */
1300 	lock_two_nondirectories(target_inode, src_inode);
1301 
1302 	if (len == 0)
1303 		len = src_inode->i_size - off;
1304 
1305 	cifs_dbg(FYI, "clone range\n");
1306 
1307 	/* Flush the source buffer */
1308 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1309 					  off + len - 1);
1310 	if (rc)
1311 		goto unlock;
1312 
1313 	/* The server-side copy will fail if the source crosses the EOF marker.
1314 	 * Advance the EOF marker after the flush above to the end of the range
1315 	 * if it's short of that.
1316 	 */
1317 	if (src_cifsi->netfs.remote_i_size < off + len) {
1318 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1319 		if (rc < 0)
1320 			goto unlock;
1321 	}
1322 
1323 	new_size = destoff + len;
1324 	destend = destoff + len - 1;
1325 
1326 	/* Flush the folios at either end of the destination range to prevent
1327 	 * accidental loss of dirty data outside of the range.
1328 	 */
1329 	fstart = destoff;
1330 	fend = destend;
1331 
1332 	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1333 	if (rc)
1334 		goto unlock;
1335 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1336 	if (rc)
1337 		goto unlock;
1338 	if (fend > target_cifsi->netfs.zero_point)
1339 		target_cifsi->netfs.zero_point = fend + 1;
1340 	old_size = target_cifsi->netfs.remote_i_size;
1341 
1342 	/* Discard all the folios that overlap the destination region. */
1343 	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1344 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1345 
1346 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1347 			   i_size_read(target_inode), 0);
1348 
1349 	rc = -EOPNOTSUPP;
1350 	if (target_tcon->ses->server->ops->duplicate_extents) {
1351 		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1352 			smb_file_src, smb_file_target, off, len, destoff);
1353 		if (rc == 0 && new_size > old_size) {
1354 			truncate_setsize(target_inode, new_size);
1355 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1356 					      new_size);
1357 		}
1358 		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
1359 			target_cifsi->netfs.zero_point = new_size;
1360 	}
1361 
1362 	/* force revalidate of size and timestamps of target file now
1363 	   that target is updated on the server */
1364 	CIFS_I(target_inode)->time = 0;
1365 unlock:
1366 	/* although unlocking in the reverse order from locking is not
1367 	   strictly necessary here it is a little cleaner to be consistent */
1368 	unlock_two_nondirectories(src_inode, target_inode);
1369 out:
1370 	free_xid(xid);
1371 	return rc < 0 ? rc : len;
1372 }
1373 
1374 ssize_t cifs_file_copychunk_range(unsigned int xid,
1375 				struct file *src_file, loff_t off,
1376 				struct file *dst_file, loff_t destoff,
1377 				size_t len, unsigned int flags)
1378 {
1379 	struct inode *src_inode = file_inode(src_file);
1380 	struct inode *target_inode = file_inode(dst_file);
1381 	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1382 	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1383 	struct cifsFileInfo *smb_file_src;
1384 	struct cifsFileInfo *smb_file_target;
1385 	struct cifs_tcon *src_tcon;
1386 	struct cifs_tcon *target_tcon;
1387 	ssize_t rc;
1388 
1389 	cifs_dbg(FYI, "copychunk range\n");
1390 
1391 	if (!src_file->private_data || !dst_file->private_data) {
1392 		rc = -EBADF;
1393 		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1394 		goto out;
1395 	}
1396 
1397 	rc = -EXDEV;
1398 	smb_file_target = dst_file->private_data;
1399 	smb_file_src = src_file->private_data;
1400 	src_tcon = tlink_tcon(smb_file_src->tlink);
1401 	target_tcon = tlink_tcon(smb_file_target->tlink);
1402 
1403 	if (src_tcon->ses != target_tcon->ses) {
1404 		cifs_dbg(FYI, "source and target of copy not on same server\n");
1405 		goto out;
1406 	}
1407 
1408 	rc = -EOPNOTSUPP;
1409 	if (!target_tcon->ses->server->ops->copychunk_range)
1410 		goto out;
1411 
1412 	/*
1413 	 * Note: the cifs case is easier than btrfs since the server is responsible
1414 	 * for checking proper open modes and file type, and if it wants, the
1415 	 * server could even support copying a range where source == target.
1416 	 */
1417 	lock_two_nondirectories(target_inode, src_inode);
1418 
1419 	cifs_dbg(FYI, "about to flush pages\n");
1420 
1421 	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1422 					  off + len - 1);
1423 	if (rc)
1424 		goto unlock;
1425 
1426 	/* The server-side copy will fail if the source crosses the EOF marker.
1427 	 * Advance the EOF marker after the flush above to the end of the range
1428 	 * if it's short of that.
1429 	 */
1430 	if (src_cifsi->netfs.remote_i_size < off + len) {
1431 		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1432 		if (rc < 0)
1433 			goto unlock;
1434 	}
1435 
1436 	/* Flush and invalidate all the folios in the destination region.  If
1437 	 * the copy was successful, then some of the flush is extra overhead,
1438 	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
1439 	 */
1440 	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
1441 	if (rc)
1442 		goto unlock;
1443 
1444 	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1445 			   i_size_read(target_inode), 0);
1446 
1447 	rc = file_modified(dst_file);
1448 	if (!rc) {
1449 		rc = target_tcon->ses->server->ops->copychunk_range(xid,
1450 			smb_file_src, smb_file_target, off, len, destoff);
1451 		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1452 			truncate_setsize(target_inode, destoff + rc);
1453 			netfs_resize_file(&target_cifsi->netfs,
1454 					  i_size_read(target_inode), true);
1455 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
1456 					      i_size_read(target_inode));
1457 		}
1458 		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1459 			target_cifsi->netfs.zero_point = destoff + rc;
1460 	}
1461 
1462 	file_accessed(src_file);
1463 
1464 	/* force revalidate of size and timestamps of target file now
1465 	 * that target is updated on the server
1466 	 */
1467 	CIFS_I(target_inode)->time = 0;
1468 
1469 unlock:
1470 	/* although unlocking in the reverse order from locking is not
1471 	 * strictly necessary here it is a little cleaner to be consistent
1472 	 */
1473 	unlock_two_nondirectories(src_inode, target_inode);
1474 
1475 out:
1476 	return rc;
1477 }
1478 
1479 /*
1480  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1481  * is a dummy operation.
1482  */
1483 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1484 {
1485 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1486 		 file, datasync);
1487 
1488 	return 0;
1489 }
1490 
1491 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1492 				struct file *dst_file, loff_t destoff,
1493 				size_t len, unsigned int flags)
1494 {
1495 	unsigned int xid = get_xid();
1496 	ssize_t rc;
1497 	struct cifsFileInfo *cfile = dst_file->private_data;
1498 
1499 	if (cfile->swapfile) {
1500 		rc = -EOPNOTSUPP;
1501 		free_xid(xid);
1502 		return rc;
1503 	}
1504 
1505 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1506 					len, flags);
1507 	free_xid(xid);
1508 
1509 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1510 		rc = splice_copy_file_range(src_file, off, dst_file,
1511 					    destoff, len);
1512 	return rc;
1513 }
1514 
1515 const struct file_operations cifs_file_ops = {
1516 	.read_iter = cifs_loose_read_iter,
1517 	.write_iter = cifs_file_write_iter,
1518 	.open = cifs_open,
1519 	.release = cifs_close,
1520 	.lock = cifs_lock,
1521 	.flock = cifs_flock,
1522 	.fsync = cifs_fsync,
1523 	.flush = cifs_flush,
1524 	.mmap  = cifs_file_mmap,
1525 	.splice_read = filemap_splice_read,
1526 	.splice_write = iter_file_splice_write,
1527 	.llseek = cifs_llseek,
1528 	.unlocked_ioctl	= cifs_ioctl,
1529 	.copy_file_range = cifs_copy_file_range,
1530 	.remap_file_range = cifs_remap_file_range,
1531 	.setlease = cifs_setlease,
1532 	.fallocate = cifs_fallocate,
1533 };
1534 
1535 const struct file_operations cifs_file_strict_ops = {
1536 	.read_iter = cifs_strict_readv,
1537 	.write_iter = cifs_strict_writev,
1538 	.open = cifs_open,
1539 	.release = cifs_close,
1540 	.lock = cifs_lock,
1541 	.flock = cifs_flock,
1542 	.fsync = cifs_strict_fsync,
1543 	.flush = cifs_flush,
1544 	.mmap = cifs_file_strict_mmap,
1545 	.splice_read = filemap_splice_read,
1546 	.splice_write = iter_file_splice_write,
1547 	.llseek = cifs_llseek,
1548 	.unlocked_ioctl	= cifs_ioctl,
1549 	.copy_file_range = cifs_copy_file_range,
1550 	.remap_file_range = cifs_remap_file_range,
1551 	.setlease = cifs_setlease,
1552 	.fallocate = cifs_fallocate,
1553 };
1554 
1555 const struct file_operations cifs_file_direct_ops = {
1556 	.read_iter = netfs_unbuffered_read_iter,
1557 	.write_iter = netfs_file_write_iter,
1558 	.open = cifs_open,
1559 	.release = cifs_close,
1560 	.lock = cifs_lock,
1561 	.flock = cifs_flock,
1562 	.fsync = cifs_fsync,
1563 	.flush = cifs_flush,
1564 	.mmap = cifs_file_mmap,
1565 	.splice_read = copy_splice_read,
1566 	.splice_write = iter_file_splice_write,
1567 	.unlocked_ioctl  = cifs_ioctl,
1568 	.copy_file_range = cifs_copy_file_range,
1569 	.remap_file_range = cifs_remap_file_range,
1570 	.llseek = cifs_llseek,
1571 	.setlease = cifs_setlease,
1572 	.fallocate = cifs_fallocate,
1573 };
1574 
1575 const struct file_operations cifs_file_nobrl_ops = {
1576 	.read_iter = cifs_loose_read_iter,
1577 	.write_iter = cifs_file_write_iter,
1578 	.open = cifs_open,
1579 	.release = cifs_close,
1580 	.fsync = cifs_fsync,
1581 	.flush = cifs_flush,
1582 	.mmap  = cifs_file_mmap,
1583 	.splice_read = filemap_splice_read,
1584 	.splice_write = iter_file_splice_write,
1585 	.llseek = cifs_llseek,
1586 	.unlocked_ioctl	= cifs_ioctl,
1587 	.copy_file_range = cifs_copy_file_range,
1588 	.remap_file_range = cifs_remap_file_range,
1589 	.setlease = cifs_setlease,
1590 	.fallocate = cifs_fallocate,
1591 };
1592 
1593 const struct file_operations cifs_file_strict_nobrl_ops = {
1594 	.read_iter = cifs_strict_readv,
1595 	.write_iter = cifs_strict_writev,
1596 	.open = cifs_open,
1597 	.release = cifs_close,
1598 	.fsync = cifs_strict_fsync,
1599 	.flush = cifs_flush,
1600 	.mmap = cifs_file_strict_mmap,
1601 	.splice_read = filemap_splice_read,
1602 	.splice_write = iter_file_splice_write,
1603 	.llseek = cifs_llseek,
1604 	.unlocked_ioctl	= cifs_ioctl,
1605 	.copy_file_range = cifs_copy_file_range,
1606 	.remap_file_range = cifs_remap_file_range,
1607 	.setlease = cifs_setlease,
1608 	.fallocate = cifs_fallocate,
1609 };
1610 
1611 const struct file_operations cifs_file_direct_nobrl_ops = {
1612 	.read_iter = netfs_unbuffered_read_iter,
1613 	.write_iter = netfs_file_write_iter,
1614 	.open = cifs_open,
1615 	.release = cifs_close,
1616 	.fsync = cifs_fsync,
1617 	.flush = cifs_flush,
1618 	.mmap = cifs_file_mmap,
1619 	.splice_read = copy_splice_read,
1620 	.splice_write = iter_file_splice_write,
1621 	.unlocked_ioctl  = cifs_ioctl,
1622 	.copy_file_range = cifs_copy_file_range,
1623 	.remap_file_range = cifs_remap_file_range,
1624 	.llseek = cifs_llseek,
1625 	.setlease = cifs_setlease,
1626 	.fallocate = cifs_fallocate,
1627 };
1628 
1629 const struct file_operations cifs_dir_ops = {
1630 	.iterate_shared = cifs_readdir,
1631 	.release = cifs_closedir,
1632 	.read    = generic_read_dir,
1633 	.unlocked_ioctl  = cifs_ioctl,
1634 	.copy_file_range = cifs_copy_file_range,
1635 	.remap_file_range = cifs_remap_file_range,
1636 	.llseek = generic_file_llseek,
1637 	.fsync = cifs_dir_fsync,
1638 };
1639 
1640 static void
1641 cifs_init_once(void *inode)
1642 {
1643 	struct cifsInodeInfo *cifsi = inode;
1644 
1645 	inode_init_once(&cifsi->netfs.inode);
1646 	init_rwsem(&cifsi->lock_sem);
1647 }
1648 
1649 static int __init
1650 cifs_init_inodecache(void)
1651 {
1652 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1653 					      sizeof(struct cifsInodeInfo),
1654 					      0, (SLAB_RECLAIM_ACCOUNT|
1655 						SLAB_ACCOUNT),
1656 					      cifs_init_once);
1657 	if (cifs_inode_cachep == NULL)
1658 		return -ENOMEM;
1659 
1660 	return 0;
1661 }
1662 
1663 static void
1664 cifs_destroy_inodecache(void)
1665 {
1666 	/*
1667 	 * Make sure all delayed rcu free inodes are flushed before we
1668 	 * destroy the cache.
1669 	 */
1670 	rcu_barrier();
1671 	kmem_cache_destroy(cifs_inode_cachep);
1672 }
1673 
1674 static int
1675 cifs_init_request_bufs(void)
1676 {
1677 	/*
1678 	 * The SMB2 maximum header size is bigger than the CIFS one, so there
1679 	 * is no problem allocating a few more bytes for CIFS.
1680 	 */
1681 	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1682 
1683 	if (CIFSMaxBufSize < 8192) {
1684 	/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1685 	 * Unicode path name has to fit in any SMB/CIFS path-based frame */
1686 		CIFSMaxBufSize = 8192;
1687 	} else if (CIFSMaxBufSize > 1024*127) {
1688 		CIFSMaxBufSize = 1024 * 127;
1689 	} else {
1690 		CIFSMaxBufSize &= 0x1FE00; /* Round size down to even 512 byte mult */
1691 	}
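	/*
	 * Illustrative arithmetic (editor's note, example values only): the
	 * mask 0x1FE00 clears the low nine bits, rounding the clamped size
	 * down to a multiple of 512.  For instance CIFSMaxBufSize=130000
	 * (0x1FBD0) becomes 0x1FBD0 & 0x1FE00 = 0x1FA00 = 129536, while the
	 * cap of 1024 * 127 = 130048 (0x1FC00) is already 512-aligned and is
	 * left unchanged.
	 */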
1692 /*
1693 	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1694 		 CIFSMaxBufSize, CIFSMaxBufSize);
1695 */
1696 	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1697 					    CIFSMaxBufSize + max_hdr_size, 0,
1698 					    SLAB_HWCACHE_ALIGN, 0,
1699 					    CIFSMaxBufSize + max_hdr_size,
1700 					    NULL);
1701 	if (cifs_req_cachep == NULL)
1702 		return -ENOMEM;
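	/*
	 * Editor's note: the useroffset/usersize arguments above (0 and the
	 * full buffer length) whitelist the entire request buffer for
	 * copy_to_user()/copy_from_user() when hardened usercopy checking is
	 * enabled.
	 */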
1703 
1704 	if (cifs_min_rcv < 1)
1705 		cifs_min_rcv = 1;
1706 	else if (cifs_min_rcv > 64) {
1707 		cifs_min_rcv = 64;
1708 		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1709 	}
1710 
1711 	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1712 						  cifs_req_cachep);
1713 
1714 	if (cifs_req_poolp == NULL) {
1715 		kmem_cache_destroy(cifs_req_cachep);
1716 		return -ENOMEM;
1717 	}
1718 	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1719 	 * almost all handle-based requests (but not write responses, nor is it
1720 	 * sufficient for path-based requests).  A smaller size would have been
1721 	 * more efficient (compacting multiple slab items onto one 4K page) when
1722 	 * debugging is on, but this larger size allows more SMBs to use the
1723 	 * small buffer alloc, and it is still much more efficient to allocate
1724 	 * one buffer per page off the slab than the 17K (5-page) allocation of
1725 	 * large cifs buffers, even when page debugging is on */
1726 	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1727 			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1728 			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1729 	if (cifs_sm_req_cachep == NULL) {
1730 		mempool_destroy(cifs_req_poolp);
1731 		kmem_cache_destroy(cifs_req_cachep);
1732 		return -ENOMEM;
1733 	}
1734 
1735 	if (cifs_min_small < 2)
1736 		cifs_min_small = 2;
1737 	else if (cifs_min_small > 256) {
1738 		cifs_min_small = 256;
1739 		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1740 	}
1741 
1742 	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1743 						     cifs_sm_req_cachep);
1744 
1745 	if (cifs_sm_req_poolp == NULL) {
1746 		mempool_destroy(cifs_req_poolp);
1747 		kmem_cache_destroy(cifs_req_cachep);
1748 		kmem_cache_destroy(cifs_sm_req_cachep);
1749 		return -ENOMEM;
1750 	}
1751 
1752 	return 0;
1753 }
1754 
1755 static void
1756 cifs_destroy_request_bufs(void)
1757 {
1758 	mempool_destroy(cifs_req_poolp);
1759 	kmem_cache_destroy(cifs_req_cachep);
1760 	mempool_destroy(cifs_sm_req_poolp);
1761 	kmem_cache_destroy(cifs_sm_req_cachep);
1762 }
1763 
1764 static int init_mids(void)
1765 {
1766 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1767 					    sizeof(struct mid_q_entry), 0,
1768 					    SLAB_HWCACHE_ALIGN, NULL);
1769 	if (cifs_mid_cachep == NULL)
1770 		return -ENOMEM;
1771 
1772 	/* 3 is a reasonable minimum number of simultaneous operations */
1773 	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1774 	if (cifs_mid_poolp == NULL) {
1775 		kmem_cache_destroy(cifs_mid_cachep);
1776 		return -ENOMEM;
1777 	}
1778 
1779 	return 0;
1780 }
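/*
 * Editor's sketch (illustrative, not taken from this file): pairing a slab
 * cache with a mempool guarantees that at least the minimum number of
 * mid_q_entry objects (3 above) can always be obtained even under memory
 * pressure.  A caller elsewhere in the client would typically do:
 *
 *	struct mid_q_entry *mid = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
 *	... fill in and use the mid, then release it with ...
 *	mempool_free(mid, cifs_mid_poolp);
 */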
1781 
1782 static void destroy_mids(void)
1783 {
1784 	mempool_destroy(cifs_mid_poolp);
1785 	kmem_cache_destroy(cifs_mid_cachep);
1786 }
1787 
1788 static int cifs_init_netfs(void)
1789 {
1790 	cifs_io_request_cachep =
1791 		kmem_cache_create("cifs_io_request",
1792 				  sizeof(struct cifs_io_request), 0,
1793 				  SLAB_HWCACHE_ALIGN, NULL);
1794 	if (!cifs_io_request_cachep)
1795 		goto nomem_req;
1796 
1797 	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
1798 		goto nomem_reqpool;
1799 
1800 	cifs_io_subrequest_cachep =
1801 		kmem_cache_create("cifs_io_subrequest",
1802 				  sizeof(struct cifs_io_subrequest), 0,
1803 				  SLAB_HWCACHE_ALIGN, NULL);
1804 	if (!cifs_io_subrequest_cachep)
1805 		goto nomem_subreq;
1806 
1807 	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
1808 		goto nomem_subreqpool;
1809 
1810 	return 0;
1811 
1812 nomem_subreqpool:
1813 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1814 nomem_subreq:
1815 	mempool_exit(&cifs_io_request_pool);
1816 nomem_reqpool:
1817 	kmem_cache_destroy(cifs_io_request_cachep);
1818 nomem_req:
1819 	return -ENOMEM;
1820 }
1821 
1822 static void cifs_destroy_netfs(void)
1823 {
1824 	mempool_exit(&cifs_io_subrequest_pool);
1825 	kmem_cache_destroy(cifs_io_subrequest_cachep);
1826 	mempool_exit(&cifs_io_request_pool);
1827 	kmem_cache_destroy(cifs_io_request_cachep);
1828 }
1829 
1830 static int __init
1831 init_cifs(void)
1832 {
1833 	int rc = 0;
1834 	cifs_proc_init();
1835 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
1836 /*
1837  *  Initialize Global counters
1838  */
1839 	atomic_set(&sesInfoAllocCount, 0);
1840 	atomic_set(&tconInfoAllocCount, 0);
1841 	atomic_set(&tcpSesNextId, 0);
1842 	atomic_set(&tcpSesAllocCount, 0);
1843 	atomic_set(&tcpSesReconnectCount, 0);
1844 	atomic_set(&tconInfoReconnectCount, 0);
1845 
1846 	atomic_set(&buf_alloc_count, 0);
1847 	atomic_set(&small_buf_alloc_count, 0);
1848 #ifdef CONFIG_CIFS_STATS2
1849 	atomic_set(&total_buf_alloc_count, 0);
1850 	atomic_set(&total_small_buf_alloc_count, 0);
1851 	if (slow_rsp_threshold < 1)
1852 		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1853 	else if (slow_rsp_threshold > 32767)
1854 		cifs_dbg(VFS,
1855 		       "slow response threshold set higher than recommended (0 to 32767)\n");
1856 #endif /* CONFIG_CIFS_STATS2 */
1857 
1858 	atomic_set(&mid_count, 0);
1859 	GlobalCurrentXid = 0;
1860 	GlobalTotalActiveXid = 0;
1861 	GlobalMaxActiveXid = 0;
1862 	spin_lock_init(&cifs_tcp_ses_lock);
1863 	spin_lock_init(&GlobalMid_Lock);
1864 
1865 	cifs_lock_secret = get_random_u32();
1866 
1867 	if (cifs_max_pending < 2) {
1868 		cifs_max_pending = 2;
1869 		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1870 	} else if (cifs_max_pending > CIFS_MAX_REQ) {
1871 		cifs_max_pending = CIFS_MAX_REQ;
1872 		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1873 			 CIFS_MAX_REQ);
1874 	}
1875 
1876 	/* Limit the maximum to about 18 hours; setting it to zero disables directory entry caching */
1877 	if (dir_cache_timeout > 65000) {
1878 		dir_cache_timeout = 65000;
1879 		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1880 	}
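	/* Editor's note: 65000 / 3600 is about 18.06, hence "about 18 hours" above */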
1881 
1882 	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1883 	if (!cifsiod_wq) {
1884 		rc = -ENOMEM;
1885 		goto out_clean_proc;
1886 	}
1887 
1888 	/*
1889 	 * Consider in the future setting limit != 0, perhaps to min(num_of_cores - 1, 3),
1890 	 * so that we don't launch too many worker threads, but
1891 	 * Documentation/core-api/workqueue.rst recommends setting it to 0
1892 	 */
1893 
1894 	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1895 	decrypt_wq = alloc_workqueue("smb3decryptd",
1896 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1897 	if (!decrypt_wq) {
1898 		rc = -ENOMEM;
1899 		goto out_destroy_cifsiod_wq;
1900 	}
1901 
1902 	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1903 				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1904 	if (!fileinfo_put_wq) {
1905 		rc = -ENOMEM;
1906 		goto out_destroy_decrypt_wq;
1907 	}
1908 
1909 	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1910 					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1911 	if (!cifsoplockd_wq) {
1912 		rc = -ENOMEM;
1913 		goto out_destroy_fileinfo_put_wq;
1914 	}
1915 
1916 	deferredclose_wq = alloc_workqueue("deferredclose",
1917 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1918 	if (!deferredclose_wq) {
1919 		rc = -ENOMEM;
1920 		goto out_destroy_cifsoplockd_wq;
1921 	}
1922 
1923 	serverclose_wq = alloc_workqueue("serverclose",
1924 					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1925 	if (!serverclose_wq) {
1926 		rc = -ENOMEM;
1927 		goto out_destroy_deferredclose_wq;
1928 	}
1929 
1930 	cfid_put_wq = alloc_workqueue("cfid_put_wq",
1931 				      WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1932 	if (!cfid_put_wq) {
1933 		rc = -ENOMEM;
1934 		goto out_destroy_serverclose_wq;
1935 	}
1936 
1937 	rc = cifs_init_inodecache();
1938 	if (rc)
1939 		goto out_destroy_cfid_put_wq;
1940 
1941 	rc = cifs_init_netfs();
1942 	if (rc)
1943 		goto out_destroy_inodecache;
1944 
1945 	rc = init_mids();
1946 	if (rc)
1947 		goto out_destroy_netfs;
1948 
1949 	rc = cifs_init_request_bufs();
1950 	if (rc)
1951 		goto out_destroy_mids;
1952 
1953 #ifdef CONFIG_CIFS_DFS_UPCALL
1954 	rc = dfs_cache_init();
1955 	if (rc)
1956 		goto out_destroy_request_bufs;
1957 #endif /* CONFIG_CIFS_DFS_UPCALL */
1958 #ifdef CONFIG_CIFS_UPCALL
1959 	rc = init_cifs_spnego();
1960 	if (rc)
1961 		goto out_destroy_dfs_cache;
1962 #endif /* CONFIG_CIFS_UPCALL */
1963 #ifdef CONFIG_CIFS_SWN_UPCALL
1964 	rc = cifs_genl_init();
1965 	if (rc)
1966 		goto out_register_key_type;
1967 #endif /* CONFIG_CIFS_SWN_UPCALL */
1968 
1969 	rc = init_cifs_idmap();
1970 	if (rc)
1971 		goto out_cifs_swn_init;
1972 
1973 	rc = register_filesystem(&cifs_fs_type);
1974 	if (rc)
1975 		goto out_init_cifs_idmap;
1976 
1977 	rc = register_filesystem(&smb3_fs_type);
1978 	if (rc) {
1979 		unregister_filesystem(&cifs_fs_type);
1980 		goto out_init_cifs_idmap;
1981 	}
1982 
1983 	return 0;
1984 
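	/*
	 * Editor's note: the labels below unwind the setup in reverse order;
	 * each failure point above jumps to the label that tears down only
	 * what had already been initialized, then falls through the rest.
	 */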
1985 out_init_cifs_idmap:
1986 	exit_cifs_idmap();
1987 out_cifs_swn_init:
1988 #ifdef CONFIG_CIFS_SWN_UPCALL
1989 	cifs_genl_exit();
1990 out_register_key_type:
1991 #endif
1992 #ifdef CONFIG_CIFS_UPCALL
1993 	exit_cifs_spnego();
1994 out_destroy_dfs_cache:
1995 #endif
1996 #ifdef CONFIG_CIFS_DFS_UPCALL
1997 	dfs_cache_destroy();
1998 out_destroy_request_bufs:
1999 #endif
2000 	cifs_destroy_request_bufs();
2001 out_destroy_mids:
2002 	destroy_mids();
2003 out_destroy_netfs:
2004 	cifs_destroy_netfs();
2005 out_destroy_inodecache:
2006 	cifs_destroy_inodecache();
2007 out_destroy_cfid_put_wq:
2008 	destroy_workqueue(cfid_put_wq);
2009 out_destroy_serverclose_wq:
2010 	destroy_workqueue(serverclose_wq);
2011 out_destroy_deferredclose_wq:
2012 	destroy_workqueue(deferredclose_wq);
2013 out_destroy_cifsoplockd_wq:
2014 	destroy_workqueue(cifsoplockd_wq);
2015 out_destroy_fileinfo_put_wq:
2016 	destroy_workqueue(fileinfo_put_wq);
2017 out_destroy_decrypt_wq:
2018 	destroy_workqueue(decrypt_wq);
2019 out_destroy_cifsiod_wq:
2020 	destroy_workqueue(cifsiod_wq);
2021 out_clean_proc:
2022 	cifs_proc_clean();
2023 	return rc;
2024 }
2025 
2026 static void __exit
2027 exit_cifs(void)
2028 {
2029 	cifs_dbg(NOISY, "exit_smb3\n");
2030 	unregister_filesystem(&cifs_fs_type);
2031 	unregister_filesystem(&smb3_fs_type);
2032 	cifs_release_automount_timer();
2033 	exit_cifs_idmap();
2034 #ifdef CONFIG_CIFS_SWN_UPCALL
2035 	cifs_genl_exit();
2036 #endif
2037 #ifdef CONFIG_CIFS_UPCALL
2038 	exit_cifs_spnego();
2039 #endif
2040 #ifdef CONFIG_CIFS_DFS_UPCALL
2041 	dfs_cache_destroy();
2042 #endif
2043 	cifs_destroy_request_bufs();
2044 	destroy_mids();
2045 	cifs_destroy_netfs();
2046 	cifs_destroy_inodecache();
2047 	destroy_workqueue(deferredclose_wq);
2048 	destroy_workqueue(cifsoplockd_wq);
2049 	destroy_workqueue(decrypt_wq);
2050 	destroy_workqueue(fileinfo_put_wq);
2051 	destroy_workqueue(serverclose_wq);
2052 	destroy_workqueue(cfid_put_wq);
2053 	destroy_workqueue(cifsiod_wq);
2054 	cifs_proc_clean();
2055 }
2056 
2057 MODULE_AUTHOR("Steve French");
2058 MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
2059 MODULE_DESCRIPTION
2060 	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2061 	"also older servers complying with the SNIA CIFS Specification)");
2062 MODULE_VERSION(CIFS_VERSION);
2063 MODULE_SOFTDEP("ecb");
2064 MODULE_SOFTDEP("hmac");
2065 MODULE_SOFTDEP("md5");
2066 MODULE_SOFTDEP("nls");
2067 MODULE_SOFTDEP("aes");
2068 MODULE_SOFTDEP("cmac");
2069 MODULE_SOFTDEP("sha256");
2070 MODULE_SOFTDEP("sha512");
2071 MODULE_SOFTDEP("aead2");
2072 MODULE_SOFTDEP("ccm");
2073 MODULE_SOFTDEP("gcm");
2074 module_init(init_cifs)
2075 module_exit(exit_cifs)
2076