xref: /linux/fs/smb/client/cifsfs.c (revision 63e62baaa72e1aceb422f64a50408bc9b02a6022)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the range should be to 119, which
56  * limits maximum year to 2099. But this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61 
/*
 * Module-wide tunables and feature switches; several are also exposed as
 * module parameters further below.
 */
int cifsFYI = 0;		/* debug (FYI) message verbosity */
bool traceSMB;			/* dump raw SMB traffic to the log when true */
bool enable_oplocks = true;
bool linuxExtEnabled = true;	/* CIFS Unix extensions enabled by default */
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
73 
74 /*
75  * Global transaction id (XID) information
76  */
77 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
79 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
80 DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */
81 
82 /*
83  *  Global counters, updated atomically
84  */
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
91 
92 atomic_t mid_count;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
98 #endif/* STATS2 */
99 struct list_head	cifs_tcp_ses_list;
100 DEFINE_SPINLOCK(cifs_tcp_ses_lock);
/* forward declaration; the initializer appears later in this file */
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
/* Module-wide total cached dirents (in bytes) across all tcons */
atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);
126 
127 /*
128  * Write-only module parameter to drop all cached directory entries across
129  * all CIFS mounts. Echo a non-zero value to trigger.
130  */
cifs_drop_all_dir_caches(void)131 static void cifs_drop_all_dir_caches(void)
132 {
133 	struct TCP_Server_Info *server;
134 	struct cifs_ses *ses;
135 	struct cifs_tcon *tcon;
136 
137 	spin_lock(&cifs_tcp_ses_lock);
138 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
139 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
140 			if (cifs_ses_exiting(ses))
141 				continue;
142 			list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
143 				invalidate_all_cached_dirs(tcon);
144 		}
145 	}
146 	spin_unlock(&cifs_tcp_ses_lock);
147 }
148 
cifs_param_set_drop_dir_cache(const char * val,const struct kernel_param * kp)149 static int cifs_param_set_drop_dir_cache(const char *val, const struct kernel_param *kp)
150 {
151 	bool bv;
152 	int rc = kstrtobool(val, &bv);
153 
154 	if (rc)
155 		return rc;
156 	if (bv)
157 		cifs_drop_all_dir_caches();
158 	return 0;
159 }
160 
module_param_call(drop_dir_cache, cifs_param_set_drop_dir_cache, NULL, NULL, 0200);
MODULE_PARM_DESC(drop_dir_cache, "Write 1 to drop all cached directory entries across all CIFS mounts");

#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* workqueues used by the client; created at module init */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
struct workqueue_struct	*cfid_put_wq;
/* random secret mixed into byte-range lock hashes */
__u32 cifs_lock_secret;
201 
202 /*
203  * Bumps refcount for cifs super block.
204  * Note that it should be only called if a reference to VFS super block is
205  * already held, e.g. in open-type syscalls context. Otherwise it can race with
206  * atomic_dec_and_test in deactivate_locked_super.
207  */
208 void
cifs_sb_active(struct super_block * sb)209 cifs_sb_active(struct super_block *sb)
210 {
211 	struct cifs_sb_info *server = CIFS_SB(sb);
212 
213 	if (atomic_inc_return(&server->active) == 1)
214 		atomic_inc(&sb->s_active);
215 }
216 
217 void
cifs_sb_deactive(struct super_block * sb)218 cifs_sb_deactive(struct super_block *sb)
219 {
220 	struct cifs_sb_info *server = CIFS_SB(sb);
221 
222 	if (atomic_dec_and_test(&server->active))
223 		deactivate_super(sb);
224 }
225 
/*
 * Fill in a freshly-allocated super block: flags, time ranges and
 * granularity, bdi/readahead tuning, block size, root inode and root
 * dentry.  Returns 0 on success or a negative errno; on failure the
 * caller is responsible for tearing down the sb.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* snapshot mounts are a view of the past, hence read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive shares need the case-folding dentry ops */
	if (tcon->nocase)
		set_default_d_op(sb, &cifs_ci_dentry_ops);
	else
		set_default_d_op(sb, &cifs_dentry_ops);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		/* d_make_root consumed the inode reference on failure */
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
326 
/*
 * Tear down a cifs super block: drop all cached directory dentries and the
 * root dentry before the generic kill, then unmount the cifs-level state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
346 
/*
 * super_operations.statfs: fill *buf with filesystem statistics for the
 * share backing @dentry.  Name-length and fsid come from cached tcon info;
 * block counts are filled by the dialect-specific queryfs op when present.
 * Returns 0 or a negative errno.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;
	const char *full_path;
	void *page;

	xid = get_xid();
	page = alloc_dentry_path();

	full_path = build_path_from_dentry(dentry, page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto statfs_out;
	}

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);

statfs_out:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
389 
/*
 * fallocate entry point: delegate to the dialect-specific implementation,
 * or report -EOPNOTSUPP when the negotiated dialect has none.
 */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (!server->ops->fallocate)
		return -EOPNOTSUPP;

	return server->ops->fallocate(file, tcon, mode, off, len);
}
401 
cifs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)402 static int cifs_permission(struct mnt_idmap *idmap,
403 			   struct inode *inode, int mask)
404 {
405 	struct cifs_sb_info *cifs_sb;
406 
407 	cifs_sb = CIFS_SB(inode->i_sb);
408 
409 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
410 		if ((mask & MAY_EXEC) && !execute_ok(inode))
411 			return -EACCES;
412 		else
413 			return 0;
414 	} else /* file mode might have been restricted at mount time
415 		on the client (above and beyond ACL on servers) for
416 		servers which do not support setting and viewing mode bits,
417 		so allowing client to check permissions is useful */
418 		return generic_permission(&nop_mnt_idmap, inode, mask);
419 }
420 
/* slab caches and mempools for inodes, request buffers and mids;
 * created during module init */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
432 
/*
 * super_operations.alloc_inode: allocate and initialize a cifsInodeInfo
 * from the slab cache.  Returns the embedded VFS inode, or NULL on
 * allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
	cifs_inode->time = 0;	/* attribute cache timestamp: not yet valid */
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* each inode gets its own lease key for SMB2+ leases */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
471 
472 static void
cifs_free_inode(struct inode * inode)473 cifs_free_inode(struct inode *inode)
474 {
475 	struct cifsInodeInfo *cinode = CIFS_I(inode);
476 
477 	if (S_ISLNK(inode->i_mode))
478 		kfree(cinode->symlink_target);
479 	kmem_cache_free(cifs_inode_cachep, cinode);
480 }
481 
/*
 * super_operations.evict_inode: quiesce netfs I/O, drop the page cache,
 * release fscache cookies, then finish with clear_inode().  Order matters:
 * outstanding I/O must drain before pages and cookies go away.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
492 
493 static void
cifs_show_address(struct seq_file * s,struct TCP_Server_Info * server)494 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
495 {
496 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
497 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
498 
499 	seq_puts(s, ",addr=");
500 
501 	switch (server->dstaddr.ss_family) {
502 	case AF_INET:
503 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
504 		break;
505 	case AF_INET6:
506 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
507 		if (sa6->sin6_scope_id)
508 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
509 		break;
510 	default:
511 		seq_puts(s, "(unknown)");
512 	}
513 	if (server->rdma)
514 		seq_puts(s, ",rdma");
515 }
516 
517 static void
cifs_show_security(struct seq_file * s,struct cifs_ses * ses)518 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
519 {
520 	if (ses->sectype == Unspecified) {
521 		if (ses->user_name == NULL)
522 			seq_puts(s, ",sec=none");
523 		return;
524 	}
525 
526 	seq_puts(s, ",sec=");
527 
528 	switch (ses->sectype) {
529 	case NTLMv2:
530 		seq_puts(s, "ntlmv2");
531 		break;
532 	case Kerberos:
533 		seq_puts(s, "krb5");
534 		break;
535 	case RawNTLMSSP:
536 		seq_puts(s, "ntlmssp");
537 		break;
538 	default:
539 		/* shouldn't ever happen */
540 		seq_puts(s, "unknown");
541 		break;
542 	}
543 
544 	if (ses->sign)
545 		seq_puts(s, "i");
546 
547 	if (ses->sectype == Kerberos)
548 		seq_printf(s, ",cruid=%u",
549 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
550 }
551 
/*
 * Emit the ",cache=" mount option.  The chain order defines precedence
 * when multiple cache flags are set: strict > none > singleclient > ro,
 * falling back to "loose" when none apply.
 */
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
}
568 
569 /*
570  * cifs_show_devname() is used so we show the mount device name with correct
571  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
572  */
cifs_show_devname(struct seq_file * m,struct dentry * root)573 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
574 {
575 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
576 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
577 
578 	if (devname == NULL)
579 		seq_puts(m, "none");
580 	else {
581 		convert_delimiter(devname, '/');
582 		/* escape all spaces in share names */
583 		seq_escape(m, devname, " \t");
584 		kfree(devname);
585 	}
586 	return 0;
587 }
588 
589 static void
cifs_show_upcall_target(struct seq_file * s,struct cifs_sb_info * cifs_sb)590 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
591 {
592 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
593 		seq_puts(s, ",upcall_target=app");
594 		return;
595 	}
596 
597 	seq_puts(s, ",upcall_target=");
598 
599 	switch (cifs_sb->ctx->upcall_target) {
600 	case UPTARGET_APP:
601 		seq_puts(s, "app");
602 		break;
603 	case UPTARGET_MOUNT:
604 		seq_puts(s, "mount");
605 		break;
606 	default:
607 		/* shouldn't ever happen */
608 		seq_puts(s, "unknown");
609 		break;
610 	}
611 }
612 
613 /*
614  * cifs_show_options() is for displaying mount options in /proc/mounts.
615  * Not all settable options are displayed but most of the important
616  * ones are.
617  */
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.  Output order is part of the user-visible format; do not
 * reorder the emissions below.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* source address is only shown when one was bound at mount time */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->ses->unicode == 0)
		seq_puts(s, ",nounicode");
	else if (tcon->ses->unicode == 1)
		seq_puts(s, ",unicode");
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* boolean mount-flag options, emitted in fixed order */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
	if (cifs_sb->ctx->nonativesocket)
		seq_puts(s, ",nonativesocket");
	else
		seq_puts(s, ",nativesocket");
	seq_show_option(s, "symlink",
			cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
821 
/*
 * super_operations.umount_begin (umount -f): if this is the only mount of
 * the share, wake all tasks blocked on the server queues so they can bail
 * out.  Does not mark the tcon as exiting yet, since the forced umount may
 * still fail (e.g. due to open files).
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	/* both locks held while inspecting the tcon refcount/status */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
866 
/*
 * super_operations.freeze_fs: flush deferred (lazily closed) file handles
 * before the filesystem is frozen.  Always succeeds.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (!cifs_sb)
		return 0;

	cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
880 
881 #ifdef CONFIG_CIFS_STATS2
/* super_operations.show_stats: per-sb statistics not implemented yet */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
887 #endif
888 
/* super_operations.write_inode: defer to netfs to unpin pinned writeback */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
893 
cifs_drop_inode(struct inode * inode)894 static int cifs_drop_inode(struct inode *inode)
895 {
896 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
897 
898 	/* no serverino => unconditional eviction */
899 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
900 		inode_generic_drop(inode);
901 }
902 
/* VFS callbacks for cifs super blocks; referenced from cifs_read_super() */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
923 
924 /*
925  * Get root dentry from superblock according to prefix path mount option.
926  * Return dentry with refcount + 1 on success and NULL otherwise.
927  */
928 static struct dentry *
cifs_get_root(struct smb3_fs_context * ctx,struct super_block * sb)929 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
930 {
931 	struct dentry *dentry;
932 	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
933 	char *full_path = NULL;
934 	char *s, *p;
935 	char sep;
936 
937 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
938 		return dget(sb->s_root);
939 
940 	full_path = cifs_build_path_to_root(ctx, cifs_sb,
941 				cifs_sb_master_tcon(cifs_sb), 0);
942 	if (full_path == NULL)
943 		return ERR_PTR(-ENOMEM);
944 
945 	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
946 
947 	sep = CIFS_DIR_SEP(cifs_sb);
948 	dentry = dget(sb->s_root);
949 	s = full_path;
950 
951 	do {
952 		struct inode *dir = d_inode(dentry);
953 		struct dentry *child;
954 
955 		if (!S_ISDIR(dir->i_mode)) {
956 			dput(dentry);
957 			dentry = ERR_PTR(-ENOTDIR);
958 			break;
959 		}
960 
961 		/* skip separators */
962 		while (*s == sep)
963 			s++;
964 		if (!*s)
965 			break;
966 		p = s++;
967 		/* next separator */
968 		while (*s && *s != sep)
969 			s++;
970 
971 		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
972 							dentry);
973 		dput(dentry);
974 		dentry = child;
975 	} while (!IS_ERR(dentry));
976 	kfree(full_path);
977 	return dentry;
978 }
979 
/*
 * sget() "set" callback: attach the prepared cifs_sb to the fresh
 * superblock and assign it an anonymous device number.
 */
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}
986 
/*
 * Mount entry point shared by the "cifs" and "smb3" filesystem types:
 * build a cifs_sb from the parsed fs context, connect to the server
 * via cifs_mount(), then find or create a matching superblock with
 * sget() and return the root dentry (honouring any prefix path).
 * Returns the root dentry or ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	/* each superblock carries its own private copy of the fs context */
	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		/* sget() matched an existing sb; our cifs_sb is redundant */
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL when an existing sb was reused above */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1080 
/*
 * ->llseek for cifs files.  For whence values that depend on file size
 * (SEEK_END/SEEK_DATA/SEEK_HOLE) the cached length may be stale, so
 * flush dirty pages and revalidate attributes from the server first.
 * A protocol-specific llseek op is used when available; otherwise fall
 * back to the generic helper.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1125 
/*
 * ->setlease for cifs files.  A lease is granted locally only when we
 * already hold a matching oplock/cache grant from the server, or when
 * the local_lease mount option tells us to trust local state; otherwise
 * return -EAGAIN.  Unlock requests always pass through.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1155 
/* Filesystem type registered for "cifs" mounts. */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1165 
/* Filesystem type registered for "smb3" mounts (same callbacks as "cifs"). */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1176 
/* Inode operations for directories. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1195 
/* Inode operations for regular files. */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1205 
/*
 * ->get_link: return a copy of the symlink target cached in the cifs
 * inode.  The copy is made under i_lock and handed to the VFS with a
 * delayed_call so it is kfree'd when no longer needed.  RCU-walk
 * (dentry == NULL) is not supported.
 */
const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			    struct delayed_call *done)
{
	char *target_path;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		/* no cached symlink target available */
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}
1232 
/* Inode operations for symlinks. */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1239 
1240 /*
1241  * Advance the EOF marker to after the source range.
1242  */
cifs_precopy_set_eof(struct inode * src_inode,struct cifsInodeInfo * src_cifsi,struct cifs_tcon * src_tcon,unsigned int xid,loff_t src_end)1243 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1244 				struct cifs_tcon *src_tcon,
1245 				unsigned int xid, loff_t src_end)
1246 {
1247 	struct cifsFileInfo *writeable_srcfile;
1248 	int rc = -EINVAL;
1249 
1250 	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1251 	if (writeable_srcfile) {
1252 		if (src_tcon->ses->server->ops->set_file_size)
1253 			rc = src_tcon->ses->server->ops->set_file_size(
1254 				xid, src_tcon, writeable_srcfile,
1255 				src_inode->i_size, true /* no need to set sparse */);
1256 		else
1257 			rc = -ENOSYS;
1258 		cifsFileInfo_put(writeable_srcfile);
1259 		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
1260 	}
1261 
1262 	if (rc < 0)
1263 		goto set_failed;
1264 
1265 	netfs_resize_file(&src_cifsi->netfs, src_end, true);
1266 	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1267 	return 0;
1268 
1269 set_failed:
1270 	return filemap_write_and_wait(src_inode->i_mapping);
1271 }
1272 
1273 /*
1274  * Flush out either the folio that overlaps the beginning of a range in which
1275  * pos resides or the folio that overlaps the end of a range unless that folio
1276  * is entirely within the range we're going to invalidate.  We extend the flush
1277  * bounds to encompass the folio.
1278  */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	/* no folio cached at pos => nothing to flush */
	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* widen the caller's flush/invalidate bounds to folio boundaries */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend   = max_t(unsigned long long, *_fend, fend);
	/* folio lies entirely within the range: no flush needed */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1305 
/*
 * ->remap_file_range: implement clone (FICLONE/FICLONERANGE) via the
 * server's duplicate_extents operation.  Dedup is not supported.
 * Returns len on success, a negative error otherwise.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0: clone from off through the current source EOF */
	if (len == 0)
		len = src_inode->i_size - off;

	/* NOTE(review): duplicate of the "clone range" FYI message above */
	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* clone extended the file: update local size state */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		} else if (rc == -EOPNOTSUPP) {
			/*
			 * copy_file_range syscall man page indicates EINVAL
			 * is returned e.g when "fd_in and fd_out refer to the
			 * same file and the source and target ranges overlap."
			 * Test generic/157 was what showed these cases where
			 * we need to remap EOPNOTSUPP to EINVAL
			 */
			if (off >= src_inode->i_size) {
				rc = -EINVAL;
			} else if (src_inode == target_inode) {
				if (off + len > destoff)
					rc = -EINVAL;
			}
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1431 
/*
 * Server-side copy of [off, off + len) from src_file into dst_file at
 * destoff using the protocol's copychunk_range operation.  Both files
 * must be on the same SMB session (-EXDEV otherwise).  Returns the
 * number of bytes copied or a negative error.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	/* make sure the server sees current source data before copying */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			/* copy extended the file: grow local size state too */
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1536 
1537 /*
1538  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1539  * is a dummy operation.
1540  */
cifs_dir_fsync(struct file * file,loff_t start,loff_t end,int datasync)1541 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1542 {
1543 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1544 		 file, datasync);
1545 
1546 	return 0;
1547 }
1548 
cifs_copy_file_range(struct file * src_file,loff_t off,struct file * dst_file,loff_t destoff,size_t len,unsigned int flags)1549 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1550 				struct file *dst_file, loff_t destoff,
1551 				size_t len, unsigned int flags)
1552 {
1553 	unsigned int xid = get_xid();
1554 	ssize_t rc;
1555 	struct cifsFileInfo *cfile = dst_file->private_data;
1556 
1557 	if (cfile->swapfile) {
1558 		rc = -EOPNOTSUPP;
1559 		free_xid(xid);
1560 		return rc;
1561 	}
1562 
1563 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1564 					len, flags);
1565 	free_xid(xid);
1566 
1567 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1568 		rc = splice_copy_file_range(src_file, off, dst_file,
1569 					    destoff, len);
1570 	return rc;
1571 }
1572 
/* Regular-file ops: buffered ("loose") I/O with byte-range lock support. */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1592 
/* Regular-file ops for strict cache mode (cifs_strict_* I/O paths). */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1612 
/* Regular-file ops for direct (unbuffered) I/O via the netfs helpers. */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1632 
/* As cifs_file_ops but without .lock/.flock (nobrl: no byte-range locks). */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1650 
/* As cifs_file_strict_ops but without .lock/.flock (nobrl variant). */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1668 
/* As cifs_file_direct_ops but without .lock/.flock (nobrl variant). */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1686 
/* Directory file ops; fsync is a no-op since directory ops are synchronous. */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1697 
/*
 * Slab constructor for cifsInodeInfo objects; runs once per slab object
 * (not on every allocation), initializing the embedded VFS inode and
 * the byte-range lock semaphore.
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1706 
/* Create the slab cache backing cifs inode allocation.  Returns 0 or -ENOMEM. */
static int __init
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}
1720 
/* Tear down the cifs inode slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1731 
/*
 * Allocate the slab caches and mempools for SMB request/response
 * buffers: one large-buffer pool (sized by CIFSMaxBufSize plus the SMB2
 * header) and one small-buffer pool.  The module parameters
 * CIFSMaxBufSize, cifs_min_rcv and cifs_min_small are clamped to sane
 * ranges first.  Returns 0 on success or -ENOMEM (with any partially
 * created caches/pools torn down).
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1812 
/* Free the request-buffer mempools, then their backing slab caches. */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1821 
/*
 * Create the slab cache and mempool for mid_q_entry structures, which
 * track in-flight requests by multiplex id.  Returns 0 or -ENOMEM.
 */
static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}
1839 
/* Free the mid mempool, then its backing slab cache. */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1845 
/*
 * Create the slab caches and mempools backing netfs I/O requests and
 * subrequests.  Uses goto-unwind so partially created resources are
 * released on failure.  Returns 0 or -ENOMEM.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1879 
/*
 * Undo cifs_init_netfs() in reverse order of construction: each mempool
 * is drained before the slab cache that backs it is destroyed.
 */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1887 
/*
 * Module initialization: set up global state, clamp tunable module
 * parameters into valid ranges, allocate the workqueues and memory
 * caches the client needs, initialize the optional upcall/DFS/netlink
 * subsystems, and finally register the "cifs" and "smb3" filesystem
 * types.  Every step that can fail unwinds all prior steps through the
 * goto ladder at the bottom (note the labels interleaved with #ifdefs so
 * the unwind matches whichever optional subsystems were compiled in).
 *
 * Return: 0 on success, negative errno on failure.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	/* warn (but don't clamp) if the slow-response tunable looks off */
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;

	/* random secret used when hashing byte-range lock ownership */
	cifs_lock_secret = get_random_u32();

	/* clamp the max-in-flight-requests module parameter to [2, CIFS_MAX_REQ] */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	/* general-purpose workqueue for demultiplex/reconnect work */
	cifsiod_wq = alloc_workqueue("cifsiod",
				     WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				     0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				      0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

	/* memory caches: inodes, netfs I/O structs, mids, request buffers */
	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	/* optional subsystems, each guarded by its config option */
#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	/* register both filesystem type names; they must succeed together */
	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/*
	 * Error unwind, in reverse order of construction.  Labels inside
	 * #ifdef blocks only exist when the matching subsystem is built in,
	 * keeping the unwind consistent with the init sequence above.
	 */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
2087 
/*
 * Module teardown: unregister both filesystem types, then release the
 * optional subsystems and the caches/workqueues allocated in init_cifs(),
 * roughly mirroring the init sequence in reverse.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2118 
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies on the crypto and NLS modules used for SMB signing,
 * encryption and charset handling — presumably so userspace tooling
 * (depmod/initramfs generators) pulls them in alongside this module;
 * they are not hard link-time dependencies.  NOTE(review): confirm the
 * exact consumer of each softdep against current crypto usage.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2138