xref: /linux/fs/smb/client/cifsfs.c (revision 42eb01783091e49020221a8a7d6c00e154ae7e58)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <linux/mm.h>
32 #include <linux/key-type.h>
33 #include <uapi/linux/magic.h>
34 #include <net/ipv6.h>
35 #include "cifsfs.h"
36 #define DECLARE_GLOBALS_HERE
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "smb2proto.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
/* DOS time stores seconds in 2-second units, so 29 encodes 58 seconds */
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

/* Module-wide behavior knobs; several are exported as module params below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head	cifs_tcp_ses_list;
DEFINE_SPINLOCK(cifs_tcp_ses_lock);
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
/* Module-wide total cached dirents (in bytes) across all tcons */
atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);
126 
/*
 * Write-only module parameter to drop all cached directory entries across
 * all CIFS mounts. Echo a non-zero value to trigger.
 */
/*
 * Walk every connected server -> session -> tree connection and invalidate
 * each tcon's cached directory entries.  The whole walk runs under
 * cifs_tcp_ses_lock so list membership cannot change while we iterate.
 */
static void cifs_drop_all_dir_caches(void)
{
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			/* skip sessions already being torn down */
			if (cifs_ses_exiting(ses))
				continue;
			list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
				invalidate_all_cached_dirs(tcon);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
148 
cifs_param_set_drop_dir_cache(const char * val,const struct kernel_param * kp)149 static int cifs_param_set_drop_dir_cache(const char *val, const struct kernel_param *kp)
150 {
151 	bool bv;
152 	int rc = kstrtobool(val, &bv);
153 
154 	if (rc)
155 		return rc;
156 	if (bv)
157 		cifs_drop_all_dir_caches();
158 	return 0;
159 }
160 
module_param_call(drop_dir_cache, cifs_param_set_drop_dir_cache, NULL, NULL, 0200);
MODULE_PARM_DESC(drop_dir_cache, "Write 1 to drop all cached directory entries across all CIFS mounts");

#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* Workqueues used for async/deferred client work; created at module init */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
struct workqueue_struct	*cfid_put_wq;
/* NOTE(review): secret mixed into byte-range lock hashing — initialized at module init (not shown here); confirm in init path */
__u32 cifs_lock_secret;
201 
/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	/* first cifs-level reference also pins the VFS super block */
	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}
216 
/*
 * Drops the refcount taken by cifs_sb_active(); when the last cifs-level
 * reference goes away, release the VFS super block reference too.
 */
void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}
225 
/*
 * Populate a freshly allocated cifs superblock: mount flags, maximum file
 * size, timestamp range/granularity, BDI/readahead sizing, dentry ops and
 * the root inode/dentry.  Called on the mount path once the master tcon
 * (tree connection) for this mount exists.  Returns 0 or -errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int sbflags;
	struct timespec64 ts;
	struct inode *inode;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
	sbflags = cifs_sb_flags(cifs_sb);

	if (sbflags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a point-in-time snapshot are inherently read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* nocase mounts need case-insensitive dentry hash/compare ops */
	if (tcon->nocase)
		set_default_d_op(sb, &cifs_ci_dentry_ops);
	else
		set_default_d_op(sb, &cifs_dentry_ops);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (sbflags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
328 
/*
 * Tear down a cifs superblock: drop cached-directory dentries and the root
 * dentry first (they would otherwise keep inodes busy), then kill the
 * anonymous super and unmount the cifs-level state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
348 
/*
 * ->statfs superblock op: fill @buf with filesystem statistics for the
 * share backing @dentry.  Locally known fields (f_namelen, f_fsid, file
 * counts) are filled first; space usage is then queried from the server
 * via the dialect-specific queryfs op, when one exists.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;
	const char *full_path;
	void *page;

	xid = get_xid();
	page = alloc_dentry_path();

	full_path = build_path_from_dentry(dentry, page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto statfs_out;
	}

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);

statfs_out:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
391 
/*
 * ->fallocate file op: delegate to the dialect-specific fallocate handler
 * under the inode lock, after draining in-flight netfs I/O.  Returns
 * -EOPNOTSUPP when the negotiated dialect provides no fallocate op.
 */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_tcon *tcon = cifs_sb_master_tcon(CIFS_SB(file));
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = file_inode(file);
	int rc;

	if (!server->ops->fallocate)
		return -EOPNOTSUPP;

	/* killable lock so a fatal signal cannot wedge the caller here */
	rc = inode_lock_killable(inode);
	if (rc)
		return rc;

	netfs_wait_for_outstanding_io(inode);

	/* update timestamps / strip setuid bits before changing the layout */
	rc = file_modified(file);
	if (rc)
		goto out_unlock;

	rc = server->ops->fallocate(file, tcon, mode, off, len);

out_unlock:
	inode_unlock(inode);
	return rc;
}
418 
cifs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)419 static int cifs_permission(struct mnt_idmap *idmap,
420 			   struct inode *inode, int mask)
421 {
422 	unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
423 
424 	if (sbflags & CIFS_MOUNT_NO_PERM) {
425 		if ((mask & MAY_EXEC) && !execute_ok(inode))
426 			return -EACCES;
427 		else
428 			return 0;
429 	} else /* file mode might have been restricted at mount time
430 		on the client (above and beyond ACL on servers) for
431 		servers which do not support setting and viewing mode bits,
432 		so allowing client to check permissions is useful */
433 		return generic_permission(&nop_mnt_idmap, inode, mask);
434 }
435 
/* Slab caches and mempools backing frequently allocated client objects */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t cifs_mid_pool;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
447 
/*
 * ->alloc_inode superblock op: allocate a cifsInodeInfo from the inode
 * slab and initialize every cifs-specific field to a safe default.
 * Returns the embedded VFS inode, or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* each inode gets its own random lease key */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
486 
/*
 * ->free_inode superblock op: free the symlink target string, if any,
 * then return the cifsInodeInfo to its slab cache.
 */
static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}
496 
/*
 * ->evict_inode superblock op: drain outstanding netfs I/O, drop the page
 * cache, and detach fscache cookies before the inode is destroyed.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	/* if this inode was pinning the fscache cookie for writeback, unpin it */
	if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
507 
508 static void
cifs_show_address(struct seq_file * s,struct TCP_Server_Info * server)509 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
510 {
511 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
512 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
513 
514 	seq_puts(s, ",addr=");
515 
516 	switch (server->dstaddr.ss_family) {
517 	case AF_INET:
518 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
519 		break;
520 	case AF_INET6:
521 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
522 		if (sa6->sin6_scope_id)
523 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
524 		break;
525 	default:
526 		seq_puts(s, "(unknown)");
527 	}
528 	if (server->rdma)
529 		seq_puts(s, ",rdma");
530 }
531 
532 static void
cifs_show_security(struct seq_file * s,struct cifs_ses * ses)533 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
534 {
535 	if (ses->sectype == Unspecified) {
536 		if (ses->user_name == NULL)
537 			seq_puts(s, ",sec=none");
538 		return;
539 	}
540 
541 	seq_puts(s, ",sec=");
542 
543 	switch (ses->sectype) {
544 	case NTLMv2:
545 		seq_puts(s, "ntlmv2");
546 		break;
547 	case Kerberos:
548 		seq_puts(s, "krb5");
549 		break;
550 	case RawNTLMSSP:
551 		seq_puts(s, "ntlmssp");
552 		break;
553 	default:
554 		/* shouldn't ever happen */
555 		seq_puts(s, "unknown");
556 		break;
557 	}
558 
559 	if (ses->sign)
560 		seq_puts(s, "i");
561 
562 	if (ses->sectype == Kerberos)
563 		seq_printf(s, ",cruid=%u",
564 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
565 }
566 
567 static void
cifs_show_cache_flavor(struct seq_file * s,struct cifs_sb_info * cifs_sb)568 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
569 {
570 	unsigned int sbflags = cifs_sb_flags(cifs_sb);
571 
572 	seq_puts(s, ",cache=");
573 
574 	if (sbflags & CIFS_MOUNT_STRICT_IO)
575 		seq_puts(s, "strict");
576 	else if (sbflags & CIFS_MOUNT_DIRECT_IO)
577 		seq_puts(s, "none");
578 	else if (sbflags & CIFS_MOUNT_RW_CACHE)
579 		seq_puts(s, "singleclient"); /* assume only one client access */
580 	else if (sbflags & CIFS_MOUNT_RO_CACHE)
581 		seq_puts(s, "ro"); /* read only caching assumed */
582 	else
583 		seq_puts(s, "loose");
584 }
585 
586 /*
587  * cifs_show_devname() is used so we show the mount device name with correct
588  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
589  */
cifs_show_devname(struct seq_file * m,struct dentry * root)590 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
591 {
592 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
593 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
594 
595 	if (devname == NULL)
596 		seq_puts(m, "none");
597 	else {
598 		convert_delimiter(devname, '/');
599 		/* escape all spaces in share names */
600 		seq_escape(m, devname, " \t");
601 		kfree(devname);
602 	}
603 	return 0;
604 }
605 
606 static void
cifs_show_upcall_target(struct seq_file * s,struct cifs_sb_info * cifs_sb)607 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
608 {
609 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
610 		seq_puts(s, ",upcall_target=app");
611 		return;
612 	}
613 
614 	seq_puts(s, ",upcall_target=");
615 
616 	switch (cifs_sb->ctx->upcall_target) {
617 	case UPTARGET_APP:
618 		seq_puts(s, "app");
619 		break;
620 	case UPTARGET_MOUNT:
621 		seq_puts(s, "mount");
622 		break;
623 	default:
624 		/* shouldn't ever happen */
625 		seq_puts(s, "unknown");
626 		break;
627 	}
628 }
629 
/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	unsigned int sbflags;

	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* show the client-side source address if one was bound at mount */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	sbflags = cifs_sb_flags(cifs_sb);
	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (sbflags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (sbflags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->ses->unicode == 0)
		seq_puts(s, ",nounicode");
	else if (tcon->ses->unicode == 1)
		seq_puts(s, ",unicode");
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* remaining boolean options mirror the CIFS_MOUNT_* flag bits */
	if (sbflags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (sbflags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (sbflags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (sbflags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (sbflags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (sbflags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (sbflags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (sbflags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (sbflags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (sbflags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (sbflags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (sbflags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (sbflags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (sbflags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (sbflags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (sbflags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (sbflags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (sbflags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (sbflags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (sbflags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (sbflags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (sbflags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
	if (cifs_sb->ctx->nonativesocket)
		seq_puts(s, ",nonativesocket");
	else
		seq_puts(s, ",nativesocket");
	seq_show_option(s, "symlink",
			cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
841 
/*
 * ->umount_begin superblock op (umount -f): if this is the last mount
 * using the tcon, close deferred file handles and wake every thread
 * blocked on the server's request/response queues so a forced unmount is
 * not stuck behind outstanding network requests.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	/* lock order: cifs_tcp_ses_lock then tc_lock */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
886 
/*
 * ->freeze_fs superblock op: close deferred (delayed-close) file handles
 * so no background close work races with the frozen filesystem.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return 0;

	tcon = cifs_sb_master_tcon(cifs_sb);

	cifs_close_all_deferred_files(tcon);
	return 0;
}
900 
#ifdef CONFIG_CIFS_STATS2
/* ->show_stats superblock op: per-mount statistics; not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
908 
/* ->write_inode op: let netfs unpin any writeback this inode was pinning */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
913 
cifs_drop_inode(struct inode * inode)914 static int cifs_drop_inode(struct inode *inode)
915 {
916 	unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
917 
918 	/* no serverino => unconditional eviction */
919 	return !(sbflags & CIFS_MOUNT_SERVER_INUM) ||
920 		inode_generic_drop(inode);
921 }
922 
/* Superblock operations table installed by cifs_read_super() */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
943 
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and an ERR_PTR on failure.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with a prefix path the superblock is already rooted at the prefix */
	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk the path one component at a time, looking up each child */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		/* every intermediate component must be a directory */
		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
999 
cifs_set_super(struct super_block * sb,void * data)1000 static int cifs_set_super(struct super_block *sb, void *data)
1001 {
1002 	struct cifs_mnt_data *mnt_data = data;
1003 	sb->s_fs_info = mnt_data->cifs_sb;
1004 	return set_anon_super(sb, NULL);
1005 }
1006 
/*
 * Mount entry point shared by the "cifs" and "smb3" filesystem types.
 * Duplicates the parsed mount context, connects to the server via
 * cifs_mount(), then finds or creates a matching superblock with sget()
 * and returns the root dentry (adjusted for any prefix path) or an
 * ERR_PTR on failure.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}
	cifs_sb = kzalloc_obj(*cifs_sb);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	/* work on a private copy of the fs context so the caller's survives */
	cifs_sb->ctx = kzalloc_obj(struct smb3_fs_context);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		/* matched an existing superblock: our new connection state
		 * is redundant; NULL cifs_sb so we don't touch it below */
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	/* errors before sget(): tear down what we built by hand */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1099 
/*
 * ->llseek: for whence values that depend on the file size (SEEK_END,
 * SEEK_DATA, SEEK_HOLE) the cached length may be stale, so flush dirty
 * pages and revalidate attributes from the server first; then use the
 * dialect-specific llseek op if one is provided.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		/* let the dialect-specific op resolve the offset if provided */
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1144 
/*
 * ->setlease: only grant a local lease when the matching oplock/lease state
 * is already cached (or the tcon's local_lease flag opts in); otherwise
 * return -EAGAIN, since another client could change the file on the server.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1174 
/* The "cifs" filesystem type */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1184 
/* The "smb3" filesystem type: same hooks as cifs_fs_type, distinct name */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1195 
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1214 
/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1224 
cifs_get_link(struct dentry * dentry,struct inode * inode,struct delayed_call * done)1225 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1226 			    struct delayed_call *done)
1227 {
1228 	char *target_path;
1229 
1230 	if (!dentry)
1231 		return ERR_PTR(-ECHILD);
1232 
1233 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1234 	if (!target_path)
1235 		return ERR_PTR(-ENOMEM);
1236 
1237 	spin_lock(&inode->i_lock);
1238 	if (likely(CIFS_I(inode)->symlink_target)) {
1239 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1240 	} else {
1241 		kfree(target_path);
1242 		target_path = ERR_PTR(-EOPNOTSUPP);
1243 	}
1244 	spin_unlock(&inode->i_lock);
1245 
1246 	if (!IS_ERR(target_path))
1247 		set_delayed_call(done, kfree_link, target_path);
1248 
1249 	return target_path;
1250 }
1251 
/* Inode operations for symlinks */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1258 
/*
 * Advance the EOF marker to after the source range.
 *
 * Used before a server-side copy whose source range extends past the
 * server's idea of EOF.  If the EOF cannot be advanced (no writable
 * handle, no set_file_size op, or the call fails), fall back to flushing
 * the source pagecache so the server at least sees the latest data.
 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;

	/* a writable handle is needed to issue the set-EOF request */
	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* record the new remote size locally and in fscache */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	return filemap_write_and_wait(src_inode->i_mapping);
}
1291 
/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate.  We extend the flush
 * bounds to encompass the folio.
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;	/* nothing cached at pos: nothing to flush */

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* widen the caller's flush bounds to cover the whole folio */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend   = max_t(unsigned long long, *_fend, fend);
	/* folio lies entirely inside the invalidated range: skip the write */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1324 
/*
 * ->remap_file_range: server-side clone of [off, off + len) from src_file
 * to destoff in dst_file using the dialect's duplicate_extents op.
 * REMAP_FILE_DEDUP is not supported.  Returns len on success or a
 * negative error code.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "clone from off through the end of the source" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* the clone grew the target: reflect that locally */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		} else if (rc == -EOPNOTSUPP) {
			/*
			 * copy_file_range syscall man page indicates EINVAL
			 * is returned e.g when "fd_in and fd_out refer to the
			 * same file and the source and target ranges overlap."
			 * Test generic/157 was what showed these cases where
			 * we need to remap EOPNOTSUPP to EINVAL
			 */
			if (off >= src_inode->i_size) {
				rc = -EINVAL;
			} else if (src_inode == target_inode) {
				if (off + len > destoff)
					rc = -EINVAL;
			}
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1450 
/*
 * Server-side copy of len bytes from src_file at off into dst_file at
 * destoff via the dialect's copychunk_range op.  Both files must be on
 * the same SMB session.  Returns the number of bytes copied or a
 * negative error code (-EXDEV when the sessions differ, -EOPNOTSUPP
 * when the dialect has no copychunk support).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* rc > 0 is the byte count actually copied by the server */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1555 
1556 /*
1557  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1558  * is a dummy operation.
1559  */
cifs_dir_fsync(struct file * file,loff_t start,loff_t end,int datasync)1560 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1561 {
1562 	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1563 		 file, datasync);
1564 
1565 	return 0;
1566 }
1567 
cifs_copy_file_range(struct file * src_file,loff_t off,struct file * dst_file,loff_t destoff,size_t len,unsigned int flags)1568 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1569 				struct file *dst_file, loff_t destoff,
1570 				size_t len, unsigned int flags)
1571 {
1572 	unsigned int xid = get_xid();
1573 	ssize_t rc;
1574 	struct cifsFileInfo *cfile = dst_file->private_data;
1575 
1576 	if (cfile->swapfile) {
1577 		rc = -EOPNOTSUPP;
1578 		free_xid(xid);
1579 		return rc;
1580 	}
1581 
1582 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1583 					len, flags);
1584 	free_xid(xid);
1585 
1586 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1587 		rc = splice_copy_file_range(src_file, off, dst_file,
1588 					    destoff, len);
1589 	return rc;
1590 }
1591 
/*
 * Default file operations (loose-cached reads/writes); byte-range (.lock)
 * and BSD (.flock) locking are supported.
 */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1611 
/* Strict-cache variant: uses cifs_strict_* read/write/fsync paths */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1631 
/* Direct (unbuffered) I/O variant: reads/writes go through the netfs helpers */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1651 
/* Like cifs_file_ops but without .lock/.flock (no byte-range locks) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1669 
/* Like cifs_file_strict_ops but without .lock/.flock (no byte-range locks) */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1687 
/* Like cifs_file_direct_ops but without .lock/.flock (no byte-range locks) */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1705 
/* File operations for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1716 
1717 static void
cifs_init_once(void * inode)1718 cifs_init_once(void *inode)
1719 {
1720 	struct cifsInodeInfo *cifsi = inode;
1721 
1722 	inode_init_once(&cifsi->netfs.inode);
1723 	init_rwsem(&cifsi->lock_sem);
1724 }
1725 
1726 static int __init
cifs_init_inodecache(void)1727 cifs_init_inodecache(void)
1728 {
1729 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1730 					      sizeof(struct cifsInodeInfo),
1731 					      0, (SLAB_RECLAIM_ACCOUNT|
1732 						SLAB_ACCOUNT),
1733 					      cifs_init_once);
1734 	if (cifs_inode_cachep == NULL)
1735 		return -ENOMEM;
1736 
1737 	return 0;
1738 }
1739 
/* Destroy the cifs inode slab cache after all RCU-deferred frees complete */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1750 
/*
 * Create the slab caches and mempools for full-size ("cifs_request") and
 * small ("cifs_small_rq") SMB buffers, after clamping the module
 * parameters CIFSMaxBufSize, cifs_min_rcv and cifs_min_small to sane
 * ranges.  Returns 0 or -ENOMEM (unwinding anything already created).
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1831 
/* Reverse of cifs_init_request_bufs(): pools first, then their caches */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1840 
init_mids(void)1841 static int init_mids(void)
1842 {
1843 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1844 					    sizeof(struct mid_q_entry), 0,
1845 					    SLAB_HWCACHE_ALIGN, NULL);
1846 	if (cifs_mid_cachep == NULL)
1847 		return -ENOMEM;
1848 
1849 	/* 3 is a reasonable minimum number of simultaneous operations */
1850 	if (mempool_init_slab_pool(&cifs_mid_pool, 3, cifs_mid_cachep) < 0) {
1851 		kmem_cache_destroy(cifs_mid_cachep);
1852 		return -ENOMEM;
1853 	}
1854 
1855 	return 0;
1856 }
1857 
/* Reverse of init_mids(): drain the mid pool, then destroy its cache */
static void destroy_mids(void)
{
	mempool_exit(&cifs_mid_pool);
	kmem_cache_destroy(cifs_mid_cachep);
}
1863 
/*
 * Create the slab caches and mempools backing cifs netfs I/O requests and
 * subrequests (each pool pre-reserves 100 objects).  Returns 0 or -ENOMEM,
 * unwinding any partially-created state via the goto chain below.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1897 
static void cifs_destroy_netfs(void)
{
	/*
	 * Release the netfs I/O allocators created by cifs_init_netfs(),
	 * in reverse order of initialization: each mempool is drained
	 * before its backing slab cache is destroyed.
	 */
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1905 
/*
 * Module initialization: set up globals, workqueues, slab caches,
 * optional upcall facilities, and finally register the cifs and smb3
 * filesystem types.
 *
 * Error handling uses a single goto ladder that unwinds in exact reverse
 * order of initialization.  The labels interleaved with the #ifdef
 * sections below are placed so that a failure in an optional facility
 * jumps past that facility's own teardown — do not reorder them.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init
init_cifs(void)
{
	int rc = 0;

	/* Build the SMB2 status-code -> errno mapping table first */
	rc = smb2_init_maperror();
	if (rc)
		return rc;

	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	/* Warn about out-of-range module-parameter values, but proceed */
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;

	/* Per-boot random secret used when hashing byte-range lock owners */
	cifs_lock_secret = get_random_u32();

	/* Clamp the cifs_max_pending module parameter into [2, CIFS_MAX_REQ] */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod",
				     WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				     0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				      0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

	/* Slab caches and mempools */
	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	/* Optional upcall/netlink facilities, each behind its config gate */
#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	/* Register filesystems last, once everything they need exists */
	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		/* roll back the first registration before unwinding */
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/*
	 * Unwind ladder: strictly reverse order of the setup above.
	 * Labels inside #ifdef sections let a failing optional facility
	 * skip its own teardown while still running everything below it.
	 */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
2110 
/*
 * Module teardown: unregister both filesystem types first so no new
 * mounts can start, then release all facilities set up by init_cifs().
 * The sequence mirrors the init path's unwind ladder — keep the order
 * if adding or removing steps.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2141 
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies on the crypto/charset modules this client may need
 * at mount time, so tooling (e.g. initramfs generators) can pull them in
 * alongside this module.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2157