xref: /linux/fs/smb/client/cifsfs.c (revision e0c505cb764e73273b3ddce80b5944fa5b796bd9)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <linux/mm.h>
32 #include <linux/key-type.h>
33 #include <uapi/linux/magic.h>
34 #include <net/ipv6.h>
35 #include "cifsfs.h"
36 #define DECLARE_GLOBALS_HERE
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "smb2proto.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52 
53 /*
54  * DOS dates from 1980/1/1 through 2107/12/31
55  * Protocol specifications indicate the range should be to 119, which
56  * limits maximum year to 2099. But this range has not been checked.
57  */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61 
/* Module-wide tunables; most are exposed as module parameters below */
int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */

/*
 *  Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
/* list of all TCP connections; guarded by cifs_tcp_ses_lock */
struct list_head	cifs_tcp_ses_list;
DEFINE_SPINLOCK(cifs_tcp_ses_lock);
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
/* Module-wide total cached dirents (in bytes) across all tcons */
atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);
126 
127 /*
128  * Write-only module parameter to drop all cached directory entries across
129  * all CIFS mounts. Echo a non-zero value to trigger.
130  */
/*
 * Invalidate the cached directory contents of every tcon on every session
 * of every TCP connection. Backs the write-only "drop_dir_cache" module
 * parameter below.
 */
static void cifs_drop_all_dir_caches(void)
{
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	/* walk connection -> session -> tree connection under the global lock */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			/* skip sessions that are being torn down */
			if (cifs_ses_exiting(ses))
				continue;
			list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
				invalidate_all_cached_dirs(tcon);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
148 
cifs_param_set_drop_dir_cache(const char * val,const struct kernel_param * kp)149 static int cifs_param_set_drop_dir_cache(const char *val, const struct kernel_param *kp)
150 {
151 	bool bv;
152 	int rc = kstrtobool(val, &bv);
153 
154 	if (rc)
155 		return rc;
156 	if (bv)
157 		cifs_drop_all_dir_caches();
158 	return 0;
159 }
160 
module_param_call(drop_dir_cache, cifs_param_set_drop_dir_cache, NULL, NULL, 0200);
MODULE_PARM_DESC(drop_dir_cache, "Write 1 to drop all cached directory entries across all CIFS mounts");

#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				   "before logging that a response is delayed. "
				   "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* Workqueues used by the client; created in module init, see below */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
struct workqueue_struct	*cfid_put_wq;
__u32 cifs_lock_secret;
201 
202 /*
203  * Bumps refcount for cifs super block.
204  * Note that it should be only called if a reference to VFS super block is
205  * already held, e.g. in open-type syscalls context. Otherwise it can race with
206  * atomic_dec_and_test in deactivate_locked_super.
207  */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	/* first cifs-level reference also takes a VFS s_active reference */
	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}
216 
/* Drop a reference taken by cifs_sb_active(); last one releases the sb. */
void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}
225 
/*
 * Fill in the superblock (flags, limits, time granularity, bdi, s_op,
 * dentry ops) and instantiate the root dentry after a successful mount.
 * Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int sbflags;
	struct timespec64 ts;
	struct inode *inode;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
	sbflags = cifs_sb_flags(cifs_sb);

	if (sbflags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot (previous version) are forced read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive shares need the case-folding dentry ops */
	if (tcon->nocase)
		set_default_d_op(sb, &cifs_ci_dentry_ops);
	else
		set_default_d_op(sb, &cifs_dentry_ops);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (sbflags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
328 
/*
 * Tear down a cifs superblock: drop cached-directory dentries and deferred
 * handles (which pin dentries) before the generic sb teardown, then umount
 * the cifs-level state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * and close all deferred file handles before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);
		cifs_close_all_deferred_files_sb(cifs_sb);

		/* Wait for all pending oplock breaks to complete */
		flush_workqueue(cifsoplockd_wq);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
352 
/*
 * ->statfs: fill in filesystem statistics. Name length, fsid and the
 * files counts are filled locally; block/space counts come from the
 * dialect-specific queryfs operation when the server provides one.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;
	const char *full_path;
	void *page;

	xid = get_xid();
	page = alloc_dentry_path();

	full_path = build_path_from_dentry(dentry, page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto statfs_out;
	}

	/* prefer the server-reported max path component length if set */
	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);

statfs_out:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
395 
/*
 * ->fallocate: delegate to the dialect-specific fallocate op under the
 * inode lock. Returns -EOPNOTSUPP when the negotiated dialect has none.
 * NOTE(review): CIFS_SB() is applied to a struct file here (elsewhere in
 * this file it takes an inode or super_block) — presumably a generic
 * accessor in this tree; confirm against cifsglob.h.
 */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_tcon *tcon = cifs_sb_master_tcon(CIFS_SB(file));
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = file_inode(file);
	int rc;

	if (!server->ops->fallocate)
		return -EOPNOTSUPP;

	/* killable lock so a fatal signal does not leave the task stuck here */
	rc = inode_lock_killable(inode);
	if (rc)
		return rc;

	/* drain in-flight netfs I/O before changing the file's allocation */
	netfs_wait_for_outstanding_io(inode);

	rc = file_modified(file);
	if (rc)
		goto out_unlock;

	rc = server->ops->fallocate(file, tcon, mode, off, len);

out_unlock:
	inode_unlock(inode);
	return rc;
}
422 
cifs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)423 static int cifs_permission(struct mnt_idmap *idmap,
424 			   struct inode *inode, int mask)
425 {
426 	unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
427 
428 	if (sbflags & CIFS_MOUNT_NO_PERM) {
429 		if ((mask & MAY_EXEC) && !execute_ok(inode))
430 			return -EACCES;
431 		else
432 			return 0;
433 	} else /* file mode might have been restricted at mount time
434 		on the client (above and beyond ACL on servers) for
435 		servers which do not support setting and viewing mode bits,
436 		so allowing client to check permissions is useful */
437 		return generic_permission(&nop_mnt_idmap, inode, mask);
438 }
439 
/* Slab caches and mempools for frequently allocated objects (inodes,
 * request buffers, mids, netfs I/O requests/subrequests) */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t cifs_mid_pool;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
451 
/*
 * ->alloc_inode: allocate a cifsInodeInfo from the slab cache and set all
 * cifs-specific fields to safe defaults. Returns the embedded VFS inode,
 * or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
490 
491 static void
cifs_free_inode(struct inode * inode)492 cifs_free_inode(struct inode *inode)
493 {
494 	struct cifsInodeInfo *cinode = CIFS_I(inode);
495 
496 	if (S_ISLNK(inode->i_mode))
497 		kfree(cinode->symlink_target);
498 	kmem_cache_free(cifs_inode_cachep, cinode);
499 }
500 
/*
 * ->evict_inode: wait for in-flight netfs I/O, tear down the pagecache,
 * and release fscache cookies before the inode is destroyed.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
511 
512 static void
cifs_show_address(struct seq_file * s,struct TCP_Server_Info * server)513 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
514 {
515 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
516 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
517 
518 	seq_puts(s, ",addr=");
519 
520 	switch (server->dstaddr.ss_family) {
521 	case AF_INET:
522 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
523 		break;
524 	case AF_INET6:
525 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
526 		if (sa6->sin6_scope_id)
527 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
528 		break;
529 	default:
530 		seq_puts(s, "(unknown)");
531 	}
532 	if (server->rdma)
533 		seq_puts(s, ",rdma");
534 }
535 
/*
 * Emit the ",sec=" mount option for /proc/mounts. A trailing "i" (e.g.
 * "krb5i") indicates signing; Kerberos sessions also show the credential
 * uid as ",cruid=".
 */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	/* Unspecified with no user name means an anonymous (sec=none) session */
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}
570 
571 static void
cifs_show_cache_flavor(struct seq_file * s,struct cifs_sb_info * cifs_sb)572 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
573 {
574 	unsigned int sbflags = cifs_sb_flags(cifs_sb);
575 
576 	seq_puts(s, ",cache=");
577 
578 	if (sbflags & CIFS_MOUNT_STRICT_IO)
579 		seq_puts(s, "strict");
580 	else if (sbflags & CIFS_MOUNT_DIRECT_IO)
581 		seq_puts(s, "none");
582 	else if (sbflags & CIFS_MOUNT_RW_CACHE)
583 		seq_puts(s, "singleclient"); /* assume only one client access */
584 	else if (sbflags & CIFS_MOUNT_RO_CACHE)
585 		seq_puts(s, "ro"); /* read only caching assumed */
586 	else
587 		seq_puts(s, "loose");
588 }
589 
590 /*
591  * cifs_show_devname() is used so we show the mount device name with correct
592  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
593  */
cifs_show_devname(struct seq_file * m,struct dentry * root)594 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
595 {
596 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
597 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
598 
599 	if (devname == NULL)
600 		seq_puts(m, "none");
601 	else {
602 		convert_delimiter(devname, '/');
603 		/* escape all spaces in share names */
604 		seq_escape(m, devname, " \t");
605 		kfree(devname);
606 	}
607 	return 0;
608 }
609 
610 static void
cifs_show_upcall_target(struct seq_file * s,struct cifs_sb_info * cifs_sb)611 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
612 {
613 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
614 		seq_puts(s, ",upcall_target=app");
615 		return;
616 	}
617 
618 	seq_puts(s, ",upcall_target=");
619 
620 	switch (cifs_sb->ctx->upcall_target) {
621 	case UPTARGET_APP:
622 		seq_puts(s, "app");
623 		break;
624 	case UPTARGET_MOUNT:
625 		seq_puts(s, "mount");
626 		break;
627 	default:
628 		/* shouldn't ever happen */
629 		seq_puts(s, "unknown");
630 		break;
631 	}
632 }
633 
634 /*
635  * cifs_show_options() is for displaying mount options in /proc/mounts.
636  * Not all settable options are displayed but most of the important
637  * ones are.
638  */
/*
 * ->show_options: render the effective mount options for /proc/mounts.
 * Groups: security/cache flavors (via helpers), per-session identity,
 * per-superblock boolean flags, sizes/timeouts, and server-wide knobs
 * that are only shown when overridden on mount. Always returns 0.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	unsigned int sbflags;

	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* only show srcaddr if one was explicitly bound */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	sbflags = cifs_sb_flags(cifs_sb);
	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (sbflags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (sbflags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->ses->unicode == 0)
		seq_puts(s, ",nounicode");
	else if (tcon->ses->unicode == 1)
		seq_puts(s, ",unicode");
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* per-superblock boolean mount flags */
	if (sbflags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (sbflags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (sbflags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (sbflags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (sbflags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (sbflags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (sbflags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (sbflags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (sbflags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (sbflags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (sbflags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (sbflags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (sbflags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (sbflags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (sbflags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (sbflags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (sbflags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (sbflags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (sbflags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (sbflags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (sbflags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (sbflags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
	if (cifs_sb->ctx->nonativesocket)
		seq_puts(s, ",nonativesocket");
	else
		seq_puts(s, ",nativesocket");
	seq_show_option(s, "symlink",
			cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
845 
/*
 * ->umount_begin (umount -f): if this is the only mount of the tcon, wake
 * all tasks blocked on the server's request/response queues so a forced
 * unmount can make progress. Does not mark the tcon exiting here, since
 * the forced unmount may still fail later.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	/* lock order: cifs_tcp_ses_lock, then the tcon's tc_lock */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
889 
/*
 * ->freeze_fs: flush deferred (lazily closed) file handles on the master
 * tcon before the filesystem is frozen. Always succeeds.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (cifs_sb)
		cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));

	return 0;
}
903 
904 #ifdef CONFIG_CIFS_STATS2
/* ->show_stats stub for /proc/mounts stats; not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
910 #endif
911 
/* ->write_inode: only needs to unpin netfs writeback state; no metadata
 * is written back from here */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
916 
/*
 * ->drop_inode: decide whether the inode is evicted when its refcount
 * drops to zero. Without serverino, inode numbers are not stable across
 * lookups, so always evict; otherwise defer to the generic policy.
 */
static int cifs_drop_inode(struct inode *inode)
{
	unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));

	/* no serverino => unconditional eviction */
	return !(sbflags & CIFS_MOUNT_SERVER_INUM) ||
		inode_generic_drop(inode);
}
925 
/* Superblock operations table; installed on each sb by cifs_read_super() */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
946 
/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and ERR_PTR otherwise.
 * (Note: this never returns NULL; failures are always ERR_PTR values.)
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with a prefix path, sb->s_root already points at the prefix dir */
	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk the path one component at a time from the share root */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* [p, s) is the current path component */
		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
1002 
/*
 * sget() "set" callback: attach the prepared cifs_sb to a newly
 * allocated superblock and give it an anonymous block device.
 */
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}
1009 
/*
 * Mount entry point shared by the "cifs" and "smb3" filesystem types.
 *
 * Duplicates @old_ctx into a fresh cifs_sb, connects to the server via
 * cifs_mount(), then finds or creates a matching superblock with sget().
 * Returns the root dentry (honoring any prefix path) or an ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}
	cifs_sb = kzalloc_obj(*cifs_sb);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc_obj(struct smb3_fs_context);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		/* sget never took ownership of cifs_sb; tear it down here */
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		/* an existing sb matched; our cifs_sb is now redundant */
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL when an existing superblock was reused above */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	/* sb owns cifs_sb from here on; dropping sb cleans it up */
	deactivate_locked_super(sb);
	return root;
out:
	/* pre-sget failure: cifs_sb and its context are still ours to free */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1102 
/*
 * ->llseek() for cifs files.  SEEK_END/SEEK_DATA/SEEK_HOLE depend on an
 * up-to-date file size, so those cases wait for dirty pages and force an
 * attribute revalidation before seeking.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* prefer the protocol-specific llseek op when the dialect has one */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1147 
/*
 * ->setlease() for cifs files.  A lease is granted locally only when the
 * client already holds a matching oplock/lease (so its cache is valid),
 * or when the local_lease mount option promises no external changes;
 * otherwise the request fails with -EAGAIN.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1177 
/* Legacy "cifs" filesystem type; shares all machinery with "smb3". */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1187 
/* "smb3" filesystem type: identical implementation under the newer name. */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1198 
/* Inode operations for directories. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1217 
/* Inode operations for regular files. */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1227 
cifs_get_link(struct dentry * dentry,struct inode * inode,struct delayed_call * done)1228 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1229 			    struct delayed_call *done)
1230 {
1231 	char *target_path;
1232 
1233 	if (!dentry)
1234 		return ERR_PTR(-ECHILD);
1235 
1236 	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1237 	if (!target_path)
1238 		return ERR_PTR(-ENOMEM);
1239 
1240 	spin_lock(&inode->i_lock);
1241 	if (likely(CIFS_I(inode)->symlink_target)) {
1242 		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1243 	} else {
1244 		kfree(target_path);
1245 		target_path = ERR_PTR(-EOPNOTSUPP);
1246 	}
1247 	spin_unlock(&inode->i_lock);
1248 
1249 	if (!IS_ERR(target_path))
1250 		set_delayed_call(done, kfree_link, target_path);
1251 
1252 	return target_path;
1253 }
1254 
/* Inode operations for symbolic links. */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1261 
/*
 * Advance the EOF marker to after the source range.
 *
 * A server-side copy fails if the source range crosses the server's EOF,
 * so push the server's file size out to the client's i_size (which covers
 * @src_end) through a writable handle.  On success, resize the local
 * netfs/fscache state to @src_end.  On failure, fall back to flushing the
 * pagecache so the server at least sees the latest data, and return the
 * result of that flush.
 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;	/* default when no writable handle is found */

	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	return filemap_write_and_wait(src_inode->i_mapping);
}
1294 
/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate.  We extend the flush
 * bounds to encompass the folio.
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	/* nothing cached at this index => nothing to flush */
	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* widen the caller's flush window to whole-folio boundaries */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend   = max_t(unsigned long long, *_fend, fend);
	/* folio lies entirely inside the invalidation range: skip the flush */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1327 
/*
 * ->remap_file_range() (clone/reflink): ask the server to duplicate
 * extents from @src_file into @dst_file.  Dedup is not supported.
 * Returns the number of bytes remapped or a negative errno.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	/* NOTE(review): duplicate of the "clone range" debug above */
	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* the clone grew the target; propagate the new size */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		} else if (rc == -EOPNOTSUPP) {
			/*
			 * copy_file_range syscall man page indicates EINVAL
			 * is returned e.g when "fd_in and fd_out refer to the
			 * same file and the source and target ranges overlap."
			 * Test generic/157 was what showed these cases where
			 * we need to remap EOPNOTSUPP to EINVAL
			 */
			if (off >= src_inode->i_size) {
				rc = -EINVAL;
			} else if (src_inode == target_inode) {
				if (off + len > destoff)
					rc = -EINVAL;
			}
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1453 
/*
 * Perform a server-side copy (SMB2 copychunk / FSCTL_SRV_COPYCHUNK)
 * from @src_file to @dst_file.  Both files must be on the same session;
 * otherwise -EXDEV lets the VFS fall back to a generic copy.  Returns
 * bytes copied or a negative errno.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* server-side copy requires both ends on the same SMB session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	/* update mtime/ctime and strip setuid bits before writing */
	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			/* the copy extended the target; grow local size state */
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1558 
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}
1570 
cifs_copy_file_range(struct file * src_file,loff_t off,struct file * dst_file,loff_t destoff,size_t len,unsigned int flags)1571 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1572 				struct file *dst_file, loff_t destoff,
1573 				size_t len, unsigned int flags)
1574 {
1575 	unsigned int xid = get_xid();
1576 	ssize_t rc;
1577 	struct cifsFileInfo *cfile = dst_file->private_data;
1578 
1579 	if (cfile->swapfile) {
1580 		rc = -EOPNOTSUPP;
1581 		free_xid(xid);
1582 		return rc;
1583 	}
1584 
1585 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1586 					len, flags);
1587 	free_xid(xid);
1588 
1589 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1590 		rc = splice_copy_file_range(src_file, off, dst_file,
1591 					    destoff, len);
1592 	return rc;
1593 }
1594 
/* Regular files: cached ("loose") reads/writes, byte-range locks enabled. */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1614 
/* Regular files with strict cache semantics (strictcache mount option). */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1634 
/* Regular files with uncached (direct) I/O via the netfs helpers. */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1654 
/* As cifs_file_ops but without byte-range locks (nobrl mount option). */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1672 
/* Strict cache semantics without byte-range locks. */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1690 
/* Uncached (direct) I/O without byte-range locks. */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1708 
/* File operations for directories. */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1719 
/*
 * Slab constructor for cifsInodeInfo objects: runs once per object when
 * the slab page is created, not on every allocation.
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1728 
1729 static int __init
cifs_init_inodecache(void)1730 cifs_init_inodecache(void)
1731 {
1732 	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1733 					      sizeof(struct cifsInodeInfo),
1734 					      0, (SLAB_RECLAIM_ACCOUNT|
1735 						SLAB_ACCOUNT),
1736 					      cifs_init_once);
1737 	if (cifs_inode_cachep == NULL)
1738 		return -ENOMEM;
1739 
1740 	return 0;
1741 }
1742 
/* Destroy the cifsInodeInfo slab cache (after draining RCU-freed inodes). */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1753 
/*
 * Allocate the slab caches and mempools for large ("cifs_request") and
 * small ("cifs_small_rq") SMB buffers, clamping the module parameters
 * CIFSMaxBufSize, cifs_min_rcv and cifs_min_small to sane ranges.
 * Returns 0 or -ENOMEM, unwinding anything already created on failure.
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy whitelist spans the entire buffer (offset 0, full size) */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	/* clamp the large-buffer mempool minimum to [1, 64] */
	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* clamp the small-buffer mempool minimum to [2, 256] */
	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1834 
1835 static void
cifs_destroy_request_bufs(void)1836 cifs_destroy_request_bufs(void)
1837 {
1838 	mempool_destroy(cifs_req_poolp);
1839 	kmem_cache_destroy(cifs_req_cachep);
1840 	mempool_destroy(cifs_sm_req_poolp);
1841 	kmem_cache_destroy(cifs_sm_req_cachep);
1842 }
1843 
init_mids(void)1844 static int init_mids(void)
1845 {
1846 	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1847 					    sizeof(struct mid_q_entry), 0,
1848 					    SLAB_HWCACHE_ALIGN, NULL);
1849 	if (cifs_mid_cachep == NULL)
1850 		return -ENOMEM;
1851 
1852 	/* 3 is a reasonable minimum number of simultaneous operations */
1853 	if (mempool_init_slab_pool(&cifs_mid_pool, 3, cifs_mid_cachep) < 0) {
1854 		kmem_cache_destroy(cifs_mid_cachep);
1855 		return -ENOMEM;
1856 	}
1857 
1858 	return 0;
1859 }
1860 
/* Tear down the mid mempool and its backing slab cache. */
static void destroy_mids(void)
{
	mempool_exit(&cifs_mid_pool);
	kmem_cache_destroy(cifs_mid_cachep);
}
1866 
/*
 * Create the slab caches and mempools backing netfs I/O requests and
 * subrequests.  A mempool minimum of 100 objects guarantees forward
 * progress under memory pressure.  Returns 0 or -ENOMEM, unwinding any
 * partially-created state via the goto chain on failure.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1900 
/*
 * Undo cifs_init_netfs() in reverse order of construction: each
 * mempool is drained before its backing slab cache is destroyed.
 */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1908 
/*
 * Module entry point: initialize global state, allocate workqueues,
 * caches and mempools, bring up the optional upcall/netlink helpers,
 * and finally register the "cifs" and "smb3" filesystem types.
 *
 * Resources are acquired strictly in order; on any failure we jump
 * into the matching point of the unwind ladder at the bottom, which
 * releases everything acquired so far in reverse order.  The label
 * order and the #ifdef placement inside the ladder must mirror the
 * acquisition sequence exactly.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init
init_cifs(void)
{
	int rc = 0;

	rc = smb2_init_maperror();
	if (rc)
		return rc;

	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	/* Warn about out-of-range values of the slow_rsp_threshold module param */
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;

	/* Random secret used when hashing lock ownership (see cifs_lock_secret users) */
	cifs_lock_secret = get_random_u32();

	/* Clamp the cifs_max_pending module parameter into [2, CIFS_MAX_REQ] */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod",
				     WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				     0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				      0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	/* Registration goes last: once visible, mounts can start arriving */
	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		/* smb3 registration failed; roll back the cifs registration too */
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/*
	 * Error unwind ladder: each label releases one resource then falls
	 * through to release everything acquired before it.  The #ifdef'd
	 * labels sit inside the same config guards as their init calls.
	 */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
2113 
/*
 * Module exit: tear down everything init_cifs() set up.
 *
 * The filesystem types are unregistered first so no new mounts can
 * appear while the rest of the state is being dismantled; the optional
 * upcall/DFS helpers come down under the same config guards they were
 * brought up under, then the caches, mempools and workqueues.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	/*
	 * Workqueues go last; note the order here is not the exact reverse
	 * of creation order in init_cifs() — presumably harmless since all
	 * users are gone by this point, but worth confirming on any change.
	 */
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2144 
/* Module metadata and entry points */
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: names of crypto algorithm implementations —
 * presumably so modprobe pulls them in for SMB signing/encryption
 * before first use (NOTE(review): confirm the exact consumers).
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2160