xref: /linux/fs/smb/client/cifsfs.c (revision 3d99347a2e1ae60d9368b1d734290bab1acde0ce)
1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3  *
4  *   Copyright (C) International Business Machines  Corp., 2002,2008
5  *   Author(s): Steve French (sfrench@us.ibm.com)
6  *
7  *   Common Internet FileSystem (CIFS) client
8  *
9  */
10 
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12 
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <linux/mm.h>
32 #include <linux/key-type.h>
33 #include <uapi/linux/magic.h>
34 #include <net/ipv6.h>
35 #include "cifsfs.h"
36 #include "cifspdu.h"
37 #define DECLARE_GLOBALS_HERE
38 #include "cifsglob.h"
39 #include "cifsproto.h"
40 #include "smb2proto.h"
41 #include "cifs_debug.h"
42 #include "cifs_fs_sb.h"
43 #include "cifs_spnego.h"
44 #include "fscache.h"
45 #ifdef CONFIG_CIFS_DFS_UPCALL
46 #include "dfs_cache.h"
47 #endif
48 #ifdef CONFIG_CIFS_SWN_UPCALL
49 #include "netlink.h"
50 #endif
51 #include "fs_context.h"
52 #include "cached_dir.h"
53 
54 /*
55  * DOS dates from 1980/1/1 through 2107/12/31
56  * Protocol specifications indicate the range should be to 119, which
57  * limits maximum year to 2099. But this range has not been checked.
58  */
59 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
60 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
61 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
62 
63 int cifsFYI = 0;
64 bool traceSMB;
65 bool enable_oplocks = true;
66 bool linuxExtEnabled = true;
67 bool lookupCacheEnabled = true;
68 bool disable_legacy_dialects; /* false by default */
69 bool enable_gcm_256 = true;
70 bool require_gcm_256; /* false by default */
71 bool enable_negotiate_signing; /* false by default */
72 unsigned int global_secflags = CIFSSEC_DEF;
73 /* unsigned int ntlmv2_support = 0; */
74 
75 /*
76  * Global transaction id (XID) information
77  */
78 unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
80 unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
81 DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */
82 
83 /*
84  *  Global counters, updated atomically
85  */
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
92 
93 atomic_t mid_count;
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
99 #endif/* STATS2 */
100 struct list_head	cifs_tcp_ses_list;
101 DEFINE_SPINLOCK(cifs_tcp_ses_lock);
102 static const struct super_operations cifs_super_ops;
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 				 "for CIFS requests. "
107 				 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
111 				"1 to 64");
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
115 				 "Range: 2 to 256");
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 				   "CIFS/SMB1 dialect (N/A for SMB3) "
120 				   "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 				 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 /* Module-wide total cached dirents (in bytes) across all tcons */
126 atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);
127 
128 /*
129  * Write-only module parameter to drop all cached directory entries across
130  * all CIFS mounts. Echo a non-zero value to trigger.
131  */
/*
 * Walk every server -> session -> tree connection and invalidate the
 * cached directory entries on each tcon.  The whole traversal runs
 * under cifs_tcp_ses_lock so the lists cannot change underneath us.
 */
static void cifs_drop_all_dir_caches(void)
{
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			/* skip sessions that are already being torn down */
			if (cifs_ses_exiting(ses))
				continue;
			list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
				invalidate_all_cached_dirs(tcon);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}
149 
cifs_param_set_drop_dir_cache(const char * val,const struct kernel_param * kp)150 static int cifs_param_set_drop_dir_cache(const char *val, const struct kernel_param *kp)
151 {
152 	bool bv;
153 	int rc = kstrtobool(val, &bv);
154 
155 	if (rc)
156 		return rc;
157 	if (bv)
158 		cifs_drop_all_dir_caches();
159 	return 0;
160 }
161 
162 module_param_call(drop_dir_cache, cifs_param_set_drop_dir_cache, NULL, NULL, 0200);
163 MODULE_PARM_DESC(drop_dir_cache, "Write 1 to drop all cached directory entries across all CIFS mounts");
164 
165 #ifdef CONFIG_CIFS_STATS2
166 unsigned int slow_rsp_threshold = 1;
167 module_param(slow_rsp_threshold, uint, 0644);
168 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
169 				   "before logging that a response is delayed. "
170 				   "Default: 1 (if set to 0 disables msg).");
171 #endif /* STATS2 */
172 
173 module_param(enable_oplocks, bool, 0644);
174 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
175 
176 module_param(enable_gcm_256, bool, 0644);
177 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
178 
179 module_param(require_gcm_256, bool, 0644);
180 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
181 
182 module_param(enable_negotiate_signing, bool, 0644);
183 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
184 
185 module_param(disable_legacy_dialects, bool, 0644);
186 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
187 				  "helpful to restrict the ability to "
188 				  "override the default dialects (SMB2.1, "
189 				  "SMB3 and SMB3.02) on mount with old "
190 				  "dialects (CIFS/SMB1 and SMB2) since "
191 				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
192 				  " and less secure. Default: n/N/0");
193 
194 struct workqueue_struct	*cifsiod_wq;
195 struct workqueue_struct	*decrypt_wq;
196 struct workqueue_struct	*fileinfo_put_wq;
197 struct workqueue_struct	*cifsoplockd_wq;
198 struct workqueue_struct	*deferredclose_wq;
199 struct workqueue_struct	*serverclose_wq;
200 struct workqueue_struct	*cfid_put_wq;
201 __u32 cifs_lock_secret;
202 
203 /*
204  * Bumps refcount for cifs super block.
205  * Note that it should be only called if a reference to VFS super block is
206  * already held, e.g. in open-type syscalls context. Otherwise it can race with
207  * atomic_dec_and_test in deactivate_locked_super.
208  */
209 void
cifs_sb_active(struct super_block * sb)210 cifs_sb_active(struct super_block *sb)
211 {
212 	struct cifs_sb_info *server = CIFS_SB(sb);
213 
214 	if (atomic_inc_return(&server->active) == 1)
215 		atomic_inc(&sb->s_active);
216 }
217 
218 void
cifs_sb_deactive(struct super_block * sb)219 cifs_sb_deactive(struct super_block *sb)
220 {
221 	struct cifs_sb_info *server = CIFS_SB(sb);
222 
223 	if (atomic_dec_and_test(&server->active))
224 		deactivate_super(sb);
225 }
226 
/*
 * Fill in a freshly allocated super_block after the tcon has been set
 * up: flags, size/time limits, bdi/readahead tuning, and finally the
 * root inode and dentry.  Returns 0 or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot are inherently read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive shares need the case-folding dentry ops */
	if (tcon->nocase)
		set_default_d_op(sb, &cifs_ci_dentry_ops);
	else
		set_default_d_op(sb, &cifs_dentry_ops);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
327 
/*
 * Superblock teardown: cached-dir dentries must go first, then the
 * anonymous VFS superblock, and only then the cifs mount state.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
347 
/*
 * statfs(2) handler.  Static fields (f_namelen, f_fsid, f_files,
 * f_ffree) come from cached tcon data; block counts are filled in by
 * the dialect-specific queryfs op when the server provides one.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;
	const char *full_path;
	void *page;

	xid = get_xid();
	page = alloc_dentry_path();

	full_path = build_path_from_dentry(dentry, page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto statfs_out;
	}

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] =  (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);

statfs_out:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
390 
/*
 * fallocate(2) entry point.  Delegates to the dialect-specific
 * fallocate op under the inode lock, after waiting for outstanding
 * netfs I/O and updating timestamps via file_modified().
 */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = file_inode(file);
	int rc;

	if (!server->ops->fallocate)
		return -EOPNOTSUPP;

	/* killable lock so a signal can abort a contended fallocate */
	rc = inode_lock_killable(inode);
	if (rc)
		return rc;

	netfs_wait_for_outstanding_io(inode);

	rc = file_modified(file);
	if (rc)
		goto out_unlock;

	rc = server->ops->fallocate(file, tcon, mode, off, len);

out_unlock:
	inode_unlock(inode);
	return rc;
}
418 
cifs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)419 static int cifs_permission(struct mnt_idmap *idmap,
420 			   struct inode *inode, int mask)
421 {
422 	struct cifs_sb_info *cifs_sb;
423 
424 	cifs_sb = CIFS_SB(inode->i_sb);
425 
426 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
427 		if ((mask & MAY_EXEC) && !execute_ok(inode))
428 			return -EACCES;
429 		else
430 			return 0;
431 	} else /* file mode might have been restricted at mount time
432 		on the client (above and beyond ACL on servers) for
433 		servers which do not support setting and viewing mode bits,
434 		so allowing client to check permissions is useful */
435 		return generic_permission(&nop_mnt_idmap, inode, mask);
436 }
437 
438 static struct kmem_cache *cifs_inode_cachep;
439 static struct kmem_cache *cifs_req_cachep;
440 static struct kmem_cache *cifs_mid_cachep;
441 static struct kmem_cache *cifs_sm_req_cachep;
442 static struct kmem_cache *cifs_io_request_cachep;
443 static struct kmem_cache *cifs_io_subrequest_cachep;
444 mempool_t *cifs_sm_req_poolp;
445 mempool_t *cifs_req_poolp;
446 mempool_t cifs_mid_pool;
447 mempool_t cifs_io_request_pool;
448 mempool_t cifs_io_subrequest_pool;
449 
/*
 * Allocate and initialize a cifsInodeInfo from the inode slab cache.
 * Returns the embedded VFS inode, or NULL on allocation failure.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = ATTR_ARCHIVE;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* per-inode lease key used when requesting leases from the server */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
488 
/*
 * Final per-inode teardown: free the symlink target string (only set
 * for symlinks) and return the cifsInodeInfo to its slab cache.
 */
static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}
498 
/*
 * Evict an inode: wait for in-flight netfs I/O, drop the pagecache,
 * release any fscache cookies, then clear the inode.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
509 
510 static void
cifs_show_address(struct seq_file * s,struct TCP_Server_Info * server)511 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
512 {
513 	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
514 	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
515 
516 	seq_puts(s, ",addr=");
517 
518 	switch (server->dstaddr.ss_family) {
519 	case AF_INET:
520 		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
521 		break;
522 	case AF_INET6:
523 		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
524 		if (sa6->sin6_scope_id)
525 			seq_printf(s, "%%%u", sa6->sin6_scope_id);
526 		break;
527 	default:
528 		seq_puts(s, "(unknown)");
529 	}
530 	if (server->rdma)
531 		seq_puts(s, ",rdma");
532 }
533 
534 static void
cifs_show_security(struct seq_file * s,struct cifs_ses * ses)535 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
536 {
537 	if (ses->sectype == Unspecified) {
538 		if (ses->user_name == NULL)
539 			seq_puts(s, ",sec=none");
540 		return;
541 	}
542 
543 	seq_puts(s, ",sec=");
544 
545 	switch (ses->sectype) {
546 	case NTLMv2:
547 		seq_puts(s, "ntlmv2");
548 		break;
549 	case Kerberos:
550 		seq_puts(s, "krb5");
551 		break;
552 	case RawNTLMSSP:
553 		seq_puts(s, "ntlmssp");
554 		break;
555 	default:
556 		/* shouldn't ever happen */
557 		seq_puts(s, "unknown");
558 		break;
559 	}
560 
561 	if (ses->sign)
562 		seq_puts(s, "i");
563 
564 	if (ses->sectype == Kerberos)
565 		seq_printf(s, ",cruid=%u",
566 			   from_kuid_munged(&init_user_ns, ses->cred_uid));
567 }
568 
569 static void
cifs_show_cache_flavor(struct seq_file * s,struct cifs_sb_info * cifs_sb)570 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
571 {
572 	seq_puts(s, ",cache=");
573 
574 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
575 		seq_puts(s, "strict");
576 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
577 		seq_puts(s, "none");
578 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
579 		seq_puts(s, "singleclient"); /* assume only one client access */
580 	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
581 		seq_puts(s, "ro"); /* read only caching assumed */
582 	else
583 		seq_puts(s, "loose");
584 }
585 
586 /*
587  * cifs_show_devname() is used so we show the mount device name with correct
588  * format (e.g. forward slashes vs. back slashes) in /proc/mounts
589  */
cifs_show_devname(struct seq_file * m,struct dentry * root)590 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
591 {
592 	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
593 	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
594 
595 	if (devname == NULL)
596 		seq_puts(m, "none");
597 	else {
598 		convert_delimiter(devname, '/');
599 		/* escape all spaces in share names */
600 		seq_escape(m, devname, " \t");
601 		kfree(devname);
602 	}
603 	return 0;
604 }
605 
606 static void
cifs_show_upcall_target(struct seq_file * s,struct cifs_sb_info * cifs_sb)607 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
608 {
609 	if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
610 		seq_puts(s, ",upcall_target=app");
611 		return;
612 	}
613 
614 	seq_puts(s, ",upcall_target=");
615 
616 	switch (cifs_sb->ctx->upcall_target) {
617 	case UPTARGET_APP:
618 		seq_puts(s, "app");
619 		break;
620 	case UPTARGET_MOUNT:
621 		seq_puts(s, "mount");
622 		break;
623 	default:
624 		/* shouldn't ever happen */
625 		seq_puts(s, "unknown");
626 		break;
627 	}
628 }
629 
630 /*
631  * cifs_show_options() is for displaying mount options in /proc/mounts.
632  * Not all settable options are displayed but most of the important
633  * ones are.
634  */
/*
 * Emit the mount options for /proc/mounts.  Option order is part of
 * the user-visible output; do not reorder the checks below.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* only show srcaddr when one was explicitly bound */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->ses->unicode == 0)
		seq_puts(s, ",nounicode");
	else if (tcon->ses->unicode == 1)
		seq_puts(s, ",unicode");
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
	if (cifs_sb->ctx->nonativesocket)
		seq_puts(s, ",nonativesocket");
	else
		seq_puts(s, ",nativesocket");
	seq_show_option(s, "symlink",
			cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
838 
/*
 * Called at the start of a forced unmount ("umount -f"): wake tasks
 * blocked on server request/response queues so they can abort.  Does
 * nothing when the tcon is shared with other mounts or already exiting.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).  TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
883 
/*
 * freeze_fs hook: flush deferred-close file handles on the master
 * tcon before the filesystem is frozen.  Always succeeds.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (cifs_sb)
		cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));

	return 0;
}
897 
898 #ifdef CONFIG_CIFS_STATS2
/* Per-superblock statistics hook; intentionally a stub for now */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
904 #endif
905 
/* Writeback hook: just unpin any netfs-pinned writeback state */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
910 
cifs_drop_inode(struct inode * inode)911 static int cifs_drop_inode(struct inode *inode)
912 {
913 	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
914 
915 	/* no serverino => unconditional eviction */
916 	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
917 		inode_generic_drop(inode);
918 }
919 
/* Superblock operations wired into every cifs mount (see cifs_read_super) */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
940 
941 /*
942  * Get root dentry from superblock according to prefix path mount option.
943  * Return dentry with refcount + 1 on success and NULL otherwise.
944  */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* With a prefix-path mount, s_root already points at the prefix */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/*
	 * Walk one path component per iteration, starting from s_root.
	 * Each step drops the reference on the parent and keeps one on
	 * the child, so exactly one reference is held throughout.
	 */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		/* cannot descend through a non-directory */
		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;	/* end of path: dentry is the answer */
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* [p, s) is the current component name */
		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
996 
/*
 * sget() "set" callback: attach the prepared cifs_sb to a freshly
 * allocated superblock and give it an anonymous device number.
 */
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}
1003 
/*
 * Mount entry point shared by the cifs and smb3 filesystem types.
 * Duplicates the parsed fs_context into a new cifs_sb, connects to the
 * server, then finds or creates a matching superblock via sget() and
 * returns its root dentry (with a reference held) or an ERR_PTR.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}
	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	/* establish the connection (session + tree connect) */
	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		/* tear down the connection set up above */
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		/* matched an existing sb: our cifs_sb is redundant */
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		/* new superblock: cifs_sb ownership moved to sb->s_fs_info */
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* when reusing an sb, resolve the prefix from the caller's context */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1096 
/*
 * llseek file operation.  For whence values that depend on the file size
 * (SEEK_END/SEEK_DATA/SEEK_HOLE) the cached length must be revalidated
 * against the server first; then the seek is delegated to the dialect's
 * llseek op if one exists, else to generic_file_llseek().
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* prefer the dialect-specific seek (e.g. SMB3 SEEK_DATA/SEEK_HOLE) */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1141 
/*
 * setlease file operation: grant a local lease only when the server has
 * already granted us a matching oplock/lease (or on unlock), otherwise
 * taking a lease locally could hide remote changes.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* leases only make sense on regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1174 
/* Filesystem type registered as "cifs" (legacy mount name) */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1184 
/* Filesystem type registered as "smb3"; same implementation as "cifs" */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1195 
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1214 
/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1224 
/*
 * get_link inode operation: hand the VFS a copy of the cached symlink
 * target.  RCU-walk mode (dentry == NULL) is refused because we need to
 * allocate.  On success the copy is released later via the delayed_call
 * (kfree_link); on failure an ERR_PTR is returned.
 */
const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			    struct delayed_call *done)
{
	char *target_path;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	/* i_lock guards symlink_target against concurrent update/free */
	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		/* no cached target: cannot resolve the link here */
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}
1251 
/* Inode operations for symlinks */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1258 
1259 /*
1260  * Advance the EOF marker to after the source range.
1261  */
/*
 * Push the server-side EOF of the source file out to src_end before a
 * server-side copy, since such copies fail if the source range crosses
 * the remote EOF.  Requires a writable open handle on the source.
 * Returns 0 on success; on failure, flushes the source mapping and
 * returns that result instead.
 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;

	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* record the new remote size locally and in fscache */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	return filemap_write_and_wait(src_inode->i_mapping);
}
1291 
1292 /*
1293  * Flush out either the folio that overlaps the beginning of a range in which
1294  * pos resides or the folio that overlaps the end of a range unless that folio
1295  * is entirely within the range we're going to invalidate.  We extend the flush
1296  * bounds to encompass the folio.
1297  */
/*
 * Flush the folio containing pos at one edge of an invalidation range
 * and widen [*_fstart, *_fend] to cover the whole folio.  The flush is
 * skipped when pos lands exactly on the folio boundary facing into the
 * range (the folio then lies entirely inside the range to be dropped).
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;	/* nothing cached there: nothing to flush */

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend   = max_t(unsigned long long, *_fend, fend);
	/* folio fully inside the range? then it needs no protective flush */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1324 
/*
 * remap_file_range handler: server-side clone of a byte range from
 * src_file into dst_file via the dialect's duplicate_extents op.
 * REMAP_FILE_DEDUP is not supported.  Returns the number of bytes
 * cloned or a negative errno.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "to end of source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* clone grew the target: propagate the new size */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		} else if (rc == -EOPNOTSUPP) {
			/*
			 * copy_file_range syscall man page indicates EINVAL
			 * is returned e.g when "fd_in and fd_out refer to the
			 * same file and the source and target ranges overlap."
			 * Test generic/157 was what showed these cases where
			 * we need to remap EOPNOTSUPP to EINVAL
			 */
			if (off >= src_inode->i_size) {
				rc = -EINVAL;
			} else if (src_inode == target_inode) {
				if (off + len > destoff)
					rc = -EINVAL;
			}
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1450 
/*
 * Server-side copy of a byte range using the dialect's copychunk_range
 * op.  Both files must be on the same SMB session (-EXDEV otherwise).
 * Returns the number of bytes copied or a negative errno.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* server-side copy only works within one session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		/* a partial/full copy may have grown the target */
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1555 
1556 /*
1557  * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1558  * is a dummy operation.
1559  */
/* fsync for directories: no-op (see comment above); just logs and succeeds */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}
1567 
cifs_copy_file_range(struct file * src_file,loff_t off,struct file * dst_file,loff_t destoff,size_t len,unsigned int flags)1568 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1569 				struct file *dst_file, loff_t destoff,
1570 				size_t len, unsigned int flags)
1571 {
1572 	unsigned int xid = get_xid();
1573 	ssize_t rc;
1574 	struct cifsFileInfo *cfile = dst_file->private_data;
1575 
1576 	if (cfile->swapfile) {
1577 		rc = -EOPNOTSUPP;
1578 		free_xid(xid);
1579 		return rc;
1580 	}
1581 
1582 	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1583 					len, flags);
1584 	free_xid(xid);
1585 
1586 	if (rc == -EOPNOTSUPP || rc == -EXDEV)
1587 		rc = splice_copy_file_range(src_file, off, dst_file,
1588 					    destoff, len);
1589 	return rc;
1590 }
1591 
/* File ops: cached (loose) I/O, byte-range locks supported */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1611 
/* File ops: strict cache-coherency I/O ("strictcache"), byte-range locks */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1631 
/* File ops: unbuffered (direct) I/O via netfs, byte-range locks */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1651 
/* File ops: cached I/O, no byte-range locks (.lock/.flock omitted) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1669 
/* File ops: strict cache-coherency I/O, no byte-range locks */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1687 
/* File ops: unbuffered (direct) I/O, no byte-range locks */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1705 
/* File ops for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1716 
/*
 * Slab constructor: one-time initialization run when a cifsInodeInfo
 * object is first created in the cache (not on every allocation).
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1725 
/*
 * Create the slab cache for cifsInodeInfo objects.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int __init
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}
1739 
/* Tear down the cifsInodeInfo slab cache (module unload path) */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1750 
/*
 * Create the slab caches and mempools for large and small SMB request
 * buffers, clamping the CIFSMaxBufSize / cifs_min_rcv / cifs_min_small
 * module parameters to sane ranges first.
 * Returns 0 on success, -ENOMEM on failure (with partial allocations
 * rolled back).
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	/* clamp module parameter to [8192, 127K], rounded to 512 bytes */
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	/* clamp the minimum number of pre-reserved large buffers to [1, 64] */
	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* clamp the minimum number of pre-reserved small buffers to [2, 256] */
	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1831 
/* Free the request-buffer mempools and slab caches (reverse of init) */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1840 
/*
 * Create the slab cache and mempool for mid_q_entry (multiplex id)
 * structures used to track in-flight requests.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	if (mempool_init_slab_pool(&cifs_mid_pool, 3, cifs_mid_cachep) < 0) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}
1857 
/* Tear down the mid_q_entry mempool and slab cache */
static void destroy_mids(void)
{
	mempool_exit(&cifs_mid_pool);
	kmem_cache_destroy(cifs_mid_cachep);
}
1863 
/*
 * Create the slab caches and mempools for netfs I/O request and
 * subrequest objects.  Uses goto-based rollback so partially created
 * resources are freed on any failure.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1897 
/*
 * Undo cifs_init_netfs(): drain each mempool before destroying the slab
 * cache it allocates from (pool-before-cache order is required).
 */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1905 
/*
 * Module entry point.  Sets up, in order: procfs entries, global
 * statistics counters, workqueues, the inode/netfs/mid/request-buffer
 * allocators, and the optional DFS/SPNEGO/SWN subsystems; finally
 * registers both the cifs and smb3 filesystem types.  Any failure
 * unwinds everything already initialized (in reverse order) via the
 * goto ladder at the bottom and returns a negative errno.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	/* warn once at load time about out-of-range slow_rsp_threshold */
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;

	/* per-load random value; presumably mixed into lock-ownership hashes
	 * elsewhere — confirm at cifs_lock_secret use sites */
	cifs_lock_secret = get_random_u32();

	/* clamp the cifs_max_pending module parameter to [2, CIFS_MAX_REQ] */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod",
				     WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				     0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				      0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

	/* slab caches / mempools, each unwound by the matching label below */
	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		/* both types must register or neither stays registered */
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/*
	 * Error unwind: execution falls through the labels in reverse order
	 * of the setup above.  The #ifdef'd labels pair with the matching
	 * conditional init calls, so the ladder stays balanced regardless
	 * of which upcall options are configured.
	 */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
2105 
/*
 * Module exit.  Unregisters both filesystem types first (so no new
 * mounts can start), then tears down the subsystems and allocators
 * created by init_cifs(), and finally destroys the workqueues and
 * procfs entries.  Ordering mirrors init_cifs()'s unwind ladder.
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2136 
/* Module metadata exposed via modinfo */
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: hint modprobe to load these crypto/nls modules
 * alongside this one; they are runtime helpers, not link-time
 * requirements, so their absence does not block module load.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2152