1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
32 #include <net/ipv6.h>
33 #include "cifsfs.h"
34 #include "cifspdu.h"
35 #define DECLARE_GLOBALS_HERE
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
40 #include <linux/mm.h>
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

/* Module-wide behavior flags; several are also exposed as module params below */
int cifsFYI = 0;	/* enables verbose FYI-level debug output when nonzero */
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */
DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */

/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head cifs_tcp_ses_list;	/* all known TCP server connections */
DEFINE_SPINLOCK(cifs_tcp_ses_lock);	/* guards cifs_tcp_ses_list and nested ses/tcon lists */
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				    "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
/* Module-wide total cached dirents (in bytes) across all tcons */
atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);
126
127 /*
128 * Write-only module parameter to drop all cached directory entries across
129 * all CIFS mounts. Echo a non-zero value to trigger.
130 */
cifs_drop_all_dir_caches(void)131 static void cifs_drop_all_dir_caches(void)
132 {
133 struct TCP_Server_Info *server;
134 struct cifs_ses *ses;
135 struct cifs_tcon *tcon;
136
137 spin_lock(&cifs_tcp_ses_lock);
138 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
139 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
140 if (cifs_ses_exiting(ses))
141 continue;
142 list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
143 invalidate_all_cached_dirs(tcon);
144 }
145 }
146 spin_unlock(&cifs_tcp_ses_lock);
147 }
148
cifs_param_set_drop_dir_cache(const char * val,const struct kernel_param * kp)149 static int cifs_param_set_drop_dir_cache(const char *val, const struct kernel_param *kp)
150 {
151 bool bv;
152 int rc = kstrtobool(val, &bv);
153
154 if (rc)
155 return rc;
156 if (bv)
157 cifs_drop_all_dir_caches();
158 return 0;
159 }
160
module_param_call(drop_dir_cache, cifs_param_set_drop_dir_cache, NULL, NULL, 0200);
MODULE_PARM_DESC(drop_dir_cache, "Write 1 to drop all cached directory entries across all CIFS mounts");

#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				     "before logging that a response is delayed. "
				     "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
					  "helpful to restrict the ability to "
					  "override the default dialects (SMB2.1, "
					  "SMB3 and SMB3.02) on mount with old "
					  "dialects (CIFS/SMB1 and SMB2) since "
					  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
					  " and less secure. Default: n/N/0");

/* Workqueues used by the cifs client to run deferred/background work */
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *decrypt_wq;
struct workqueue_struct *fileinfo_put_wq;
struct workqueue_struct *cifsoplockd_wq;
struct workqueue_struct *deferredclose_wq;
struct workqueue_struct *serverclose_wq;
struct workqueue_struct *cfid_put_wq;
__u32 cifs_lock_secret;		/* random value mixed into lock ownership hashing - TODO confirm use */
201
202 /*
203 * Bumps refcount for cifs super block.
204 * Note that it should be only called if a reference to VFS super block is
205 * already held, e.g. in open-type syscalls context. Otherwise it can race with
206 * atomic_dec_and_test in deactivate_locked_super.
207 */
208 void
cifs_sb_active(struct super_block * sb)209 cifs_sb_active(struct super_block *sb)
210 {
211 struct cifs_sb_info *server = CIFS_SB(sb);
212
213 if (atomic_inc_return(&server->active) == 1)
214 atomic_inc(&sb->s_active);
215 }
216
217 void
cifs_sb_deactive(struct super_block * sb)218 cifs_sb_deactive(struct super_block *sb)
219 {
220 struct cifs_sb_info *server = CIFS_SB(sb);
221
222 if (atomic_dec_and_test(&server->active))
223 deactivate_super(sb);
224 }
225
/*
 * Fill in a newly created superblock for a cifs mount: mount flags,
 * supported timestamp range/granularity, super operations, readahead
 * tuning and the root inode/dentry.  Returns 0 on success or a negative
 * errno (all failures funnel through out_no_root).
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot are forced read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* case-insensitive shares need the ci dentry hash/compare ops */
	if (tcon->nocase)
		set_default_d_op(sb, &cifs_ci_dentry_ops);
	else
		set_default_d_op(sb, &cifs_dentry_ops);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	/* exporting via nfsd requires stable server inode numbers */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
326
/*
 * Tear down a cifs superblock.  Ordering matters: cached directory
 * dentries and the root dentry must be released before the generic
 * kill_anon_super() teardown, and cifs_umount() (which drops the
 * tcon/session) runs last.
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}
346
347 static int
cifs_statfs(struct dentry * dentry,struct kstatfs * buf)348 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
349 {
350 struct super_block *sb = dentry->d_sb;
351 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
352 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
353 struct TCP_Server_Info *server = tcon->ses->server;
354 unsigned int xid;
355 int rc = 0;
356 const char *full_path;
357 void *page;
358
359 xid = get_xid();
360 page = alloc_dentry_path();
361
362 full_path = build_path_from_dentry(dentry, page);
363 if (IS_ERR(full_path)) {
364 rc = PTR_ERR(full_path);
365 goto statfs_out;
366 }
367
368 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
369 buf->f_namelen =
370 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
371 else
372 buf->f_namelen = PATH_MAX;
373
374 buf->f_fsid.val[0] = tcon->vol_serial_number;
375 /* are using part of create time for more randomness, see man statfs */
376 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
377
378 buf->f_files = 0; /* undefined */
379 buf->f_ffree = 0; /* unlimited */
380
381 if (server->ops->queryfs)
382 rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
383
384 statfs_out:
385 free_dentry_path(page);
386 free_xid(xid);
387 return rc;
388 }
389
/*
 * fallocate(2) entry point: forwards to the dialect-specific handler
 * (server->ops->fallocate) while holding the inode lock.  Returns
 * -EOPNOTSUPP when the negotiated dialect provides no fallocate op.
 */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = file_inode(file);
	int rc;

	if (!server->ops->fallocate)
		return -EOPNOTSUPP;

	/* killable lock so a fatal signal does not leave the caller stuck */
	rc = inode_lock_killable(inode);
	if (rc)
		return rc;

	/* let in-flight netfs I/O drain before changing the file layout */
	netfs_wait_for_outstanding_io(inode);

	/* apply the usual pre-write bookkeeping (timestamps etc.) */
	rc = file_modified(file);
	if (rc)
		goto out_unlock;

	rc = server->ops->fallocate(file, tcon, mode, off, len);

out_unlock:
	inode_unlock(inode);
	return rc;
}
417
cifs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)418 static int cifs_permission(struct mnt_idmap *idmap,
419 struct inode *inode, int mask)
420 {
421 struct cifs_sb_info *cifs_sb;
422
423 cifs_sb = CIFS_SB(inode->i_sb);
424
425 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
426 if ((mask & MAY_EXEC) && !execute_ok(inode))
427 return -EACCES;
428 else
429 return 0;
430 } else /* file mode might have been restricted at mount time
431 on the client (above and beyond ACL on servers) for
432 servers which do not support setting and viewing mode bits,
433 so allowing client to check permissions is useful */
434 return generic_permission(&nop_mnt_idmap, inode, mask);
435 }
436
/* Slab caches and mempools backing cifs inodes, request buffers, mids
 * and netfs I/O (sub)requests; initialized at module load. */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
448
/*
 * ->alloc_inode: allocate a cifs inode from the inode slab and set every
 * cifs-specific field to its initial state.  Returns NULL on allocation
 * failure, per the VFS contract.
 */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = ATTR_ARCHIVE; /* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	/* fresh random lease key for this inode */
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}
487
488 static void
cifs_free_inode(struct inode * inode)489 cifs_free_inode(struct inode *inode)
490 {
491 struct cifsInodeInfo *cinode = CIFS_I(inode);
492
493 if (S_ISLNK(inode->i_mode))
494 kfree(cinode->symlink_target);
495 kmem_cache_free(cifs_inode_cachep, cinode);
496 }
497
/*
 * ->evict_inode: wait for outstanding netfs I/O, drop the page cache,
 * then release any fscache cookie before clearing the inode.  The
 * ordering of these steps is intentional.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
508
509 static void
cifs_show_address(struct seq_file * s,struct TCP_Server_Info * server)510 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
511 {
512 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
513 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
514
515 seq_puts(s, ",addr=");
516
517 switch (server->dstaddr.ss_family) {
518 case AF_INET:
519 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
520 break;
521 case AF_INET6:
522 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
523 if (sa6->sin6_scope_id)
524 seq_printf(s, "%%%u", sa6->sin6_scope_id);
525 break;
526 default:
527 seq_puts(s, "(unknown)");
528 }
529 if (server->rdma)
530 seq_puts(s, ",rdma");
531 }
532
533 static void
cifs_show_security(struct seq_file * s,struct cifs_ses * ses)534 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
535 {
536 if (ses->sectype == Unspecified) {
537 if (ses->user_name == NULL)
538 seq_puts(s, ",sec=none");
539 return;
540 }
541
542 seq_puts(s, ",sec=");
543
544 switch (ses->sectype) {
545 case NTLMv2:
546 seq_puts(s, "ntlmv2");
547 break;
548 case Kerberos:
549 seq_puts(s, "krb5");
550 break;
551 case RawNTLMSSP:
552 seq_puts(s, "ntlmssp");
553 break;
554 default:
555 /* shouldn't ever happen */
556 seq_puts(s, "unknown");
557 break;
558 }
559
560 if (ses->sign)
561 seq_puts(s, "i");
562
563 if (ses->sectype == Kerberos)
564 seq_printf(s, ",cruid=%u",
565 from_kuid_munged(&init_user_ns, ses->cred_uid));
566 }
567
568 static void
cifs_show_cache_flavor(struct seq_file * s,struct cifs_sb_info * cifs_sb)569 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
570 {
571 seq_puts(s, ",cache=");
572
573 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
574 seq_puts(s, "strict");
575 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
576 seq_puts(s, "none");
577 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
578 seq_puts(s, "singleclient"); /* assume only one client access */
579 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
580 seq_puts(s, "ro"); /* read only caching assumed */
581 else
582 seq_puts(s, "loose");
583 }
584
585 /*
586 * cifs_show_devname() is used so we show the mount device name with correct
587 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
588 */
cifs_show_devname(struct seq_file * m,struct dentry * root)589 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
590 {
591 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
592 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
593
594 if (devname == NULL)
595 seq_puts(m, "none");
596 else {
597 convert_delimiter(devname, '/');
598 /* escape all spaces in share names */
599 seq_escape(m, devname, " \t");
600 kfree(devname);
601 }
602 return 0;
603 }
604
605 static void
cifs_show_upcall_target(struct seq_file * s,struct cifs_sb_info * cifs_sb)606 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
607 {
608 if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
609 seq_puts(s, ",upcall_target=app");
610 return;
611 }
612
613 seq_puts(s, ",upcall_target=");
614
615 switch (cifs_sb->ctx->upcall_target) {
616 case UPTARGET_APP:
617 seq_puts(s, "app");
618 break;
619 case UPTARGET_MOUNT:
620 seq_puts(s, "mount");
621 break;
622 default:
623 /* shouldn't ever happen */
624 seq_puts(s, "unknown");
625 break;
626 }
627 }
628
629 /*
630 * cifs_show_options() is for displaying mount options in /proc/mounts.
631 * Not all settable options are displayed but most of the important
632 * ones are.
633 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* show the client-side source address only if one was bound */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
			   cifs_sb->ctx->file_mode,
			   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->ses->unicode == 0)
		seq_puts(s, ",nounicode");
	else if (tcon->ses->unicode == 1)
		seq_puts(s, ",unicode");
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* one token per set boolean mount flag */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
	if (cifs_sb->ctx->nonativesocket)
		seq_puts(s, ",nonativesocket");
	else
		seq_puts(s, ",nativesocket");
	seq_show_option(s, "symlink",
			cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
837
/*
 * ->umount_begin (umount -f): if this is the only mount of the share,
 * wake all tasks blocked on network requests so the forced unmount can
 * make progress.  Lock order: cifs_tcp_ses_lock is taken before
 * tcon->tc_lock - keep it that way.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files). TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
882
/*
 * ->freeze_fs: flush deferred file closes so no deferred handles are
 * left pending while the filesystem is frozen.
 */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	if (cifs_sb)
		cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
896
#ifdef CONFIG_CIFS_STATS2
/* ->show_stats: per-superblock statistics output; not implemented yet */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
904
/* ->write_inode: delegate to netfs to unpin any pinned writeback state */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
909
cifs_drop_inode(struct inode * inode)910 static int cifs_drop_inode(struct inode *inode)
911 {
912 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
913
914 /* no serverino => unconditional eviction */
915 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
916 inode_generic_drop(inode);
917 }
918
/* VFS superblock callbacks for cifs mounts */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
939
940 /*
941 * Get root dentry from superblock according to prefix path mount option.
942 * Return dentry with refcount + 1 on success and NULL otherwise.
943 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with a prefix path the superblock root already is the mount root */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
					    cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/*
	 * Walk the path one component at a time, each iteration trading
	 * our reference on the parent dentry for one on the child.
	 */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
995
/* sget() "set" callback: attach the prepared cifs_sb to a new superblock. */
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}
1002
/*
 * Mount entry point shared by the "cifs" and "smb3" filesystem types.
 *
 * Builds a cifs_sb from a private copy of the parsed fs_context, performs
 * the protocol-level mount, then finds or creates a superblock via sget().
 * If sget() matched an existing compatible superblock, the freshly mounted
 * cifs_sb is torn down again and the existing one is reused.
 *
 * Returns the (refcounted) root dentry, or an ERR_PTR on failure.
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	/* Duplicate the context: cifs_sb must not reference old_ctx */
	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	/* Establish the session and tree connection on the server */
	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		/* Matched an existing sb: drop the duplicate mount state */
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL when we reused an existing sb: use old_ctx then */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	/* Failure before sget(): free everything we allocated ourselves */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1096
/*
 * ->llseek: when the target depends on EOF (SEEK_END/SEEK_DATA/SEEK_HOLE),
 * revalidate the file size against the server first, then let a
 * protocol-specific llseek op or the generic helper compute the position.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* Prefer the dialect-specific llseek when the server ops provide one */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1141
/*
 * ->setlease: only grant a local lease when it is backed by caching state
 * (oplock/lease) of at least the requested level, or when the local_lease
 * mount option explicitly opts out of that requirement.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1171
/* Filesystem type registered as "cifs" (legacy name; same code as smb3). */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1181
/* Filesystem type registered as "smb3"; shares all callbacks with "cifs". */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1192
/* Inode operations for directories. */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1211
/* Inode operations for regular files. */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1221
/*
 * ->get_link: return a copy of the cached symlink target for @inode.
 *
 * The target is copied out of cifsInodeInfo under i_lock, and the copy is
 * freed through the delayed_call once the caller is done.  RCU-walk
 * lookups (dentry == NULL) are bounced with -ECHILD since we allocate.
 */
const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			  struct delayed_call *done)
{
	char *target_path;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		/* no cached target: free the buffer and report unsupported */
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}
1248
/* Inode operations for symbolic links. */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1255
1256 /*
1257 * Advance the EOF marker to after the source range.
1258 */
/*
 * Advance the EOF marker to after the source range.
 *
 * Server-side copies fail when the source range crosses the server's EOF,
 * so push the server's file size out to i_size through a writable handle
 * first.  If no handle or op is available (rc < 0), fall back to flushing
 * the source mapping so the server at least sees the latest data.
 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;

	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* Record the new remote size locally and in fscache */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	return filemap_write_and_wait(src_inode->i_mapping);
}
1288
1289 /*
1290 * Flush out either the folio that overlaps the beginning of a range in which
1291 * pos resides or the folio that overlaps the end of a range unless that folio
1292 * is entirely within the range we're going to invalidate. We extend the flush
1293 * bounds to encompass the folio.
1294 */
/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate.  We extend the flush
 * bounds to encompass the folio.
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;	/* nothing cached at pos: nothing to flush */

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* grow the caller's bounds to cover the whole folio */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend = max_t(unsigned long long, *_fend, fend);
	/* folio lies entirely inside the range: skip the writeback */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1321
/*
 * ->remap_file_range: clone a byte range via the server's duplicate-extents
 * op.  Dedup is not supported.  The source is flushed (and its server EOF
 * advanced if needed) before the copy, and the destination pagecache is
 * invalidated so stale data cannot survive the server-side clone.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* Clone extended the file: update local size state */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		} else if (rc == -EOPNOTSUPP) {
			/*
			 * copy_file_range syscall man page indicates EINVAL
			 * is returned e.g when "fd_in and fd_out refer to the
			 * same file and the source and target ranges overlap."
			 * Test generic/157 was what showed these cases where
			 * we need to remap EOPNOTSUPP to EINVAL
			 */
			if (off >= src_inode->i_size) {
				rc = -EINVAL;
			} else if (src_inode == target_inode) {
				if (off + len > destoff)
					rc = -EINVAL;
			}
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	/* on success report the full requested length as cloned */
	return rc < 0 ? rc : len;
}
1447
/*
 * Server-side copy of a byte range using the SMB copychunk op.
 *
 * Both files must be on the same SMB session.  The source is flushed (and
 * its server EOF advanced if needed), the destination range is flushed and
 * invalidated, then the server performs the copy.  Returns the number of
 * bytes copied or a negative errno (-EXDEV for cross-server copies,
 * -EOPNOTSUPP if the server lacks the op — callers may then fall back).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* copychunk only works within a single SMB session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			/* copy extended the file: update local size state */
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1552
1553 /*
1554 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1555 * is a dummy operation.
1556 */
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/* nothing to flush: directory changes were already sent to the server */
	return 0;
}
1564
cifs_copy_file_range(struct file * src_file,loff_t off,struct file * dst_file,loff_t destoff,size_t len,unsigned int flags)1565 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1566 struct file *dst_file, loff_t destoff,
1567 size_t len, unsigned int flags)
1568 {
1569 unsigned int xid = get_xid();
1570 ssize_t rc;
1571 struct cifsFileInfo *cfile = dst_file->private_data;
1572
1573 if (cfile->swapfile) {
1574 rc = -EOPNOTSUPP;
1575 free_xid(xid);
1576 return rc;
1577 }
1578
1579 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1580 len, flags);
1581 free_xid(xid);
1582
1583 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1584 rc = splice_copy_file_range(src_file, off, dst_file,
1585 destoff, len);
1586 return rc;
1587 }
1588
/* File ops for regular files: loose-cached reads, byte-range locks enabled. */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1608
/* File ops variant using strict cache-coherent read/write/fsync paths. */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1628
/* File ops variant for uncached (direct) I/O via netfs unbuffered paths. */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1648
/* As cifs_file_ops but without byte-range lock ops (nobrl mount option). */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1666
/* As cifs_file_strict_ops but without byte-range lock ops (nobrl). */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1684
/* As cifs_file_direct_ops but without byte-range lock ops (nobrl). */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1702
/* File operations for open directories. */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1713
/* Slab constructor: one-time initialization of a cifsInodeInfo object. */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1722
1723 static int __init
cifs_init_inodecache(void)1724 cifs_init_inodecache(void)
1725 {
1726 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1727 sizeof(struct cifsInodeInfo),
1728 0, (SLAB_RECLAIM_ACCOUNT|
1729 SLAB_ACCOUNT),
1730 cifs_init_once);
1731 if (cifs_inode_cachep == NULL)
1732 return -ENOMEM;
1733
1734 return 0;
1735 }
1736
/* Tear down the cifsInodeInfo slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1747
1748 static int
cifs_init_request_bufs(void)1749 cifs_init_request_bufs(void)
1750 {
1751 /*
1752 * SMB2 maximum header size is bigger than CIFS one - no problems to
1753 * allocate some more bytes for CIFS.
1754 */
1755 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1756
1757 if (CIFSMaxBufSize < 8192) {
1758 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1759 Unicode path name has to fit in any SMB/CIFS path based frames */
1760 CIFSMaxBufSize = 8192;
1761 } else if (CIFSMaxBufSize > 1024*127) {
1762 CIFSMaxBufSize = 1024 * 127;
1763 } else {
1764 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1765 }
1766 /*
1767 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1768 CIFSMaxBufSize, CIFSMaxBufSize);
1769 */
1770 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1771 CIFSMaxBufSize + max_hdr_size, 0,
1772 SLAB_HWCACHE_ALIGN, 0,
1773 CIFSMaxBufSize + max_hdr_size,
1774 NULL);
1775 if (cifs_req_cachep == NULL)
1776 return -ENOMEM;
1777
1778 if (cifs_min_rcv < 1)
1779 cifs_min_rcv = 1;
1780 else if (cifs_min_rcv > 64) {
1781 cifs_min_rcv = 64;
1782 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1783 }
1784
1785 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1786 cifs_req_cachep);
1787
1788 if (cifs_req_poolp == NULL) {
1789 kmem_cache_destroy(cifs_req_cachep);
1790 return -ENOMEM;
1791 }
1792 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1793 almost all handle based requests (but not write response, nor is it
1794 sufficient for path based requests). A smaller size would have
1795 been more efficient (compacting multiple slab items on one 4k page)
1796 for the case in which debug was on, but this larger size allows
1797 more SMBs to use small buffer alloc and is still much more
1798 efficient to alloc 1 per page off the slab compared to 17K (5page)
1799 alloc of large cifs buffers even when page debugging is on */
1800 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1801 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1802 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1803 if (cifs_sm_req_cachep == NULL) {
1804 mempool_destroy(cifs_req_poolp);
1805 kmem_cache_destroy(cifs_req_cachep);
1806 return -ENOMEM;
1807 }
1808
1809 if (cifs_min_small < 2)
1810 cifs_min_small = 2;
1811 else if (cifs_min_small > 256) {
1812 cifs_min_small = 256;
1813 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1814 }
1815
1816 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1817 cifs_sm_req_cachep);
1818
1819 if (cifs_sm_req_poolp == NULL) {
1820 mempool_destroy(cifs_req_poolp);
1821 kmem_cache_destroy(cifs_req_cachep);
1822 kmem_cache_destroy(cifs_sm_req_cachep);
1823 return -ENOMEM;
1824 }
1825
1826 return 0;
1827 }
1828
/* Destroy the request-buffer mempools and slab caches (pools first). */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1837
init_mids(void)1838 static int init_mids(void)
1839 {
1840 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1841 sizeof(struct mid_q_entry), 0,
1842 SLAB_HWCACHE_ALIGN, NULL);
1843 if (cifs_mid_cachep == NULL)
1844 return -ENOMEM;
1845
1846 /* 3 is a reasonable minimum number of simultaneous operations */
1847 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1848 if (cifs_mid_poolp == NULL) {
1849 kmem_cache_destroy(cifs_mid_cachep);
1850 return -ENOMEM;
1851 }
1852
1853 return 0;
1854 }
1855
/* Destroy the mid mempool and slab cache (pool first). */
static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}
1861
/*
 * Create the slab caches and mempools backing cifs netfs I/O requests and
 * subrequests.  Returns 0, or -ENOMEM after unwinding any partial setup.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

	/* error unwind: each label frees what was created before its goto */
nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1895
/* Tear down the netfs I/O pools and caches created by cifs_init_netfs(). */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1903
/*
 * Module init: set up procfs entries, global counters, workqueues,
 * caches/mempools, optional upcall/DFS/SWN subsystems, and finally
 * register the "cifs" and "smb3" filesystem types.
 *
 * Error handling uses a single goto-unwind ladder at the bottom; each
 * label undoes everything initialized before the failing step.  Several
 * labels are deliberately placed *inside* #ifdef blocks so that, when a
 * feature is compiled out, both the goto that targets the label and the
 * cleanup call it would skip disappear together.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
	/*
	 * Initialize Global counters
	 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	/* module parameter sanity: 0 disables slow-response logging */
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;

	/* random per-boot secret used to hash byte-range lock owners */
	cifs_lock_secret = get_random_u32();

	/* clamp the cifs_max_pending module parameter to a sane range */
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod",
				     WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				     0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					   0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	serverclose_wq = alloc_workqueue("serverclose",
					 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
					 0);
	if (!serverclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_deferredclose_wq;
	}

	cfid_put_wq = alloc_workqueue("cfid_put_wq",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
				      0);
	if (!cfid_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_serverclose_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_cfid_put_wq;

	rc = cifs_init_netfs();
	if (rc)
		goto out_destroy_inodecache;

	rc = init_mids();
	if (rc)
		goto out_destroy_netfs;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		/* roll back the first registration inline; nothing else to undo */
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

	/*
	 * Error unwind: falls through top-to-bottom, undoing init steps in
	 * reverse order.  Labels inside #ifdef blocks vanish along with the
	 * corresponding gotos when the feature is compiled out.
	 */
out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_netfs:
	cifs_destroy_netfs();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_cfid_put_wq:
	destroy_workqueue(cfid_put_wq);
out_destroy_serverclose_wq:
	destroy_workqueue(serverclose_wq);
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}
2103
/*
 * Module exit: unregister both filesystem types first so no new mounts
 * can start, then tear down subsystems roughly in reverse of init_cifs().
 *
 * NOTE(review): the workqueue destroy order here is not the exact reverse
 * of the allocation order in init_cifs() (e.g. decrypt_wq is destroyed
 * before fileinfo_put_wq) — presumably harmless since all queues are
 * independent by this point, but worth confirming against the error-path
 * ordering in init_cifs().
 */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2134
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");      /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: hint to userspace (modprobe) to load these modules
 * alongside cifs.ko — the crypto/nls algorithms used for SMB signing,
 * encryption and charset conversion.
 */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2154