1 // SPDX-License-Identifier: LGPL-2.1
2 /*
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 *
7 * Common Internet FileSystem (CIFS) client
8 *
9 */
10
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <linux/mm.h>
32 #include <linux/key-type.h>
33 #include <uapi/linux/magic.h>
34 #include <net/ipv6.h>
35 #include "cifsfs.h"
36 #define DECLARE_GLOBALS_HERE
37 #include "cifsglob.h"
38 #include "cifsproto.h"
39 #include "smb2proto.h"
40 #include "cifs_debug.h"
41 #include "cifs_fs_sb.h"
42 #include "cifs_spnego.h"
43 #include "fscache.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
46 #endif
47 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "netlink.h"
49 #endif
50 #include "fs_context.h"
51 #include "cached_dir.h"
52
53 /*
54 * DOS dates from 1980/1/1 through 2107/12/31
55 * Protocol specifications indicate the range should be to 119, which
56 * limits maximum year to 2099. But this range has not been checked.
57 */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
61
62 int cifsFYI = 0;
63 bool traceSMB;
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73
74 /*
75 * Global transaction id (XID) information
76 */
77 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */
78 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
79 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */
80 DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */
81
82 /*
83 * Global counters, updated atomically
84 */
85 atomic_t sesInfoAllocCount;
86 atomic_t tconInfoAllocCount;
87 atomic_t tcpSesNextId;
88 atomic_t tcpSesAllocCount;
89 atomic_t tcpSesReconnectCount;
90 atomic_t tconInfoReconnectCount;
91
92 atomic_t mid_count;
93 atomic_t buf_alloc_count;
94 atomic_t small_buf_alloc_count;
95 #ifdef CONFIG_CIFS_STATS2
96 atomic_t total_buf_alloc_count;
97 atomic_t total_small_buf_alloc_count;
98 #endif/* STATS2 */
99 struct list_head cifs_tcp_ses_list;
100 DEFINE_SPINLOCK(cifs_tcp_ses_lock);
101 static const struct super_operations cifs_super_ops;
102 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
103 module_param(CIFSMaxBufSize, uint, 0444);
104 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
105 "for CIFS requests. "
106 "Default: 16384 Range: 8192 to 130048");
107 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
108 module_param(cifs_min_rcv, uint, 0444);
109 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
110 "1 to 64");
111 unsigned int cifs_min_small = 30;
112 module_param(cifs_min_small, uint, 0444);
113 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
114 "Range: 2 to 256");
115 unsigned int cifs_max_pending = CIFS_MAX_REQ;
116 module_param(cifs_max_pending, uint, 0444);
117 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
118 "CIFS/SMB1 dialect (N/A for SMB3) "
119 "Default: 32767 Range: 2 to 32767.");
120 unsigned int dir_cache_timeout = 30;
121 module_param(dir_cache_timeout, uint, 0644);
122 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
123 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
124 /* Module-wide total cached dirents (in bytes) across all tcons */
125 atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);
126
127 atomic_t cifs_sillycounter;
128 atomic_t cifs_tmpcounter;
129
130 #ifdef CONFIG_CIFS_STATS2
131 unsigned int slow_rsp_threshold = 1;
132 module_param(slow_rsp_threshold, uint, 0644);
133 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
134 "before logging that a response is delayed. "
135 "Default: 1 (if set to 0 disables msg).");
136 #endif /* STATS2 */
137
138 module_param(enable_oplocks, bool, 0644);
139 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
140
141 module_param(enable_gcm_256, bool, 0644);
142 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: y/Y/1");
143
144 module_param(require_gcm_256, bool, 0644);
145 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
146
147 module_param(enable_negotiate_signing, bool, 0644);
148 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
149
150 module_param(disable_legacy_dialects, bool, 0644);
151 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
152 "helpful to restrict the ability to "
153 "override the default dialects (SMB2.1, "
154 "SMB3 and SMB3.02) on mount with old "
155 "dialects (CIFS/SMB1 and SMB2) since "
156 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
157 " and less secure. Default: n/N/0");
158
159 struct workqueue_struct *cifsiod_wq;
160 struct workqueue_struct *decrypt_wq;
161 struct workqueue_struct *fileinfo_put_wq;
162 struct workqueue_struct *cifsoplockd_wq;
163 struct workqueue_struct *deferredclose_wq;
164 struct workqueue_struct *serverclose_wq;
165 struct workqueue_struct *cfid_put_wq;
166 __u32 cifs_lock_secret;
167
168 /*
169 * Bumps refcount for cifs super block.
170 * Note that it should be only called if a reference to VFS super block is
171 * already held, e.g. in open-type syscalls context. Otherwise it can race with
172 * atomic_dec_and_test in deactivate_locked_super.
173 */
174 void
cifs_sb_active(struct super_block * sb)175 cifs_sb_active(struct super_block *sb)
176 {
177 struct cifs_sb_info *server = CIFS_SB(sb);
178
179 if (atomic_inc_return(&server->active) == 1)
180 atomic_inc(&sb->s_active);
181 }
182
183 void
cifs_sb_deactive(struct super_block * sb)184 cifs_sb_deactive(struct super_block *sb)
185 {
186 struct cifs_sb_info *server = CIFS_SB(sb);
187
188 if (atomic_dec_and_test(&server->active))
189 deactivate_super(sb);
190 }
191
/*
 * Fill in a freshly created superblock once cifs_mount() has established
 * the master tcon: flags, timestamp granularity and range, readahead,
 * block size, dentry operations and the root inode/dentry.
 * Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	unsigned int sbflags;
	struct timespec64 ts;
	struct inode *inode;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
	sbflags = cifs_sb_flags(cifs_sb);

	if (sbflags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot are forced read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	/* nocase mounts need case-insensitive dentry name comparisons */
	if (tcon->nocase)
		set_default_d_op(sb, &cifs_ci_dentry_ops);
	else
		set_default_d_op(sb, &cifs_dentry_ops);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	/* NFS export needs stable (server-provided) inode numbers */
	if (sbflags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}
294
/*
 * Superblock teardown.  Releases cached directory dentries and deferred
 * file handles first, then destroys the superblock and undoes the work
 * done by cifs_mount().
 */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * and close all deferred file handles before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);
		cifs_close_all_deferred_files_sb(cifs_sb);

		/* Wait for all pending oplock breaks to complete */
		flush_workqueue(cifsoplockd_wq);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	/* counterpart of the cifs_mount() done in cifs_smb3_do_mount() */
	cifs_umount(cifs_sb);
}
318
319 static int
cifs_statfs(struct dentry * dentry,struct kstatfs * buf)320 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
321 {
322 struct super_block *sb = dentry->d_sb;
323 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
324 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
325 struct TCP_Server_Info *server = tcon->ses->server;
326 unsigned int xid;
327 int rc = 0;
328 const char *full_path;
329 void *page;
330
331 xid = get_xid();
332 page = alloc_dentry_path();
333
334 full_path = build_path_from_dentry(dentry, page);
335 if (IS_ERR(full_path)) {
336 rc = PTR_ERR(full_path);
337 goto statfs_out;
338 }
339
340 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
341 buf->f_namelen =
342 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
343 else
344 buf->f_namelen = PATH_MAX;
345
346 buf->f_fsid.val[0] = tcon->vol_serial_number;
347 /* are using part of create time for more randomness, see man statfs */
348 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
349
350 buf->f_files = 0; /* undefined */
351 buf->f_ffree = 0; /* unlimited */
352
353 if (server->ops->queryfs)
354 rc = server->ops->queryfs(xid, tcon, full_path, cifs_sb, buf);
355
356 statfs_out:
357 free_dentry_path(page);
358 free_xid(xid);
359 return rc;
360 }
361
/*
 * ->fallocate handler.  Serializes against other inode users, waits for
 * in-flight netfs I/O, then hands off to the dialect-specific operation.
 */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_tcon *tcon = cifs_sb_master_tcon(CIFS_SB(file));
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = file_inode(file);
	int rc;

	if (!server->ops->fallocate)
		return -EOPNOTSUPP;

	rc = inode_lock_killable(inode);
	if (rc)
		return rc;

	/* drain outstanding reads/writes before changing the layout */
	netfs_wait_for_outstanding_io(inode);

	rc = file_modified(file);
	if (!rc)
		rc = server->ops->fallocate(file, tcon, mode, off, len);

	inode_unlock(inode);
	return rc;
}
388
cifs_permission(struct mnt_idmap * idmap,struct inode * inode,int mask)389 static int cifs_permission(struct mnt_idmap *idmap,
390 struct inode *inode, int mask)
391 {
392 unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
393
394 if (sbflags & CIFS_MOUNT_NO_PERM) {
395 if ((mask & MAY_EXEC) && !execute_ok(inode))
396 return -EACCES;
397 else
398 return 0;
399 } else /* file mode might have been restricted at mount time
400 on the client (above and beyond ACL on servers) for
401 servers which do not support setting and viewing mode bits,
402 so allowing client to check permissions is useful */
403 return generic_permission(&nop_mnt_idmap, inode, mask);
404 }
405
/* slab caches and mempools for the client's frequently allocated objects */
static struct kmem_cache *cifs_inode_cachep;		/* cifsInodeInfo */
static struct kmem_cache *cifs_req_cachep;		/* large request buffers */
static struct kmem_cache *cifs_mid_cachep;		/* mid queue entries */
static struct kmem_cache *cifs_sm_req_cachep;		/* small request buffers */
static struct kmem_cache *cifs_io_request_cachep;
static struct kmem_cache *cifs_io_subrequest_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t cifs_mid_pool;
mempool_t cifs_io_request_pool;
mempool_t cifs_io_subrequest_pool;
417
418 static struct inode *
cifs_alloc_inode(struct super_block * sb)419 cifs_alloc_inode(struct super_block *sb)
420 {
421 struct cifsInodeInfo *cifs_inode;
422 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
423 if (!cifs_inode)
424 return NULL;
425 cifs_inode->cifsAttrs = ATTR_ARCHIVE; /* default */
426 cifs_inode->time = 0;
427 /*
428 * Until the file is open and we have gotten oplock info back from the
429 * server, can not assume caching of file data or metadata.
430 */
431 cifs_set_oplock_level(cifs_inode, 0);
432 cifs_inode->lease_granted = false;
433 cifs_inode->flags = 0;
434 spin_lock_init(&cifs_inode->writers_lock);
435 cifs_inode->writers = 0;
436 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
437 cifs_inode->netfs.remote_i_size = 0;
438 cifs_inode->uniqueid = 0;
439 cifs_inode->createtime = 0;
440 cifs_inode->epoch = 0;
441 spin_lock_init(&cifs_inode->open_file_lock);
442 generate_random_uuid(cifs_inode->lease_key);
443 cifs_inode->symlink_target = NULL;
444
445 /*
446 * Can not set i_flags here - they get immediately overwritten to zero
447 * by the VFS.
448 */
449 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
450 INIT_LIST_HEAD(&cifs_inode->openFileList);
451 INIT_LIST_HEAD(&cifs_inode->llist);
452 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
453 spin_lock_init(&cifs_inode->deferred_lock);
454 return &cifs_inode->netfs.inode;
455 }
456
457 static void
cifs_free_inode(struct inode * inode)458 cifs_free_inode(struct inode *inode)
459 {
460 struct cifsInodeInfo *cinode = CIFS_I(inode);
461
462 if (S_ISLNK(inode->i_mode))
463 kfree(cinode->symlink_target);
464 kmem_cache_free(cifs_inode_cachep, cinode);
465 }
466
/*
 * ->evict_inode: drain outstanding netfs I/O, drop page cache and
 * fscache cookies, then clear the inode.
 */
static void
cifs_evict_inode(struct inode *inode)
{
	netfs_wait_for_outstanding_io(inode);
	truncate_inode_pages_final(&inode->i_data);
	/* release the cookie pinned for netfs writeback, if any */
	if (inode_state_read_once(inode) & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}
477
478 static void
cifs_show_address(struct seq_file * s,struct TCP_Server_Info * server)479 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
480 {
481 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
482 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
483
484 seq_puts(s, ",addr=");
485
486 switch (server->dstaddr.ss_family) {
487 case AF_INET:
488 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
489 break;
490 case AF_INET6:
491 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
492 if (sa6->sin6_scope_id)
493 seq_printf(s, "%%%u", sa6->sin6_scope_id);
494 break;
495 default:
496 seq_puts(s, "(unknown)");
497 }
498 if (server->rdma)
499 seq_puts(s, ",rdma");
500 }
501
502 static void
cifs_show_security(struct seq_file * s,struct cifs_ses * ses)503 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
504 {
505 if (ses->sectype == Unspecified) {
506 if (ses->user_name == NULL)
507 seq_puts(s, ",sec=none");
508 return;
509 }
510
511 seq_puts(s, ",sec=");
512
513 switch (ses->sectype) {
514 case NTLMv2:
515 seq_puts(s, "ntlmv2");
516 break;
517 case Kerberos:
518 seq_puts(s, "krb5");
519 break;
520 case RawNTLMSSP:
521 seq_puts(s, "ntlmssp");
522 break;
523 default:
524 /* shouldn't ever happen */
525 seq_puts(s, "unknown");
526 break;
527 }
528
529 if (ses->sign)
530 seq_puts(s, "i");
531
532 if (ses->sectype == Kerberos)
533 seq_printf(s, ",cruid=%u",
534 from_kuid_munged(&init_user_ns, ses->cred_uid));
535 }
536
537 static void
cifs_show_cache_flavor(struct seq_file * s,struct cifs_sb_info * cifs_sb)538 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
539 {
540 unsigned int sbflags = cifs_sb_flags(cifs_sb);
541
542 seq_puts(s, ",cache=");
543
544 if (sbflags & CIFS_MOUNT_STRICT_IO)
545 seq_puts(s, "strict");
546 else if (sbflags & CIFS_MOUNT_DIRECT_IO)
547 seq_puts(s, "none");
548 else if (sbflags & CIFS_MOUNT_RW_CACHE)
549 seq_puts(s, "singleclient"); /* assume only one client access */
550 else if (sbflags & CIFS_MOUNT_RO_CACHE)
551 seq_puts(s, "ro"); /* read only caching assumed */
552 else
553 seq_puts(s, "loose");
554 }
555
556 /*
557 * cifs_show_devname() is used so we show the mount device name with correct
558 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
559 */
cifs_show_devname(struct seq_file * m,struct dentry * root)560 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
561 {
562 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
563 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
564
565 if (devname == NULL)
566 seq_puts(m, "none");
567 else {
568 convert_delimiter(devname, '/');
569 /* escape all spaces in share names */
570 seq_escape(m, devname, " \t");
571 kfree(devname);
572 }
573 return 0;
574 }
575
576 static void
cifs_show_upcall_target(struct seq_file * s,struct cifs_sb_info * cifs_sb)577 cifs_show_upcall_target(struct seq_file *s, struct cifs_sb_info *cifs_sb)
578 {
579 if (cifs_sb->ctx->upcall_target == UPTARGET_UNSPECIFIED) {
580 seq_puts(s, ",upcall_target=app");
581 return;
582 }
583
584 seq_puts(s, ",upcall_target=");
585
586 switch (cifs_sb->ctx->upcall_target) {
587 case UPTARGET_APP:
588 seq_puts(s, "app");
589 break;
590 case UPTARGET_MOUNT:
591 seq_puts(s, "mount");
592 break;
593 default:
594 /* shouldn't ever happen */
595 seq_puts(s, "unknown");
596 break;
597 }
598 }
599
600 /*
601 * cifs_show_options() is for displaying mount options in /proc/mounts.
602 * Not all settable options are displayed but most of the important
603 * ones are.
604 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	unsigned int sbflags;

	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	/* dialect, security, caching and upcall options first */
	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	cifs_show_upcall_target(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* source address, if one was bound on mount */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;

		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	/* ownership overrides */
	sbflags = cifs_sb_flags(cifs_sb);
	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (sbflags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (sbflags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->ses->unicode == 0)
		seq_puts(s, ",nounicode");
	else if (tcon->ses->unicode == 1)
		seq_puts(s, ",unicode");
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	/* boolean mount-flag options, in sbflags */
	if (sbflags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (sbflags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (sbflags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (sbflags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (sbflags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (sbflags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (sbflags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (sbflags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (sbflags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (sbflags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (sbflags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (sbflags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (sbflags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (sbflags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (sbflags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (sbflags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (sbflags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (sbflags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (sbflags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (sbflags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (sbflags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (sbflags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));
	if (cifs_sb->ctx->nonativesocket)
		seq_puts(s, ",nonativesocket");
	else
		seq_puts(s, ",nativesocket");
	seq_show_option(s, "symlink",
			cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));

	/* sizes and tunables */
	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}
811
/*
 * ->umount_begin (umount -f): wake up any tasks blocked waiting on the
 * server so a forced unmount can make progress.  Does nothing if the
 * share is still mounted elsewhere or teardown already started.
 */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	/* lock order: cifs_tcp_ses_lock before tc_lock */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files). TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}
855
/* ->freeze_fs: flush deferred file closes before the fs is frozen */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/* nothing to do for a superblock without mount state */
	if (!cifs_sb)
		return 0;

	cifs_close_all_deferred_files(cifs_sb_master_tcon(cifs_sb));
	return 0;
}
869
870 #ifdef CONFIG_CIFS_STATS2
/* ->show_stats: per-superblock statistics display, not yet implemented */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
876 #endif
877
/* ->write_inode: delegate to netfs to unpin any writeback state */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}
882
cifs_drop_inode(struct inode * inode)883 static int cifs_drop_inode(struct inode *inode)
884 {
885 unsigned int sbflags = cifs_sb_flags(CIFS_SB(inode));
886
887 /* no serverino => unconditional eviction */
888 return !(sbflags & CIFS_MOUNT_SERVER_INUM) ||
889 inode_generic_drop(inode);
890 }
891
/* superblock operations shared by all cifs/smb3 mounts */
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */  /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};
912
913 /*
914 * Get root dentry from superblock according to prefix path mount option.
915 * Return dentry with refcount + 1 on success and NULL otherwise.
916 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	/* with a prefix path, the superblock root already is the mount root */
	if (cifs_sb_flags(cifs_sb) & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
					    cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk the path one component at a time from the sb root */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		/* [p, s) now delimits the current path component */
		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}
968
cifs_set_super(struct super_block * sb,void * data)969 static int cifs_set_super(struct super_block *sb, void *data)
970 {
971 struct cifs_mnt_data *mnt_data = data;
972 sb->s_fs_info = mnt_data->cifs_sb;
973 return set_anon_super(sb, NULL);
974 }
975
/*
 * Mount entry point shared by the cifs and smb3 filesystem types.
 * Duplicates the fs context, connects to the server (cifs_mount), then
 * finds or creates a superblock via sget() and returns the root dentry
 * (adjusted for any prefix path).  On error returns ERR_PTR().
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}
	cifs_sb = kzalloc_obj(*cifs_sb);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	/* this superblock gets its own private copy of the mount context */
	cifs_sb->ctx = kzalloc_obj(struct smb3_fs_context);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		/* undo the cifs_mount() done above */
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		/* sget() matched an existing sb; our cifs_sb is redundant */
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	/* cifs_sb is NULL when an existing superblock was reused (above) */
	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	/* error before sget(): free what we allocated ourselves */
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}
1068
/*
 * Seek within a cifs file.  Seeks that depend on the file size
 * (SEEK_END/SEEK_DATA/SEEK_HOLE) force a revalidation of the cached
 * attributes first so the size reflects the server's view.
 */
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* prefer the protocol-specific llseek implementation, if any */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}
1113
/*
 * Grant, upgrade or remove a local lease.  A lease is only handed to the
 * VFS when the matching oplock/cache state is already held from the
 * server, or when the local_lease mount option overrides that check;
 * otherwise -EAGAIN is returned.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
1143
/*
 * Filesystem type backing "mount -t cifs".  It shares the fs_context
 * implementation with the smb3 type below.
 */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");
1153
/*
 * Filesystem type backing "mount -t smb3" - identical to cifs_fs_type
 * above apart from the name and aliases.
 */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
1164
/* Inode operations for directories */
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.tmpfile = cifs_tmpfile,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod   = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1184
/* Inode operations for regular files */
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};
1194
cifs_get_link(struct dentry * dentry,struct inode * inode,struct delayed_call * done)1195 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1196 struct delayed_call *done)
1197 {
1198 char *target_path;
1199
1200 if (!dentry)
1201 return ERR_PTR(-ECHILD);
1202
1203 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1204 if (!target_path)
1205 return ERR_PTR(-ENOMEM);
1206
1207 spin_lock(&inode->i_lock);
1208 if (likely(CIFS_I(inode)->symlink_target)) {
1209 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1210 } else {
1211 kfree(target_path);
1212 target_path = ERR_PTR(-EOPNOTSUPP);
1213 }
1214 spin_unlock(&inode->i_lock);
1215
1216 if (!IS_ERR(target_path))
1217 set_delayed_call(done, kfree_link, target_path);
1218
1219 return target_path;
1220 }
1221
/* Inode operations for symlinks */
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};
1228
1229 /*
1230 * Advance the EOF marker to after the source range.
1231 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;

	/* a writable handle is needed to issue the set-size request */
	writeable_srcfile = find_writable_file(src_cifsi, FIND_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* bring the local netfs/fscache view in line with the new size */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	/* fall back to flushing dirty data so the server sees the length */
	return filemap_write_and_wait(src_inode->i_mapping);
}
1261
1262 /*
1263 * Flush out either the folio that overlaps the beginning of a range in which
1264 * pos resides or the folio that overlaps the end of a range unless that folio
1265 * is entirely within the range we're going to invalidate. We extend the flush
1266 * bounds to encompass the folio.
1267 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0; /* nothing cached at pos, so nothing to flush */

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* widen the caller's flush bounds to cover the whole folio */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend = max_t(unsigned long long, *_fend, fend);
	/* folio lies entirely inside the invalidation range: no flush needed */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}
1294
/*
 * remap_file_range handler implementing reflink-style cloning via the
 * protocol's duplicate_extents operation.  Dedup is not supported.
 * Returns the cloned length on success or a negative errno.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, old_size, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;
	if (fend > target_cifsi->netfs.zero_point)
		target_cifsi->netfs.zero_point = fend + 1;
	old_size = target_cifsi->netfs.remote_i_size;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			/* the clone extended the target: grow local state too */
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		} else if (rc == -EOPNOTSUPP) {
			/*
			 * copy_file_range syscall man page indicates EINVAL
			 * is returned e.g when "fd_in and fd_out refer to the
			 * same file and the source and target ranges overlap."
			 * Test generic/157 was what showed these cases where
			 * we need to remap EOPNOTSUPP to EINVAL
			 */
			if (off >= src_inode->i_size) {
				rc = -EINVAL;
			} else if (src_inode == target_inode) {
				if (off + len > destoff)
					rc = -EINVAL;
			}
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
1420
/*
 * Server-side copy of a byte range between two files on the same session,
 * using the protocol's copychunk_range operation.  Returns the number of
 * bytes copied or a negative errno (-EXDEV when the files live on
 * different sessions, -EOPNOTSUPP when the server op is unavailable).
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/* server-side copy only works within one session */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region.  If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			/* copy extended the target: grow local size state */
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}
1525
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * on a directory has nothing to flush and is a dummy operation: it only
 * logs the call and reports success.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}
1537
cifs_copy_file_range(struct file * src_file,loff_t off,struct file * dst_file,loff_t destoff,size_t len,unsigned int flags)1538 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1539 struct file *dst_file, loff_t destoff,
1540 size_t len, unsigned int flags)
1541 {
1542 unsigned int xid = get_xid();
1543 ssize_t rc;
1544 struct cifsFileInfo *cfile = dst_file->private_data;
1545
1546 if (cfile->swapfile) {
1547 rc = -EOPNOTSUPP;
1548 free_xid(xid);
1549 return rc;
1550 }
1551
1552 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1553 len, flags);
1554 free_xid(xid);
1555
1556 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1557 rc = splice_copy_file_range(src_file, off, dst_file,
1558 destoff, len);
1559 return rc;
1560 }
1561
/* Regular files, buffered ("loose") reads, with byte-range lock support */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1581
/* Regular files with strict cache-coherency I/O, byte-range locks supported */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1601
/* Regular files using unbuffered (direct) netfs I/O, byte-range locks supported */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1621
/* As cifs_file_ops but without .lock/.flock (nobrl: no byte-range locks) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1639
/* As cifs_file_strict_ops but without .lock/.flock (nobrl mounts) */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_strict_mmap_prepare,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1657
/* As cifs_file_direct_ops but without .lock/.flock (nobrl mounts) */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = netfs_unbuffered_read_iter,
	.write_iter = netfs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap_prepare = cifs_file_mmap_prepare,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};
1675
/* File operations for directories */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};
1686
/*
 * Slab constructor for cifsInodeInfo objects: runs once per object when
 * the slab page is first populated, not on every allocation.
 */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}
1695
1696 static int __init
cifs_init_inodecache(void)1697 cifs_init_inodecache(void)
1698 {
1699 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1700 sizeof(struct cifsInodeInfo),
1701 0, (SLAB_RECLAIM_ACCOUNT|
1702 SLAB_ACCOUNT),
1703 cifs_init_once);
1704 if (cifs_inode_cachep == NULL)
1705 return -ENOMEM;
1706
1707 return 0;
1708 }
1709
/* Tear down the cifs inode slab cache at module unload */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
1720
/*
 * Create the slab caches and mempools for large and small SMB request
 * buffers, clamping the module parameters (CIFSMaxBufSize, cifs_min_rcv,
 * cifs_min_small) to sane ranges first.  Each failure path unwinds the
 * resources created before it.
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy whitelist covers the whole buffer (headers + payload) */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	/* mempool guarantees cifs_min_rcv large buffers even under pressure */
	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests). A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}
1801
/* Destroy the request-buffer mempools and slab caches (pools first) */
static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}
1810
init_mids(void)1811 static int init_mids(void)
1812 {
1813 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1814 sizeof(struct mid_q_entry), 0,
1815 SLAB_HWCACHE_ALIGN, NULL);
1816 if (cifs_mid_cachep == NULL)
1817 return -ENOMEM;
1818
1819 /* 3 is a reasonable minimum number of simultaneous operations */
1820 if (mempool_init_slab_pool(&cifs_mid_pool, 3, cifs_mid_cachep) < 0) {
1821 kmem_cache_destroy(cifs_mid_cachep);
1822 return -ENOMEM;
1823 }
1824
1825 return 0;
1826 }
1827
/* Tear down the mid_q_entry mempool and slab cache (pool first) */
static void destroy_mids(void)
{
	mempool_exit(&cifs_mid_pool);
	kmem_cache_destroy(cifs_mid_cachep);
}
1833
/*
 * Create the slab caches and mempools for netfs I/O requests and
 * subrequests, unwinding in reverse order on failure.
 */
static int cifs_init_netfs(void)
{
	cifs_io_request_cachep =
		kmem_cache_create("cifs_io_request",
				  sizeof(struct cifs_io_request), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_request_cachep)
		goto nomem_req;

	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
		goto nomem_reqpool;

	cifs_io_subrequest_cachep =
		kmem_cache_create("cifs_io_subrequest",
				  sizeof(struct cifs_io_subrequest), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!cifs_io_subrequest_cachep)
		goto nomem_subreq;

	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
		goto nomem_subreqpool;

	return 0;

nomem_subreqpool:
	kmem_cache_destroy(cifs_io_subrequest_cachep);
nomem_subreq:
	mempool_exit(&cifs_io_request_pool);
nomem_reqpool:
	kmem_cache_destroy(cifs_io_request_cachep);
nomem_req:
	return -ENOMEM;
}
1867
/* Tear down the netfs request/subrequest pools and caches (pools first) */
static void cifs_destroy_netfs(void)
{
	mempool_exit(&cifs_io_subrequest_pool);
	kmem_cache_destroy(cifs_io_subrequest_cachep);
	mempool_exit(&cifs_io_request_pool);
	kmem_cache_destroy(cifs_io_request_cachep);
}
1875
1876 static int __init
init_cifs(void)1877 init_cifs(void)
1878 {
1879 int rc = 0;
1880
1881 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1882 rc = smb1_init_maperror();
1883 if (rc)
1884 return rc;
1885 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1886
1887 rc = smb2_init_maperror();
1888 if (rc)
1889 return rc;
1890
1891 cifs_proc_init();
1892 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1893 /*
1894 * Initialize Global counters
1895 */
1896 atomic_set(&sesInfoAllocCount, 0);
1897 atomic_set(&tconInfoAllocCount, 0);
1898 atomic_set(&tcpSesNextId, 0);
1899 atomic_set(&tcpSesAllocCount, 0);
1900 atomic_set(&tcpSesReconnectCount, 0);
1901 atomic_set(&tconInfoReconnectCount, 0);
1902
1903 atomic_set(&buf_alloc_count, 0);
1904 atomic_set(&small_buf_alloc_count, 0);
1905 #ifdef CONFIG_CIFS_STATS2
1906 atomic_set(&total_buf_alloc_count, 0);
1907 atomic_set(&total_small_buf_alloc_count, 0);
1908 if (slow_rsp_threshold < 1)
1909 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1910 else if (slow_rsp_threshold > 32767)
1911 cifs_dbg(VFS,
1912 "slow response threshold set higher than recommended (0 to 32767)\n");
1913 #endif /* CONFIG_CIFS_STATS2 */
1914
1915 atomic_set(&mid_count, 0);
1916 GlobalCurrentXid = 0;
1917 GlobalTotalActiveXid = 0;
1918 GlobalMaxActiveXid = 0;
1919
1920 cifs_lock_secret = get_random_u32();
1921
1922 if (cifs_max_pending < 2) {
1923 cifs_max_pending = 2;
1924 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1925 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1926 cifs_max_pending = CIFS_MAX_REQ;
1927 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1928 CIFS_MAX_REQ);
1929 }
1930
1931 /* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1932 if (dir_cache_timeout > 65000) {
1933 dir_cache_timeout = 65000;
1934 cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1935 }
1936
1937 cifsiod_wq = alloc_workqueue("cifsiod",
1938 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
1939 0);
1940 if (!cifsiod_wq) {
1941 rc = -ENOMEM;
1942 goto out_clean_proc;
1943 }
1944
1945 /*
1946 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1947 * so that we don't launch too many worker threads but
1948 * Documentation/core-api/workqueue.rst recommends setting it to 0
1949 */
1950
1951 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1952 decrypt_wq = alloc_workqueue("smb3decryptd",
1953 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1954 if (!decrypt_wq) {
1955 rc = -ENOMEM;
1956 goto out_destroy_cifsiod_wq;
1957 }
1958
1959 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1960 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1961 if (!fileinfo_put_wq) {
1962 rc = -ENOMEM;
1963 goto out_destroy_decrypt_wq;
1964 }
1965
1966 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1967 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
1968 0);
1969 if (!cifsoplockd_wq) {
1970 rc = -ENOMEM;
1971 goto out_destroy_fileinfo_put_wq;
1972 }
1973
1974 deferredclose_wq = alloc_workqueue("deferredclose",
1975 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
1976 0);
1977 if (!deferredclose_wq) {
1978 rc = -ENOMEM;
1979 goto out_destroy_cifsoplockd_wq;
1980 }
1981
1982 serverclose_wq = alloc_workqueue("serverclose",
1983 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
1984 0);
1985 if (!serverclose_wq) {
1986 rc = -ENOMEM;
1987 goto out_destroy_deferredclose_wq;
1988 }
1989
1990 cfid_put_wq = alloc_workqueue("cfid_put_wq",
1991 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
1992 0);
1993 if (!cfid_put_wq) {
1994 rc = -ENOMEM;
1995 goto out_destroy_serverclose_wq;
1996 }
1997
1998 rc = cifs_init_inodecache();
1999 if (rc)
2000 goto out_destroy_cfid_put_wq;
2001
2002 rc = cifs_init_netfs();
2003 if (rc)
2004 goto out_destroy_inodecache;
2005
2006 rc = init_mids();
2007 if (rc)
2008 goto out_destroy_netfs;
2009
2010 rc = cifs_init_request_bufs();
2011 if (rc)
2012 goto out_destroy_mids;
2013
2014 #ifdef CONFIG_CIFS_DFS_UPCALL
2015 rc = dfs_cache_init();
2016 if (rc)
2017 goto out_destroy_request_bufs;
2018 #endif /* CONFIG_CIFS_DFS_UPCALL */
2019 #ifdef CONFIG_CIFS_UPCALL
2020 rc = init_cifs_spnego();
2021 if (rc)
2022 goto out_destroy_dfs_cache;
2023 #endif /* CONFIG_CIFS_UPCALL */
2024 #ifdef CONFIG_CIFS_SWN_UPCALL
2025 rc = cifs_genl_init();
2026 if (rc)
2027 goto out_register_key_type;
2028 #endif /* CONFIG_CIFS_SWN_UPCALL */
2029
2030 rc = init_cifs_idmap();
2031 if (rc)
2032 goto out_cifs_swn_init;
2033
2034 rc = register_filesystem(&cifs_fs_type);
2035 if (rc)
2036 goto out_init_cifs_idmap;
2037
2038 rc = register_filesystem(&smb3_fs_type);
2039 if (rc) {
2040 unregister_filesystem(&cifs_fs_type);
2041 goto out_init_cifs_idmap;
2042 }
2043
2044 return 0;
2045
2046 out_init_cifs_idmap:
2047 exit_cifs_idmap();
2048 out_cifs_swn_init:
2049 #ifdef CONFIG_CIFS_SWN_UPCALL
2050 cifs_genl_exit();
2051 out_register_key_type:
2052 #endif
2053 #ifdef CONFIG_CIFS_UPCALL
2054 exit_cifs_spnego();
2055 out_destroy_dfs_cache:
2056 #endif
2057 #ifdef CONFIG_CIFS_DFS_UPCALL
2058 dfs_cache_destroy();
2059 out_destroy_request_bufs:
2060 #endif
2061 cifs_destroy_request_bufs();
2062 out_destroy_mids:
2063 destroy_mids();
2064 out_destroy_netfs:
2065 cifs_destroy_netfs();
2066 out_destroy_inodecache:
2067 cifs_destroy_inodecache();
2068 out_destroy_cfid_put_wq:
2069 destroy_workqueue(cfid_put_wq);
2070 out_destroy_serverclose_wq:
2071 destroy_workqueue(serverclose_wq);
2072 out_destroy_deferredclose_wq:
2073 destroy_workqueue(deferredclose_wq);
2074 out_destroy_cifsoplockd_wq:
2075 destroy_workqueue(cifsoplockd_wq);
2076 out_destroy_fileinfo_put_wq:
2077 destroy_workqueue(fileinfo_put_wq);
2078 out_destroy_decrypt_wq:
2079 destroy_workqueue(decrypt_wq);
2080 out_destroy_cifsiod_wq:
2081 destroy_workqueue(cifsiod_wq);
2082 out_clean_proc:
2083 cifs_proc_clean();
2084 return rc;
2085 }
2086
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	/*
	 * Module unload: tear down everything init_cifs set up, in roughly
	 * reverse order.  Unregister both filesystem types first so no new
	 * mounts (and thus no new state) can be created while we free the
	 * caches and workqueues below.
	 */
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	/* witness-notification netlink family */
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	/* spnego key type used for kerberos upcalls */
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	/* request buffer mempools, mid entries, netfs + inode caches */
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_netfs();
	cifs_destroy_inodecache();
	/*
	 * destroy_workqueue() flushes pending work, so all queues must go
	 * before cifs_proc_clean().  cifsiod_wq is destroyed last since it
	 * was created first and other work may have been funneled through it.
	 */
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(serverclose_wq);
	destroy_workqueue(cfid_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}
2117
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/*
 * Soft dependencies: hint to modprobe that these modules (NLS tables and
 * the crypto algorithms used for SMB3 encryption/signing) should be
 * available, without creating hard symbol dependencies.
 */
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)
2131