1 // SPDX-License-Identifier: LGPL-2.1 2 /* 3 * 4 * Copyright (C) International Business Machines Corp., 2002,2008 5 * Author(s): Steve French (sfrench@us.ibm.com) 6 * 7 * Common Internet FileSystem (CIFS) client 8 * 9 */ 10 11 /* Note that BB means BUGBUG (ie something to fix eventually) */ 12 13 #include <linux/module.h> 14 #include <linux/fs.h> 15 #include <linux/filelock.h> 16 #include <linux/mount.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/list.h> 20 #include <linux/seq_file.h> 21 #include <linux/vfs.h> 22 #include <linux/mempool.h> 23 #include <linux/delay.h> 24 #include <linux/kthread.h> 25 #include <linux/freezer.h> 26 #include <linux/namei.h> 27 #include <linux/random.h> 28 #include <linux/splice.h> 29 #include <linux/uuid.h> 30 #include <linux/xattr.h> 31 #include <uapi/linux/magic.h> 32 #include <net/ipv6.h> 33 #include "cifsfs.h" 34 #include "cifspdu.h" 35 #define DECLARE_GLOBALS_HERE 36 #include "cifsglob.h" 37 #include "cifsproto.h" 38 #include "cifs_debug.h" 39 #include "cifs_fs_sb.h" 40 #include <linux/mm.h> 41 #include <linux/key-type.h> 42 #include "cifs_spnego.h" 43 #include "fscache.h" 44 #ifdef CONFIG_CIFS_DFS_UPCALL 45 #include "dfs_cache.h" 46 #endif 47 #ifdef CONFIG_CIFS_SWN_UPCALL 48 #include "netlink.h" 49 #endif 50 #include "fs_context.h" 51 #include "cached_dir.h" 52 53 /* 54 * DOS dates from 1980/1/1 through 2107/12/31 55 * Protocol specifications indicate the range should be to 119, which 56 * limits maximum year to 2099. But this range has not been checked. 
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)

int cifsFYI = 0;
bool traceSMB;
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
bool enable_gcm_256 = true;
bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;

/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;	/* protected by GlobalMid_Sem */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Sem */
spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */

/*
 * Global counters, updated atomically
 */
atomic_t sesInfoAllocCount;
atomic_t tconInfoAllocCount;
atomic_t tcpSesNextId;
atomic_t tcpSesAllocCount;
atomic_t tcpSesReconnectCount;
atomic_t tconInfoReconnectCount;

atomic_t mid_count;
atomic_t buf_alloc_count;
atomic_t small_buf_alloc_count;
#ifdef CONFIG_CIFS_STATS2
atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head	cifs_tcp_ses_list;
spinlock_t		cifs_tcp_ses_lock;
static const struct super_operations cifs_super_ops;

/* Module parameters, visible read-only (0444) or r/w (0644) under
 * /sys/module/cifs/parameters/ */
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
				 "for CIFS requests. "
				 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
				"1 to 64");
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
				 "Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
				   "CIFS/SMB1 dialect (N/A for SMB3) "
				   "Default: 32767 Range: 2 to 32767.");
unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
				    "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
				     "before logging that a response is delayed. "
				     "Default: 1 (if set to 0 disables msg).");
#endif /* STATS2 */

module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");

module_param(enable_gcm_256, bool, 0644);
MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(require_gcm_256, bool, 0644);
MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");

module_param(enable_negotiate_signing, bool, 0644);
MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;

struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
__u32 cifs_lock_secret;

/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	/* first cifs-level reference also pins the VFS super block */
	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}

/* Drop the reference taken in cifs_sb_active(); last one releases the sb */
void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}

/*
 * Fill in a freshly-allocated super block (flags, limits, time granularity,
 * bdi/readahead tuning) and instantiate its root dentry from the server.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* snapshot mounts view a point-in-time image, hence read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.
		 * See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}

/* Release cached dirs and the root dentry, then unmount the cifs state */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}

/* statfs(2): report fs statistics, querying the server where supported */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}

/* fallocate(2): delegate to the dialect-specific handler if there is one */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

	return -EOPNOTSUPP;
}

/*
 * Permission check. With the "noperm" mount option only execute bits are
 * enforced on the client; otherwise fall back to generic mode-bit checks.
 */
static int cifs_permission(struct mnt_idmap *idmap,
			   struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		on the client
		(above and beyond ACL on servers) for
		servers which do not support setting and viewing mode bits,
		so allowing client to check permissions is useful */
		return generic_permission(&nop_mnt_idmap, inode, mask);
}

/* slab caches and mempools for inode, request and mid structures */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

/* Allocate and initialize a cifsInodeInfo for the given super block */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}

/* Free a cifs inode; symlink targets are only allocated for S_ISLNK inodes */
static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}

/* Drop page cache and fscache cookie when the inode is evicted */
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}

/* Emit ",addr=..." (and ",rdma") for /proc/mounts */
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}

/* Emit ",sec=..." (plus "i" when signing) for /proc/mounts */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}

/* Emit ",cache=..." for /proc/mounts */
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
}

/*
 * cifs_show_devname() is used so we show the mount device name with correct
 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 */
static int cifs_show_devname(struct seq_file *m, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);

	if (devname == NULL)
		seq_puts(m, "none");
	else {
		convert_delimiter(devname, '/');
		/* escape all spaces in share names */
		seq_escape(m, devname, " \t");
		kfree(devname);
	}
	return 0;
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* show srcaddr only when one was bound on mount */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
			tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}

/* umount_begin: wake any waiters so a forced unmount can make progress */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).
	 * TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}

/* freeze_fs: flush deferred closes so the fs can be quiesced */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return 0;

	tcon = cifs_sb_master_tcon(cifs_sb);

	cifs_close_all_deferred_files(tcon);
	return 0;
}

#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif

/* write_inode: let netfs handle unpinning of writeback state */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}

static int cifs_drop_inode(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
}

static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path?
 */
	.show_devname = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode, */ /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};

/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk the prefix path one component at a time */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}

static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}

/*
 * Mount entry point shared by the "cifs" and "smb3" filesystem types:
 * duplicate the fs context, connect/mount, find or create a superblock,
 * and resolve the root dentry (honoring any prefix path).
 */
struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, struct smb3_fs_context *old_ctx)
{
	struct cifs_mnt_data mnt_data;
	struct cifs_sb_info *cifs_sb;
	struct super_block *sb;
	struct dentry *root;
	int rc;

	if (cifsFYI) {
		cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
			 old_ctx->source, flags);
	} else {
		cifs_info("Attempting to mount %s\n", old_ctx->source);
	}

	cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
	if (!cifs_sb)
		return ERR_PTR(-ENOMEM);

	cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
	if (!cifs_sb->ctx) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}
	rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_setup_cifs_sb(cifs_sb);
	if (rc) {
		root = ERR_PTR(rc);
		goto out;
	}

	rc = cifs_mount(cifs_sb, cifs_sb->ctx);
	if (rc) {
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
				 rc);
		root = ERR_PTR(rc);
		goto out;
	}

	mnt_data.ctx = cifs_sb->ctx;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;

	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;

	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
	if (IS_ERR(sb)) {
		cifs_umount(cifs_sb);
		return ERR_CAST(sb);
	}

	if (sb->s_root) {
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		cifs_sb = NULL;
	} else {
		rc = cifs_read_super(sb);
		if (rc) {
			root = ERR_PTR(rc);
			goto out_super;
		}

		sb->s_flags |= SB_ACTIVE;
	}

	root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
	if (IS_ERR(root))
		goto out_super;

	if (cifs_sb)
		cifs_sb->root = dget(root);

	cifs_dbg(FYI, "dentry root is: %p\n", root);
	return root;

out_super:
	deactivate_locked_super(sb);
	return root;
out:
	kfree(cifs_sb->prepath);
	smb3_cleanup_fs_context(cifs_sb->ctx);
	kfree(cifs_sb);
	return root;
}


/* read_iter: direct I/O bypasses the page cache; else revalidate and read */
static ssize_t
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t rc;
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_DIRECT)
		return cifs_user_readv(iocb, iter);

	rc = cifs_revalidate_mapping(inode);
	if (rc)
		return rc;

	return generic_file_read_iter(iocb, iter);
}

static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	ssize_t written;
	int rc;

	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			/* a direct write invalidates any cached read data */
			cifs_zap_mapping(inode);
			cifs_dbg(FYI,
				 "Set no oplock for inode=%p after a write operation\n",
				 inode);
			cinode->oplock = 0;
		}
		return written;
	}

	written = cifs_get_writer(cinode);
	if (written)
		return written;

	written = generic_file_write_iter(iocb, from);

	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
		goto out;

	rc = filemap_fdatawrite(inode->i_mapping);
	if (rc)
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
			 rc, inode);

out:
	cifs_put_writer(cinode);
	return written;
}

static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
{
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE =>
we must revalidate 1049 * the cached file length 1050 */ 1051 if (whence != SEEK_SET && whence != SEEK_CUR) { 1052 int rc; 1053 struct inode *inode = file_inode(file); 1054 1055 /* 1056 * We need to be sure that all dirty pages are written and the 1057 * server has the newest file length. 1058 */ 1059 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping && 1060 inode->i_mapping->nrpages != 0) { 1061 rc = filemap_fdatawait(inode->i_mapping); 1062 if (rc) { 1063 mapping_set_error(inode->i_mapping, rc); 1064 return rc; 1065 } 1066 } 1067 /* 1068 * Some applications poll for the file length in this strange 1069 * way so we must seek to end on non-oplocked files by 1070 * setting the revalidate time to zero. 1071 */ 1072 CIFS_I(inode)->time = 0; 1073 1074 rc = cifs_revalidate_file_attr(file); 1075 if (rc < 0) 1076 return (loff_t)rc; 1077 } 1078 if (cfile && cfile->tlink) { 1079 tcon = tlink_tcon(cfile->tlink); 1080 if (tcon->ses->server->ops->llseek) 1081 return tcon->ses->server->ops->llseek(file, tcon, 1082 offset, whence); 1083 } 1084 return generic_file_llseek(file, offset, whence); 1085 } 1086 1087 static int 1088 cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv) 1089 { 1090 /* 1091 * Note that this is called by vfs setlease with i_lock held to 1092 * protect *lease from going away. 
1093 */ 1094 struct inode *inode = file_inode(file); 1095 struct cifsFileInfo *cfile = file->private_data; 1096 1097 if (!(S_ISREG(inode->i_mode))) 1098 return -EINVAL; 1099 1100 /* Check if file is oplocked if this is request for new lease */ 1101 if (arg == F_UNLCK || 1102 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) || 1103 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode)))) 1104 return generic_setlease(file, arg, lease, priv); 1105 else if (tlink_tcon(cfile->tlink)->local_lease && 1106 !CIFS_CACHE_READ(CIFS_I(inode))) 1107 /* 1108 * If the server claims to support oplock on this file, then we 1109 * still need to check oplock even if the local_lease mount 1110 * option is set, but there are servers which do not support 1111 * oplock for which this mount option may be useful if the user 1112 * knows that the file won't be changed on the server by anyone 1113 * else. 1114 */ 1115 return generic_setlease(file, arg, lease, priv); 1116 else 1117 return -EAGAIN; 1118 } 1119 1120 struct file_system_type cifs_fs_type = { 1121 .owner = THIS_MODULE, 1122 .name = "cifs", 1123 .init_fs_context = smb3_init_fs_context, 1124 .parameters = smb3_fs_parameters, 1125 .kill_sb = cifs_kill_sb, 1126 .fs_flags = FS_RENAME_DOES_D_MOVE, 1127 }; 1128 MODULE_ALIAS_FS("cifs"); 1129 1130 struct file_system_type smb3_fs_type = { 1131 .owner = THIS_MODULE, 1132 .name = "smb3", 1133 .init_fs_context = smb3_init_fs_context, 1134 .parameters = smb3_fs_parameters, 1135 .kill_sb = cifs_kill_sb, 1136 .fs_flags = FS_RENAME_DOES_D_MOVE, 1137 }; 1138 MODULE_ALIAS_FS("smb3"); 1139 MODULE_ALIAS("smb3"); 1140 1141 const struct inode_operations cifs_dir_inode_ops = { 1142 .create = cifs_create, 1143 .atomic_open = cifs_atomic_open, 1144 .lookup = cifs_lookup, 1145 .getattr = cifs_getattr, 1146 .unlink = cifs_unlink, 1147 .link = cifs_hardlink, 1148 .mkdir = cifs_mkdir, 1149 .rmdir = cifs_rmdir, 1150 .rename = cifs_rename2, 1151 .permission = cifs_permission, 1152 .setattr = cifs_setattr, 1153 
.symlink = cifs_symlink, 1154 .mknod = cifs_mknod, 1155 .listxattr = cifs_listxattr, 1156 .get_acl = cifs_get_acl, 1157 .set_acl = cifs_set_acl, 1158 }; 1159 1160 const struct inode_operations cifs_file_inode_ops = { 1161 .setattr = cifs_setattr, 1162 .getattr = cifs_getattr, 1163 .permission = cifs_permission, 1164 .listxattr = cifs_listxattr, 1165 .fiemap = cifs_fiemap, 1166 .get_acl = cifs_get_acl, 1167 .set_acl = cifs_set_acl, 1168 }; 1169 1170 const char *cifs_get_link(struct dentry *dentry, struct inode *inode, 1171 struct delayed_call *done) 1172 { 1173 char *target_path; 1174 1175 if (!dentry) 1176 return ERR_PTR(-ECHILD); 1177 1178 target_path = kmalloc(PATH_MAX, GFP_KERNEL); 1179 if (!target_path) 1180 return ERR_PTR(-ENOMEM); 1181 1182 spin_lock(&inode->i_lock); 1183 if (likely(CIFS_I(inode)->symlink_target)) { 1184 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX); 1185 } else { 1186 kfree(target_path); 1187 target_path = ERR_PTR(-EOPNOTSUPP); 1188 } 1189 spin_unlock(&inode->i_lock); 1190 1191 if (!IS_ERR(target_path)) 1192 set_delayed_call(done, kfree_link, target_path); 1193 1194 return target_path; 1195 } 1196 1197 const struct inode_operations cifs_symlink_inode_ops = { 1198 .get_link = cifs_get_link, 1199 .setattr = cifs_setattr, 1200 .permission = cifs_permission, 1201 .listxattr = cifs_listxattr, 1202 }; 1203 1204 /* 1205 * Advance the EOF marker to after the source range. 
1206 */ 1207 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi, 1208 struct cifs_tcon *src_tcon, 1209 unsigned int xid, loff_t src_end) 1210 { 1211 struct cifsFileInfo *writeable_srcfile; 1212 int rc = -EINVAL; 1213 1214 writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY); 1215 if (writeable_srcfile) { 1216 if (src_tcon->ses->server->ops->set_file_size) 1217 rc = src_tcon->ses->server->ops->set_file_size( 1218 xid, src_tcon, writeable_srcfile, 1219 src_inode->i_size, true /* no need to set sparse */); 1220 else 1221 rc = -ENOSYS; 1222 cifsFileInfo_put(writeable_srcfile); 1223 cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc); 1224 } 1225 1226 if (rc < 0) 1227 goto set_failed; 1228 1229 netfs_resize_file(&src_cifsi->netfs, src_end, true); 1230 fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end); 1231 return 0; 1232 1233 set_failed: 1234 return filemap_write_and_wait(src_inode->i_mapping); 1235 } 1236 1237 /* 1238 * Flush out either the folio that overlaps the beginning of a range in which 1239 * pos resides or the folio that overlaps the end of a range unless that folio 1240 * is entirely within the range we're going to invalidate. We extend the flush 1241 * bounds to encompass the folio. 
1242 */ 1243 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend, 1244 bool first) 1245 { 1246 struct folio *folio; 1247 unsigned long long fpos, fend; 1248 pgoff_t index = pos / PAGE_SIZE; 1249 size_t size; 1250 int rc = 0; 1251 1252 folio = filemap_get_folio(inode->i_mapping, index); 1253 if (IS_ERR(folio)) 1254 return 0; 1255 1256 size = folio_size(folio); 1257 fpos = folio_pos(folio); 1258 fend = fpos + size - 1; 1259 *_fstart = min_t(unsigned long long, *_fstart, fpos); 1260 *_fend = max_t(unsigned long long, *_fend, fend); 1261 if ((first && pos == fpos) || (!first && pos == fend)) 1262 goto out; 1263 1264 rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend); 1265 out: 1266 folio_put(folio); 1267 return rc; 1268 } 1269 1270 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off, 1271 struct file *dst_file, loff_t destoff, loff_t len, 1272 unsigned int remap_flags) 1273 { 1274 struct inode *src_inode = file_inode(src_file); 1275 struct inode *target_inode = file_inode(dst_file); 1276 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode); 1277 struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode); 1278 struct cifsFileInfo *smb_file_src = src_file->private_data; 1279 struct cifsFileInfo *smb_file_target = dst_file->private_data; 1280 struct cifs_tcon *target_tcon, *src_tcon; 1281 unsigned long long destend, fstart, fend, new_size; 1282 unsigned int xid; 1283 int rc; 1284 1285 if (remap_flags & REMAP_FILE_DEDUP) 1286 return -EOPNOTSUPP; 1287 if (remap_flags & ~REMAP_FILE_ADVISORY) 1288 return -EINVAL; 1289 1290 cifs_dbg(FYI, "clone range\n"); 1291 1292 xid = get_xid(); 1293 1294 if (!smb_file_src || !smb_file_target) { 1295 rc = -EBADF; 1296 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); 1297 goto out; 1298 } 1299 1300 src_tcon = tlink_tcon(smb_file_src->tlink); 1301 target_tcon = tlink_tcon(smb_file_target->tlink); 1302 1303 /* 1304 * Note: cifs case is easier than btrfs since 
server responsible for 1305 * checks for proper open modes and file type and if it wants 1306 * server could even support copy of range where source = target 1307 */ 1308 lock_two_nondirectories(target_inode, src_inode); 1309 1310 if (len == 0) 1311 len = src_inode->i_size - off; 1312 1313 cifs_dbg(FYI, "clone range\n"); 1314 1315 /* Flush the source buffer */ 1316 rc = filemap_write_and_wait_range(src_inode->i_mapping, off, 1317 off + len - 1); 1318 if (rc) 1319 goto unlock; 1320 1321 /* The server-side copy will fail if the source crosses the EOF marker. 1322 * Advance the EOF marker after the flush above to the end of the range 1323 * if it's short of that. 1324 */ 1325 if (src_cifsi->netfs.remote_i_size < off + len) { 1326 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len); 1327 if (rc < 0) 1328 goto unlock; 1329 } 1330 1331 new_size = destoff + len; 1332 destend = destoff + len - 1; 1333 1334 /* Flush the folios at either end of the destination range to prevent 1335 * accidental loss of dirty data outside of the range. 1336 */ 1337 fstart = destoff; 1338 fend = destend; 1339 1340 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true); 1341 if (rc) 1342 goto unlock; 1343 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false); 1344 if (rc) 1345 goto unlock; 1346 1347 /* Discard all the folios that overlap the destination region. 
*/ 1348 cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend); 1349 truncate_inode_pages_range(&target_inode->i_data, fstart, fend); 1350 1351 fscache_invalidate(cifs_inode_cookie(target_inode), NULL, 1352 i_size_read(target_inode), 0); 1353 1354 rc = -EOPNOTSUPP; 1355 if (target_tcon->ses->server->ops->duplicate_extents) { 1356 rc = target_tcon->ses->server->ops->duplicate_extents(xid, 1357 smb_file_src, smb_file_target, off, len, destoff); 1358 if (rc == 0 && new_size > i_size_read(target_inode)) { 1359 truncate_setsize(target_inode, new_size); 1360 netfs_resize_file(&target_cifsi->netfs, new_size, true); 1361 fscache_resize_cookie(cifs_inode_cookie(target_inode), 1362 new_size); 1363 } 1364 } 1365 1366 /* force revalidate of size and timestamps of target file now 1367 that target is updated on the server */ 1368 CIFS_I(target_inode)->time = 0; 1369 unlock: 1370 /* although unlocking in the reverse order from locking is not 1371 strictly necessary here it is a little cleaner to be consistent */ 1372 unlock_two_nondirectories(src_inode, target_inode); 1373 out: 1374 free_xid(xid); 1375 return rc < 0 ? 
rc : len; 1376 } 1377 1378 ssize_t cifs_file_copychunk_range(unsigned int xid, 1379 struct file *src_file, loff_t off, 1380 struct file *dst_file, loff_t destoff, 1381 size_t len, unsigned int flags) 1382 { 1383 struct inode *src_inode = file_inode(src_file); 1384 struct inode *target_inode = file_inode(dst_file); 1385 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode); 1386 struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode); 1387 struct cifsFileInfo *smb_file_src; 1388 struct cifsFileInfo *smb_file_target; 1389 struct cifs_tcon *src_tcon; 1390 struct cifs_tcon *target_tcon; 1391 unsigned long long destend, fstart, fend; 1392 ssize_t rc; 1393 1394 cifs_dbg(FYI, "copychunk range\n"); 1395 1396 if (!src_file->private_data || !dst_file->private_data) { 1397 rc = -EBADF; 1398 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); 1399 goto out; 1400 } 1401 1402 rc = -EXDEV; 1403 smb_file_target = dst_file->private_data; 1404 smb_file_src = src_file->private_data; 1405 src_tcon = tlink_tcon(smb_file_src->tlink); 1406 target_tcon = tlink_tcon(smb_file_target->tlink); 1407 1408 if (src_tcon->ses != target_tcon->ses) { 1409 cifs_dbg(VFS, "source and target of copy not on same server\n"); 1410 goto out; 1411 } 1412 1413 rc = -EOPNOTSUPP; 1414 if (!target_tcon->ses->server->ops->copychunk_range) 1415 goto out; 1416 1417 /* 1418 * Note: cifs case is easier than btrfs since server responsible for 1419 * checks for proper open modes and file type and if it wants 1420 * server could even support copy of range where source = target 1421 */ 1422 lock_two_nondirectories(target_inode, src_inode); 1423 1424 cifs_dbg(FYI, "about to flush pages\n"); 1425 1426 rc = filemap_write_and_wait_range(src_inode->i_mapping, off, 1427 off + len - 1); 1428 if (rc) 1429 goto unlock; 1430 1431 /* The server-side copy will fail if the source crosses the EOF marker. 1432 * Advance the EOF marker after the flush above to the end of the range 1433 * if it's short of that. 
1434 */ 1435 if (src_cifsi->netfs.remote_i_size < off + len) { 1436 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len); 1437 if (rc < 0) 1438 goto unlock; 1439 } 1440 1441 destend = destoff + len - 1; 1442 1443 /* Flush the folios at either end of the destination range to prevent 1444 * accidental loss of dirty data outside of the range. 1445 */ 1446 fstart = destoff; 1447 fend = destend; 1448 1449 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true); 1450 if (rc) 1451 goto unlock; 1452 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false); 1453 if (rc) 1454 goto unlock; 1455 1456 /* Discard all the folios that overlap the destination region. */ 1457 truncate_inode_pages_range(&target_inode->i_data, fstart, fend); 1458 1459 fscache_invalidate(cifs_inode_cookie(target_inode), NULL, 1460 i_size_read(target_inode), 0); 1461 1462 rc = file_modified(dst_file); 1463 if (!rc) { 1464 rc = target_tcon->ses->server->ops->copychunk_range(xid, 1465 smb_file_src, smb_file_target, off, len, destoff); 1466 if (rc > 0 && destoff + rc > i_size_read(target_inode)) { 1467 truncate_setsize(target_inode, destoff + rc); 1468 netfs_resize_file(&target_cifsi->netfs, 1469 i_size_read(target_inode), true); 1470 fscache_resize_cookie(cifs_inode_cookie(target_inode), 1471 i_size_read(target_inode)); 1472 } 1473 if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point) 1474 target_cifsi->netfs.zero_point = destoff + rc; 1475 } 1476 1477 file_accessed(src_file); 1478 1479 /* force revalidate of size and timestamps of target file now 1480 * that target is updated on the server 1481 */ 1482 CIFS_I(target_inode)->time = 0; 1483 1484 unlock: 1485 /* although unlocking in the reverse order from locking is not 1486 * strictly necessary here it is a little cleaner to be consistent 1487 */ 1488 unlock_two_nondirectories(src_inode, target_inode); 1489 1490 out: 1491 return rc; 1492 } 1493 1494 /* 1495 * Directory operations under CIFS/SMB2/SMB3 are 
synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}

/*
 * copy_file_range entry point: try the server-side copychunk path first and
 * fall back to a splice-based client-side copy when the server cannot do it
 * (-EOPNOTSUPP) or the files are on different sessions (-EXDEV).  Copying
 * into a swapfile is refused.
 */
static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	unsigned int xid = get_xid();
	ssize_t rc;
	struct cifsFileInfo *cfile = dst_file->private_data;

	if (cfile->swapfile) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
					len, flags);
	free_xid(xid);

	if (rc == -EOPNOTSUPP || rc == -EXDEV)
		rc = splice_copy_file_range(src_file, off, dst_file,
					    destoff, len);
	return rc;
}

/* File operations: cached I/O with byte-range lock support */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* File operations: strict cache-coherency variant (oplock-gated caching) */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* File operations: uncached (direct) I/O variant */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* As cifs_file_ops, but with no byte-range lock support (nobrl mount) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Strict cache-coherency variant without byte-range locks */
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Direct I/O variant without byte-range locks */
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* Directory file operations */
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};

/* Slab constructor: runs once per cifsInodeInfo object at cache-fill time */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}

/* Create the cifsInodeInfo slab cache.  Returns 0 or -ENOMEM. */
static int __init
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						SLAB_MEM_SPREAD|SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}

/*
 * Set up the large and small request-buffer slab caches and their mempools,
 * clamping the CIFSMaxBufSize / cifs_min_rcv / cifs_min_small module
 * parameters to sane ranges first.  Returns 0 or -ENOMEM (with any
 * already-created caches torn down).
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	almost all handle based requests (but not write response, nor is it
	sufficient for path based requests).  A smaller size would have
	been more efficient (compacting multiple slab items on one 4k page)
	for the case in which debug was on, but this larger size allows
	more SMBs to use small buffer alloc and is still much more
	efficient to alloc 1 per page off the slab compared to 17K (5page)
	alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

/* Create the mid (multiplex id) slab cache and its mempool */
static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
}

/*
 * Module init: set up global state, clamp module parameters, create the
 * workqueues, caches and upcall/keyring infrastructure, then register the
 * "cifs" and "smb3" filesystem types.  Every failure path unwinds exactly
 * what was already set up (goto ladder mirrors setup order).
 */
static int __init
init_cifs(void)
{
	int rc = 0;
	cifs_proc_init();
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesNextId, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&buf_alloc_count, 0);
	atomic_set(&small_buf_alloc_count, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&total_buf_alloc_count, 0);
	atomic_set(&total_small_buf_alloc_count, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
		cifs_dbg(VFS,
		       "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&mid_count, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);

	cifs_lock_secret = get_random_u32();

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
			 CIFS_MAX_REQ);
	}

	/* Limit max to about 18 hours, and setting to zero disables directory entry caching */
	if (dir_cache_timeout > 65000) {
		dir_cache_timeout = 65000;
		cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
	}

	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsiod_wq) {
		rc = -ENOMEM;
		goto out_clean_proc;
	}

	/*
	 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
	 * so that we don't launch too many worker threads but
	 * Documentation/core-api/workqueue.rst recommends setting it to 0
	 */

	/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
	decrypt_wq = alloc_workqueue("smb3decryptd",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!decrypt_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsiod_wq;
	}

	fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
				     WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!fileinfo_put_wq) {
		rc = -ENOMEM;
		goto out_destroy_decrypt_wq;
	}

	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		rc = -ENOMEM;
		goto out_destroy_fileinfo_put_wq;
	}

	deferredclose_wq = alloc_workqueue("deferredclose",
					   WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!deferredclose_wq) {
		rc = -ENOMEM;
		goto out_destroy_cifsoplockd_wq;
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_destroy_deferredclose_wq;

	rc = init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
	if (rc)
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
	if (rc)
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_SWN_UPCALL
	rc = cifs_genl_init();
	if (rc)
		goto out_register_key_type;
#endif /* CONFIG_CIFS_SWN_UPCALL */

	rc = init_cifs_idmap();
	if (rc)
		goto out_cifs_swn_init;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_init_cifs_idmap;

	rc = register_filesystem(&smb3_fs_type);
	if (rc) {
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	}

	return 0;

out_init_cifs_idmap:
	exit_cifs_idmap();
out_cifs_swn_init:
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
out_destroy_dfs_cache:
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
#endif
	cifs_destroy_request_bufs();
out_destroy_mids:
	destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
out_destroy_deferredclose_wq:
	destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_fileinfo_put_wq:
	destroy_workqueue(fileinfo_put_wq);
out_destroy_decrypt_wq:
	destroy_workqueue(decrypt_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
out_clean_proc:
	cifs_proc_clean();
	return rc;
}

/* Module exit: tear down everything init_cifs() set up */
static void __exit
exit_cifs(void)
{
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_release_automount_timer();
	exit_cifs_idmap();
#ifdef CONFIG_CIFS_SWN_UPCALL
	cifs_genl_exit();
#endif
#ifdef CONFIG_CIFS_UPCALL
	exit_cifs_spnego();
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
#endif
	cifs_destroy_request_bufs();
	destroy_mids();
	cifs_destroy_inodecache();
	destroy_workqueue(deferredclose_wq);
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(decrypt_wq);
	destroy_workqueue(fileinfo_put_wq);
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}

MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)