1 // SPDX-License-Identifier: LGPL-2.1 2 /* 3 * 4 * Copyright (C) International Business Machines Corp., 2002,2008 5 * Author(s): Steve French (sfrench@us.ibm.com) 6 * 7 * Common Internet FileSystem (CIFS) client 8 * 9 */ 10 11 /* Note that BB means BUGBUG (ie something to fix eventually) */ 12 13 #include <linux/module.h> 14 #include <linux/fs.h> 15 #include <linux/filelock.h> 16 #include <linux/mount.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/list.h> 20 #include <linux/seq_file.h> 21 #include <linux/vfs.h> 22 #include <linux/mempool.h> 23 #include <linux/delay.h> 24 #include <linux/kthread.h> 25 #include <linux/freezer.h> 26 #include <linux/namei.h> 27 #include <linux/random.h> 28 #include <linux/splice.h> 29 #include <linux/uuid.h> 30 #include <linux/xattr.h> 31 #include <uapi/linux/magic.h> 32 #include <net/ipv6.h> 33 #include "cifsfs.h" 34 #include "cifspdu.h" 35 #define DECLARE_GLOBALS_HERE 36 #include "cifsglob.h" 37 #include "cifsproto.h" 38 #include "cifs_debug.h" 39 #include "cifs_fs_sb.h" 40 #include <linux/mm.h> 41 #include <linux/key-type.h> 42 #include "cifs_spnego.h" 43 #include "fscache.h" 44 #ifdef CONFIG_CIFS_DFS_UPCALL 45 #include "dfs_cache.h" 46 #endif 47 #ifdef CONFIG_CIFS_SWN_UPCALL 48 #include "netlink.h" 49 #endif 50 #include "fs_context.h" 51 #include "cached_dir.h" 52 53 /* 54 * DOS dates from 1980/1/1 through 2107/12/31 55 * Protocol specifications indicate the range should be to 119, which 56 * limits maximum year to 2099. But this range has not been checked. 
57 */ 58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31) 59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1) 60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29) 61 62 int cifsFYI = 0; 63 bool traceSMB; 64 bool enable_oplocks = true; 65 bool linuxExtEnabled = true; 66 bool lookupCacheEnabled = true; 67 bool disable_legacy_dialects; /* false by default */ 68 bool enable_gcm_256 = true; 69 bool require_gcm_256; /* false by default */ 70 bool enable_negotiate_signing; /* false by default */ 71 unsigned int global_secflags = CIFSSEC_DEF; 72 /* unsigned int ntlmv2_support = 0; */ 73 unsigned int sign_CIFS_PDUs = 1; 74 75 /* 76 * Global transaction id (XID) information 77 */ 78 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ 79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ 80 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ 82 83 /* 84 * Global counters, updated atomically 85 */ 86 atomic_t sesInfoAllocCount; 87 atomic_t tconInfoAllocCount; 88 atomic_t tcpSesNextId; 89 atomic_t tcpSesAllocCount; 90 atomic_t tcpSesReconnectCount; 91 atomic_t tconInfoReconnectCount; 92 93 atomic_t mid_count; 94 atomic_t buf_alloc_count; 95 atomic_t small_buf_alloc_count; 96 #ifdef CONFIG_CIFS_STATS2 97 atomic_t total_buf_alloc_count; 98 atomic_t total_small_buf_alloc_count; 99 #endif/* STATS2 */ 100 struct list_head cifs_tcp_ses_list; 101 spinlock_t cifs_tcp_ses_lock; 102 static const struct super_operations cifs_super_ops; 103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; 104 module_param(CIFSMaxBufSize, uint, 0444); 105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) " 106 "for CIFS requests. " 107 "Default: 16384 Range: 8192 to 130048"); 108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL; 109 module_param(cifs_min_rcv, uint, 0444); 110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. 
Default: 4 Range: " 111 "1 to 64"); 112 unsigned int cifs_min_small = 30; 113 module_param(cifs_min_small, uint, 0444); 114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 " 115 "Range: 2 to 256"); 116 unsigned int cifs_max_pending = CIFS_MAX_REQ; 117 module_param(cifs_max_pending, uint, 0444); 118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for " 119 "CIFS/SMB1 dialect (N/A for SMB3) " 120 "Default: 32767 Range: 2 to 32767."); 121 unsigned int dir_cache_timeout = 30; 122 module_param(dir_cache_timeout, uint, 0644); 123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 " 124 "Range: 1 to 65000 seconds, 0 to disable caching dir contents"); 125 #ifdef CONFIG_CIFS_STATS2 126 unsigned int slow_rsp_threshold = 1; 127 module_param(slow_rsp_threshold, uint, 0644); 128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait " 129 "before logging that a response is delayed. " 130 "Default: 1 (if set to 0 disables msg)."); 131 #endif /* STATS2 */ 132 133 module_param(enable_oplocks, bool, 0644); 134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1"); 135 136 module_param(enable_gcm_256, bool, 0644); 137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0"); 138 139 module_param(require_gcm_256, bool, 0644); 140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0"); 141 142 module_param(enable_negotiate_signing, bool, 0644); 143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. 
Default: n/N/0");

module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
				  "helpful to restrict the ability to "
				  "override the default dialects (SMB2.1, "
				  "SMB3 and SMB3.02) on mount with old "
				  "dialects (CIFS/SMB1 and SMB2) since "
				  "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
				  " and less secure. Default: n/N/0");

/* Workqueues shared by all cifs mounts. */
struct workqueue_struct	*cifsiod_wq;
struct workqueue_struct	*decrypt_wq;
struct workqueue_struct	*fileinfo_put_wq;
struct workqueue_struct	*cifsoplockd_wq;
struct workqueue_struct	*deferredclose_wq;
struct workqueue_struct	*serverclose_wq;
/* NOTE(review): consumers of cifs_lock_secret are outside this chunk */
__u32 cifs_lock_secret;

/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	/* first local reference also pins the VFS superblock */
	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
}

/* Drop the reference taken by cifs_sb_active(). */
void
cifs_sb_deactive(struct super_block *sb)
{
	struct cifs_sb_info *server = CIFS_SB(sb);

	/* last local reference releases the VFS superblock as well */
	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
}

/*
 * Fill in a superblock for a new cifs mount: mount flags, file size limits,
 * timestamp granularity and range, bdi/readahead tuning, xattr and dentry
 * ops, and the root dentry.  Returns 0 on success or a negative errno.
 */
static int
cifs_read_super(struct super_block *sb)
{
	struct inode *inode;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct timespec64 ts;
	int rc = 0;

	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;

	/* mounts of a snapshot are inherently read-only */
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;

	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
	else
		sb->s_maxbytes = MAX_NON_LFS;

	/*
	 * Some very old servers like DOS and OS/2 used 2 second granularity
	 * (while all current servers use 100ns granularity - see MS-DTYP)
	 * but 1 second is the maximum allowed granularity for the VFS
	 * so for old servers set time granularity to 1 second while for
	 * everything else (current servers) set it to 100ns.
	 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);

	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);

	if (IS_ERR(inode)) {
		rc = PTR_ERR(inode);
		goto out_no_root;
	}

	if (tcon->nocase)
		sb->s_d_op = &cifs_ci_dentry_ops;
	else
		sb->s_d_op = &cifs_dentry_ops;

	sb->s_root = d_make_root(inode);
	if (!sb->s_root) {
		rc = -ENOMEM;
		goto out_no_root;
	}

#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
	}
#endif /* CONFIG_CIFS_NFSD_EXPORT */

	return 0;

out_no_root:
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
	return rc;
}

/* Tear down a cifs superblock: drop cached dir dentries, then unmount. */
static void cifs_kill_sb(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/*
	 * We need to release all dentries for the cached directories
	 * before we kill the sb.
	 */
	if (cifs_sb->root) {
		close_all_cached_dirs(cifs_sb);

		/* finally release root dentry */
		dput(cifs_sb->root);
		cifs_sb->root = NULL;
	}

	kill_anon_super(sb);
	cifs_umount(cifs_sb);
}

/*
 * Report filesystem statistics for statfs(2).  Name length and fsid come
 * from the tree connection; block counts are filled in by the per-dialect
 * queryfs op when the server supports it.
 */
static int
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
		buf->f_namelen =
		       le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
	else
		buf->f_namelen = PATH_MAX;

	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);

	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */

	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);

	free_xid(xid);
	return rc;
}

/* Dispatch fallocate(2) to the per-dialect handler, if any. */
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
{
	struct cifs_sb_info *cifs_sb =
CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;

	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);

	return -EOPNOTSUPP;
}

/*
 * ->permission() hook.  On "noperm" mounts skip local permission checking
 * (except execute); otherwise fall back to the generic mode-bit check on
 * the client.
 */
static int cifs_permission(struct mnt_idmap *idmap,
			   struct inode *inode, int mask)
{
	struct cifs_sb_info *cifs_sb;

	cifs_sb = CIFS_SB(inode->i_sb);

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else /* file mode might have been restricted at mount time
		on the client (above and beyond ACL on servers) for
		servers which do not support setting and viewing mode bits,
		so allowing client to check permissions is useful */
		return generic_permission(&nop_mnt_idmap, inode, mask);
}

/* slab caches and mempools for inodes, requests and mids */
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;

/* Allocate and initialize the CIFS-specific part of a new in-core inode. */
static struct inode *
cifs_alloc_inode(struct super_block *sb)
{
	struct cifsInodeInfo *cifs_inode;

	cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
	if (!cifs_inode)
		return NULL;
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	/*
	 * Until the file is open and we have gotten oplock info back from the
	 * server, can not assume caching of file data or metadata.
	 */
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->lease_granted = false;
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->netfs.remote_i_size = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	spin_lock_init(&cifs_inode->open_file_lock);
	generate_random_uuid(cifs_inode->lease_key);
	cifs_inode->symlink_target = NULL;

	/*
	 * Can not set i_flags here - they get immediately overwritten to zero
	 * by the VFS.
	 */
	/* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	INIT_LIST_HEAD(&cifs_inode->deferred_closes);
	spin_lock_init(&cifs_inode->deferred_lock);
	return &cifs_inode->netfs.inode;
}

/* Free the CIFS inode, including any cached symlink target string. */
static void
cifs_free_inode(struct inode *inode)
{
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(cinode->symlink_target);
	kmem_cache_free(cifs_inode_cachep, cinode);
}

/* Drop page cache and fscache cookies when an inode is evicted. */
static void
cifs_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	if (inode->i_state & I_PINNING_NETFS_WB)
		cifs_fscache_unuse_inode_cookie(inode, true);
	cifs_fscache_release_inode_cookie(inode);
	clear_inode(inode);
}

/* Print ",addr=..." (and ",rdma" if applicable) for /proc/mounts. */
static void
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
{
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;

	seq_puts(s, ",addr=");

	switch (server->dstaddr.ss_family) {
	case AF_INET:
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		break;
	case AF_INET6:
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u",
sa6->sin6_scope_id);
		break;
	default:
		seq_puts(s, "(unknown)");
	}
	if (server->rdma)
		seq_puts(s, ",rdma");
}

/* Print the ",sec=" mount option matching the session's auth type. */
static void
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
{
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
		return;
	}

	seq_puts(s, ",sec=");

	switch (ses->sectype) {
	case NTLMv2:
		seq_puts(s, "ntlmv2");
		break;
	case Kerberos:
		seq_puts(s, "krb5");
		break;
	case RawNTLMSSP:
		seq_puts(s, "ntlmssp");
		break;
	default:
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
		break;
	}

	/* "i" suffix (e.g. "krb5i") indicates signing is enabled */
	if (ses->sign)
		seq_puts(s, "i");

	if (ses->sectype == Kerberos)
		seq_printf(s, ",cruid=%u",
			   from_kuid_munged(&init_user_ns, ses->cred_uid));
}

/* Print the effective ",cache=" flavor for /proc/mounts. */
static void
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
{
	seq_puts(s, ",cache=");

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "none");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
		seq_puts(s, "singleclient"); /* assume only one client access */
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
		seq_puts(s, "ro"); /* read only caching assumed */
	else
		seq_puts(s, "loose");
}

/*
 * cifs_show_devname() is used so we show the mount device name with correct
 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 */
static int cifs_show_devname(struct seq_file *m, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);

	if (devname == NULL)
		seq_puts(m, "none");
	else {
		convert_delimiter(devname, '/');
		/* escape all spaces in share names */
		seq_escape(m, devname, " \t");
		kfree(devname);
	}
	return 0;
}

/*
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
 * ones are.
 */
static int
cifs_show_options(struct seq_file *s, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;

	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;

	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);

	if (tcon->no_lease)
		seq_puts(s, ",nolease");
	if (cifs_sb->ctx->multiuser)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);

	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);

	/* only show srcaddr if it was explicitly bound */
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;

		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
				   &saddr6->sin6_addr);
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
		else
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	}

	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns,
cifs_sb->ctx->linux_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
	else
		seq_puts(s, ",noforceuid");

	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
	else
		seq_puts(s, ",noforcegid");

	cifs_show_address(s, tcon->ses->server);

	if (!tcon->unix_ext)
		seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
					   cifs_sb->ctx->file_mode,
					   cifs_sb->ctx->dir_mode);
	if (cifs_sb->ctx->iocharset)
		seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
	if (tcon->seal)
		seq_puts(s, ",seal");
	else if (tcon->ses->server->ignore_signature)
		seq_puts(s, ",signloosely");
	if (tcon->nocase)
		seq_puts(s, ",nocase");
	if (tcon->nodelete)
		seq_puts(s, ",nodelete");
	if (cifs_sb->ctx->no_sparse)
		seq_puts(s, ",nosparse");
	if (tcon->local_lease)
		seq_puts(s, ",locallease");
	if (tcon->retry)
		seq_puts(s, ",hard");
	else
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
	else
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
		seq_puts(s, ",sfu");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
		seq_puts(s, ",modefromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
		seq_puts(s, ",acl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
		seq_puts(s, ",fsc");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->ctx->backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->ctx->backupgid));
	seq_show_option(s, "reparse",
			cifs_reparse_type_str(cifs_sb->ctx->reparse_type));

	seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
	if (cifs_sb->ctx->rasize)
		seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
	if (tcon->ses->server->min_offload)
		seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
	if (tcon->ses->server->retrans)
		seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
	seq_printf(s, ",echo_interval=%lu",
		   tcon->ses->server->echo_interval / HZ);

	/* Only display the following if overridden on mount */
	if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
		seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
	if (tcon->ses->server->tcp_nodelay)
		seq_puts(s, ",tcpnodelay");
	if (tcon->ses->server->noautotune)
		seq_puts(s, ",noautotune");
	if (tcon->ses->server->noblocksnd)
		seq_puts(s, ",noblocksend");
	if (tcon->ses->server->nosharesock)
		seq_puts(s, ",nosharesock");

	if (tcon->snapshot_time)
		seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
	if (tcon->handle_timeout)
		seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
	if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
		seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);

	/*
	 * Display file and directory attribute timeout in seconds.
	 * If file and directory attribute timeout the same then actimeo
	 * was likely specified on mount
	 */
	if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
		seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
	else {
		seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
		seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
	}
	seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);

	if (tcon->ses->chan_max > 1)
		seq_printf(s, ",multichannel,max_channels=%zu",
			   tcon->ses->chan_max);

	if (tcon->use_witness)
		seq_puts(s, ",witness");

	return 0;
}

/* Wake any waiters so a forced unmount ("umount -f") can make progress. */
static void cifs_umount_begin(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return;

	tcon = cifs_sb_master_tcon(cifs_sb);

	/* lock order: cifs_tcp_ses_lock, then the tcon's tc_lock */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
	 * fail later (e.g. due to open files).
 * TID_EXITING will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}

	return;
}

/* Flush deferred closes before the filesystem is frozen. */
static int cifs_freeze(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon;

	if (cifs_sb == NULL)
		return 0;

	tcon = cifs_sb_master_tcon(cifs_sb);

	cifs_close_all_deferred_files(tcon);
	return 0;
}

#ifdef CONFIG_CIFS_STATS2
/* Placeholder for per-sb statistics output. */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif

/* Let netfs unpin any writeback state it holds on this inode. */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return netfs_unpin_writeback(inode, wbc);
}

/* Decide whether an unreferenced inode can be kept in the icache. */
static int cifs_drop_inode(struct inode *inode)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);

	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
}

static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.write_inode	= cifs_write_inode,
	.free_inode = cifs_free_inode,
	.drop_inode	= cifs_drop_inode,
	.evict_inode	= cifs_evict_inode,
/*	.show_path	= cifs_show_path, */ /* Would we ever need show path? */
	.show_devname   = cifs_show_devname,
/*	.delete_inode	= cifs_delete_inode,  */ /* Do not need above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	as opens */
	.show_options = cifs_show_options,
	.umount_begin   = cifs_umount_begin,
	.freeze_fs      = cifs_freeze,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
#endif
};

/*
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and NULL otherwise.
 */
static struct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	char *s, *p;
	char sep;

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);

	full_path = cifs_build_path_to_root(ctx, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);

	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
	s = full_path;

	/* walk the prefix path one component at a time */
	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	kfree(full_path);
	return dentry;
}

/* sget() callback: attach our cifs_sb to a freshly allocated superblock. */
static int cifs_set_super(struct super_block *sb, void *data)
{
	struct cifs_mnt_data *mnt_data = data;

	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
}

struct dentry *
896 cifs_smb3_do_mount(struct file_system_type *fs_type, 897 int flags, struct smb3_fs_context *old_ctx) 898 { 899 struct cifs_mnt_data mnt_data; 900 struct cifs_sb_info *cifs_sb; 901 struct super_block *sb; 902 struct dentry *root; 903 int rc; 904 905 if (cifsFYI) { 906 cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__, 907 old_ctx->source, flags); 908 } else { 909 cifs_info("Attempting to mount %s\n", old_ctx->source); 910 } 911 912 cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL); 913 if (!cifs_sb) 914 return ERR_PTR(-ENOMEM); 915 916 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL); 917 if (!cifs_sb->ctx) { 918 root = ERR_PTR(-ENOMEM); 919 goto out; 920 } 921 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx); 922 if (rc) { 923 root = ERR_PTR(rc); 924 goto out; 925 } 926 927 rc = cifs_setup_cifs_sb(cifs_sb); 928 if (rc) { 929 root = ERR_PTR(rc); 930 goto out; 931 } 932 933 rc = cifs_mount(cifs_sb, cifs_sb->ctx); 934 if (rc) { 935 if (!(flags & SB_SILENT)) 936 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n", 937 rc); 938 root = ERR_PTR(rc); 939 goto out; 940 } 941 942 mnt_data.ctx = cifs_sb->ctx; 943 mnt_data.cifs_sb = cifs_sb; 944 mnt_data.flags = flags; 945 946 /* BB should we make this contingent on mount parm? */ 947 flags |= SB_NODIRATIME | SB_NOATIME; 948 949 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data); 950 if (IS_ERR(sb)) { 951 cifs_umount(cifs_sb); 952 return ERR_CAST(sb); 953 } 954 955 if (sb->s_root) { 956 cifs_dbg(FYI, "Use existing superblock\n"); 957 cifs_umount(cifs_sb); 958 cifs_sb = NULL; 959 } else { 960 rc = cifs_read_super(sb); 961 if (rc) { 962 root = ERR_PTR(rc); 963 goto out_super; 964 } 965 966 sb->s_flags |= SB_ACTIVE; 967 } 968 969 root = cifs_get_root(cifs_sb ? 
cifs_sb->ctx : old_ctx, sb); 970 if (IS_ERR(root)) 971 goto out_super; 972 973 if (cifs_sb) 974 cifs_sb->root = dget(root); 975 976 cifs_dbg(FYI, "dentry root is: %p\n", root); 977 return root; 978 979 out_super: 980 deactivate_locked_super(sb); 981 return root; 982 out: 983 kfree(cifs_sb->prepath); 984 smb3_cleanup_fs_context(cifs_sb->ctx); 985 kfree(cifs_sb); 986 return root; 987 } 988 989 990 static ssize_t 991 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter) 992 { 993 ssize_t rc; 994 struct inode *inode = file_inode(iocb->ki_filp); 995 996 if (iocb->ki_flags & IOCB_DIRECT) 997 return cifs_user_readv(iocb, iter); 998 999 rc = cifs_revalidate_mapping(inode); 1000 if (rc) 1001 return rc; 1002 1003 return generic_file_read_iter(iocb, iter); 1004 } 1005 1006 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 1007 { 1008 struct inode *inode = file_inode(iocb->ki_filp); 1009 struct cifsInodeInfo *cinode = CIFS_I(inode); 1010 ssize_t written; 1011 int rc; 1012 1013 if (iocb->ki_filp->f_flags & O_DIRECT) { 1014 written = cifs_user_writev(iocb, from); 1015 if (written > 0 && CIFS_CACHE_READ(cinode)) { 1016 cifs_zap_mapping(inode); 1017 cifs_dbg(FYI, 1018 "Set no oplock for inode=%p after a write operation\n", 1019 inode); 1020 cinode->oplock = 0; 1021 } 1022 return written; 1023 } 1024 1025 written = cifs_get_writer(cinode); 1026 if (written) 1027 return written; 1028 1029 written = generic_file_write_iter(iocb, from); 1030 1031 if (CIFS_CACHE_WRITE(CIFS_I(inode))) 1032 goto out; 1033 1034 rc = filemap_fdatawrite(inode->i_mapping); 1035 if (rc) 1036 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n", 1037 rc, inode); 1038 1039 out: 1040 cifs_put_writer(cinode); 1041 return written; 1042 } 1043 1044 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence) 1045 { 1046 struct cifsFileInfo *cfile = file->private_data; 1047 struct cifs_tcon *tcon; 1048 1049 /* 1050 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE 
	 * => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;

		rc = cifs_revalidate_file_attr(file);
		if (rc < 0)
			return (loff_t)rc;
	}
	/* let the server handle the seek if it implements one (e.g. SEEK_DATA) */
	if (cfile && cfile->tlink) {
		tcon = tlink_tcon(cfile->tlink);
		if (tcon->ses->server->ops->llseek)
			return tcon->ses->server->ops->llseek(file, tcon,
							      offset, whence);
	}
	return generic_file_llseek(file, offset, whence);
}

static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}

/* "cifs" mount type: both register the same fs_context operations */
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cifs",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("cifs");

/* "smb3" mount type: alias of "cifs" with a distinct fs name */
struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.name = "smb3",
	.init_fs_context = smb3_init_fs_context,
	.parameters = smb3_fs_parameters,
	.kill_sb = cifs_kill_sb,
	.fs_flags = FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");

const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.mkdir = cifs_mkdir,
	.rmdir = cifs_rmdir,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.mknod = cifs_mknod,
	.listxattr = cifs_listxattr,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
	.fiemap = cifs_fiemap,
	.get_acl = cifs_get_acl,
	.set_acl = cifs_set_acl,
};

/*
 * Return the cached symlink target, copied into a freshly allocated buffer
 * handed to the VFS via a delayed call (freed with kfree_link).  RCU-walk
 * (dentry == NULL) is not supported because we allocate with GFP_KERNEL.
 */
const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
			  struct delayed_call *done)
{
	char *target_path;

	if (!dentry)
		return ERR_PTR(-ECHILD);

	target_path = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!target_path)
		return ERR_PTR(-ENOMEM);

	/* i_lock protects symlink_target while we copy it out */
	spin_lock(&inode->i_lock);
	if (likely(CIFS_I(inode)->symlink_target)) {
		strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
	} else {
		kfree(target_path);
		target_path = ERR_PTR(-EOPNOTSUPP);
	}
	spin_unlock(&inode->i_lock);

	if (!IS_ERR(target_path))
		set_delayed_call(done, kfree_link, target_path);

	return target_path;
}

const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.setattr = cifs_setattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
};

/*
 * Advance the EOF marker to after the source range.
 */
static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
				struct cifs_tcon *src_tcon,
				unsigned int xid, loff_t src_end)
{
	struct cifsFileInfo *writeable_srcfile;
	int rc = -EINVAL;

	/* need a writable handle on the source to issue a set-EOF request */
	writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
	if (writeable_srcfile) {
		if (src_tcon->ses->server->ops->set_file_size)
			rc = src_tcon->ses->server->ops->set_file_size(
				xid, src_tcon, writeable_srcfile,
				src_inode->i_size, true /* no need to set sparse */);
		else
			rc = -ENOSYS;
		cifsFileInfo_put(writeable_srcfile);
		cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
	}

	if (rc < 0)
		goto set_failed;

	/* record the new remote size locally and in fscache */
	netfs_resize_file(&src_cifsi->netfs, src_end, true);
	fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
	return 0;

set_failed:
	/* couldn't move EOF on the server; fall back to flushing the source */
	return filemap_write_and_wait(src_inode->i_mapping);
}

/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate.  We extend the flush
 * bounds to encompass the folio.
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
			    bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;

	size = folio_size(folio);
	fpos = folio_pos(folio);
	fend = fpos + size - 1;
	/* widen the caller's invalidation bounds to whole-folio boundaries */
	*_fstart = min_t(unsigned long long, *_fstart, fpos);
	*_fend = max_t(unsigned long long, *_fend, fend);
	/* folio lies entirely inside the range being invalidated: no flush */
	if ((first && pos == fpos) || (!first && pos == fend))
		goto out;

	rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
out:
	folio_put(folio);
	return rc;
}

/*
 * Server-side clone (FSCTL duplicate extents) for remap_file_range().
 * Returns the number of bytes cloned (len) on success or a negative errno.
 */
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target = dst_file->private_data;
	struct cifs_tcon *target_tcon, *src_tcon;
	unsigned long long destend, fstart, fend, new_size;
	unsigned int xid;
	int rc;

	if (remap_flags & REMAP_FILE_DEDUP)
		return -EOPNOTSUPP;
	if (remap_flags & ~REMAP_FILE_ADVISORY)
		return -EINVAL;

	cifs_dbg(FYI, "clone range\n");

	xid = get_xid();

	if (!smb_file_src || !smb_file_target) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	/* len == 0 means "to the end of the source file" */
	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	new_size = destoff + len;
	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > i_size_read(target_inode)) {
			/* clone grew the target: propagate the new size */
			truncate_setsize(target_inode, new_size);
			netfs_resize_file(&target_cifsi->netfs, new_size, true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		}
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ?
		rc : len;
}

/*
 * Server-side copy (SMB2 copychunk) of len bytes from src_file at off to
 * dst_file at destoff.  Both handles must be on the same SMB session.
 * Returns bytes copied or a negative errno.
 */
ssize_t cifs_file_copychunk_range(unsigned int xid,
				  struct file *src_file, loff_t off,
				  struct file *dst_file, loff_t destoff,
				  size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
	struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	unsigned long long destend, fstart, fend;
	ssize_t rc;

	cifs_dbg(FYI, "copychunk range\n");

	if (!src_file->private_data || !dst_file->private_data) {
		rc = -EBADF;
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
		goto out;
	}

	/* -EXDEV lets the VFS fall back to a generic cross-mount copy */
	rc = -EXDEV;
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);

	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	destend = destoff + len - 1;

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
	if (rc)
		goto unlock;
	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
	if (rc)
		goto unlock;

	/* Discard all the folios that overlap the destination region. */
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
			   i_size_read(target_inode), 0);

	rc = file_modified(dst_file);
	if (!rc) {
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
			/* copy extended the target: propagate the new size */
			truncate_setsize(target_inode, destoff + rc);
			netfs_resize_file(&target_cifsi->netfs,
					  i_size_read(target_inode), true);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      i_size_read(target_inode));
		}
		if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = destoff + rc;
	}

	file_accessed(src_file);

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;

unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);

out:
	return rc;
}

/*
 * Directory operations under CIFS/SMB2/SMB3 are
 * synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	return 0;
}

/*
 * copy_file_range(): try the server-side copychunk path first; fall back to
 * a local splice copy if the server can't do it or files cross servers.
 */
static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
{
	unsigned int xid = get_xid();
	ssize_t rc;
	struct cifsFileInfo *cfile = dst_file->private_data;

	/* NOTE(review): cfile is dereferenced without a NULL check here;
	 * cifs_file_copychunk_range() does check private_data — confirm the
	 * destination always has one by this point.
	 */
	if (cfile->swapfile) {
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
					len, flags);
	free_xid(xid);

	if (rc == -EOPNOTSUPP || rc == -EXDEV)
		rc = splice_copy_file_range(src_file, off, dst_file,
					    destoff, len);
	return rc;
}

/* default (loose-caching) file ops, byte-range locking supported */
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* strict-caching variant (cache=strict mount option) */
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek =
	cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* uncached (cache=none) variant: reads/writes bypass the page cache */
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.lock = cifs_lock,
	.flock = cifs_flock,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

/* "nobrl" variants omit .lock/.flock (no byte-range locks sent to server) */
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap  = cifs_file_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl	= cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease =
	cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.open = cifs_open,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = copy_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
};

const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read    = generic_read_dir,
	.unlocked_ioctl  = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
};

/* slab constructor: runs once per cifsInodeInfo object, not per allocation */
static void
cifs_init_once(void *inode)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->netfs.inode);
	init_rwsem(&cifsi->lock_sem);
}

static int __init
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_ACCOUNT),
					      cifs_init_once);
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}

/*
 * Create the slab caches and mempools for large and small request buffers,
 * clamping the module parameters (CIFSMaxBufSize, cifs_min_rcv, cifs_min_small)
 * to their supported ranges first.
 */
static int
cifs_init_request_bufs(void)
{
	/*
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	 */
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;

	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00;	/* Round size to even 512 byte mult*/
	}
/*
	cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
		 CIFSMaxBufSize, CIFSMaxBufSize);
*/
	/* usercopy region spans the whole buffer (header + payload) */
	cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
					    CIFSMaxBufSize + max_hdr_size, 0,
					    SLAB_HWCACHE_ALIGN, 0,
					    CIFSMaxBufSize + max_hdr_size,
					    NULL);
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	/* clamp module parameter to the supported 1..64 range */
	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}
	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
	   almost all handle based requests (but not write response, nor is it
	   sufficient for path based requests).
	   A smaller size would have
	   been more efficient (compacting multiple slab items on one 4k page)
	   for the case in which debug was on, but this larger size allows
	   more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
			0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
	if (cifs_sm_req_cachep == NULL) {
		/* unwind the large-request pool/cache created above */
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* clamp module parameter to the supported 2..256 range */
	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

/* slab cache + mempool for mid_q_entry (multiplex id) tracking structures */
static int init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void destroy_mids(void)
{
mempool_destroy(cifs_mid_poolp); 1799 kmem_cache_destroy(cifs_mid_cachep); 1800 } 1801 1802 static int __init 1803 init_cifs(void) 1804 { 1805 int rc = 0; 1806 cifs_proc_init(); 1807 INIT_LIST_HEAD(&cifs_tcp_ses_list); 1808 /* 1809 * Initialize Global counters 1810 */ 1811 atomic_set(&sesInfoAllocCount, 0); 1812 atomic_set(&tconInfoAllocCount, 0); 1813 atomic_set(&tcpSesNextId, 0); 1814 atomic_set(&tcpSesAllocCount, 0); 1815 atomic_set(&tcpSesReconnectCount, 0); 1816 atomic_set(&tconInfoReconnectCount, 0); 1817 1818 atomic_set(&buf_alloc_count, 0); 1819 atomic_set(&small_buf_alloc_count, 0); 1820 #ifdef CONFIG_CIFS_STATS2 1821 atomic_set(&total_buf_alloc_count, 0); 1822 atomic_set(&total_small_buf_alloc_count, 0); 1823 if (slow_rsp_threshold < 1) 1824 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n"); 1825 else if (slow_rsp_threshold > 32767) 1826 cifs_dbg(VFS, 1827 "slow response threshold set higher than recommended (0 to 32767)\n"); 1828 #endif /* CONFIG_CIFS_STATS2 */ 1829 1830 atomic_set(&mid_count, 0); 1831 GlobalCurrentXid = 0; 1832 GlobalTotalActiveXid = 0; 1833 GlobalMaxActiveXid = 0; 1834 spin_lock_init(&cifs_tcp_ses_lock); 1835 spin_lock_init(&GlobalMid_Lock); 1836 1837 cifs_lock_secret = get_random_u32(); 1838 1839 if (cifs_max_pending < 2) { 1840 cifs_max_pending = 2; 1841 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n"); 1842 } else if (cifs_max_pending > CIFS_MAX_REQ) { 1843 cifs_max_pending = CIFS_MAX_REQ; 1844 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n", 1845 CIFS_MAX_REQ); 1846 } 1847 1848 /* Limit max to about 18 hours, and setting to zero disables directory entry caching */ 1849 if (dir_cache_timeout > 65000) { 1850 dir_cache_timeout = 65000; 1851 cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n"); 1852 } 1853 1854 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); 1855 if (!cifsiod_wq) { 1856 rc = -ENOMEM; 1857 goto out_clean_proc; 1858 } 1859 1860 /* 1861 * Consider in future setting 
limit!=0 maybe to min(num_of_cores - 1, 3) 1862 * so that we don't launch too many worker threads but 1863 * Documentation/core-api/workqueue.rst recommends setting it to 0 1864 */ 1865 1866 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */ 1867 decrypt_wq = alloc_workqueue("smb3decryptd", 1868 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); 1869 if (!decrypt_wq) { 1870 rc = -ENOMEM; 1871 goto out_destroy_cifsiod_wq; 1872 } 1873 1874 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput", 1875 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); 1876 if (!fileinfo_put_wq) { 1877 rc = -ENOMEM; 1878 goto out_destroy_decrypt_wq; 1879 } 1880 1881 cifsoplockd_wq = alloc_workqueue("cifsoplockd", 1882 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); 1883 if (!cifsoplockd_wq) { 1884 rc = -ENOMEM; 1885 goto out_destroy_fileinfo_put_wq; 1886 } 1887 1888 deferredclose_wq = alloc_workqueue("deferredclose", 1889 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); 1890 if (!deferredclose_wq) { 1891 rc = -ENOMEM; 1892 goto out_destroy_cifsoplockd_wq; 1893 } 1894 1895 serverclose_wq = alloc_workqueue("serverclose", 1896 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); 1897 if (!serverclose_wq) { 1898 rc = -ENOMEM; 1899 goto out_destroy_serverclose_wq; 1900 } 1901 1902 rc = cifs_init_inodecache(); 1903 if (rc) 1904 goto out_destroy_deferredclose_wq; 1905 1906 rc = init_mids(); 1907 if (rc) 1908 goto out_destroy_inodecache; 1909 1910 rc = cifs_init_request_bufs(); 1911 if (rc) 1912 goto out_destroy_mids; 1913 1914 #ifdef CONFIG_CIFS_DFS_UPCALL 1915 rc = dfs_cache_init(); 1916 if (rc) 1917 goto out_destroy_request_bufs; 1918 #endif /* CONFIG_CIFS_DFS_UPCALL */ 1919 #ifdef CONFIG_CIFS_UPCALL 1920 rc = init_cifs_spnego(); 1921 if (rc) 1922 goto out_destroy_dfs_cache; 1923 #endif /* CONFIG_CIFS_UPCALL */ 1924 #ifdef CONFIG_CIFS_SWN_UPCALL 1925 rc = cifs_genl_init(); 1926 if (rc) 1927 goto out_register_key_type; 1928 #endif /* CONFIG_CIFS_SWN_UPCALL */ 1929 1930 rc = init_cifs_idmap(); 1931 if (rc) 1932 goto out_cifs_swn_init; 1933 1934 rc = 
register_filesystem(&cifs_fs_type); 1935 if (rc) 1936 goto out_init_cifs_idmap; 1937 1938 rc = register_filesystem(&smb3_fs_type); 1939 if (rc) { 1940 unregister_filesystem(&cifs_fs_type); 1941 goto out_init_cifs_idmap; 1942 } 1943 1944 return 0; 1945 1946 out_init_cifs_idmap: 1947 exit_cifs_idmap(); 1948 out_cifs_swn_init: 1949 #ifdef CONFIG_CIFS_SWN_UPCALL 1950 cifs_genl_exit(); 1951 out_register_key_type: 1952 #endif 1953 #ifdef CONFIG_CIFS_UPCALL 1954 exit_cifs_spnego(); 1955 out_destroy_dfs_cache: 1956 #endif 1957 #ifdef CONFIG_CIFS_DFS_UPCALL 1958 dfs_cache_destroy(); 1959 out_destroy_request_bufs: 1960 #endif 1961 cifs_destroy_request_bufs(); 1962 out_destroy_mids: 1963 destroy_mids(); 1964 out_destroy_inodecache: 1965 cifs_destroy_inodecache(); 1966 out_destroy_deferredclose_wq: 1967 destroy_workqueue(deferredclose_wq); 1968 out_destroy_cifsoplockd_wq: 1969 destroy_workqueue(cifsoplockd_wq); 1970 out_destroy_fileinfo_put_wq: 1971 destroy_workqueue(fileinfo_put_wq); 1972 out_destroy_decrypt_wq: 1973 destroy_workqueue(decrypt_wq); 1974 out_destroy_cifsiod_wq: 1975 destroy_workqueue(cifsiod_wq); 1976 out_destroy_serverclose_wq: 1977 destroy_workqueue(serverclose_wq); 1978 out_clean_proc: 1979 cifs_proc_clean(); 1980 return rc; 1981 } 1982 1983 static void __exit 1984 exit_cifs(void) 1985 { 1986 cifs_dbg(NOISY, "exit_smb3\n"); 1987 unregister_filesystem(&cifs_fs_type); 1988 unregister_filesystem(&smb3_fs_type); 1989 cifs_release_automount_timer(); 1990 exit_cifs_idmap(); 1991 #ifdef CONFIG_CIFS_SWN_UPCALL 1992 cifs_genl_exit(); 1993 #endif 1994 #ifdef CONFIG_CIFS_UPCALL 1995 exit_cifs_spnego(); 1996 #endif 1997 #ifdef CONFIG_CIFS_DFS_UPCALL 1998 dfs_cache_destroy(); 1999 #endif 2000 cifs_destroy_request_bufs(); 2001 destroy_mids(); 2002 cifs_destroy_inodecache(); 2003 destroy_workqueue(deferredclose_wq); 2004 destroy_workqueue(cifsoplockd_wq); 2005 destroy_workqueue(decrypt_wq); 2006 destroy_workqueue(fileinfo_put_wq); 2007 destroy_workqueue(serverclose_wq); 
	destroy_workqueue(cifsiod_wq);
	cifs_proc_clean();
}

MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
/* crypto algorithms the module may need at runtime; hint the loader */
MODULE_SOFTDEP("ecb");
MODULE_SOFTDEP("hmac");
MODULE_SOFTDEP("md5");
MODULE_SOFTDEP("nls");
MODULE_SOFTDEP("aes");
MODULE_SOFTDEP("cmac");
MODULE_SOFTDEP("sha256");
MODULE_SOFTDEP("sha512");
MODULE_SOFTDEP("aead2");
MODULE_SOFTDEP("ccm");
MODULE_SOFTDEP("gcm");
module_init(init_cifs)
module_exit(exit_cifs)