// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->has_lease) {
				spin_unlock(&cfids->cfid_list_lock);
				return NULL;
			}
			kref_get(&cfid->refcount);
			spin_unlock(&cfids->cfid_list_lock);
			return cfid;
		}
	}
	if (lookup_only) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	spin_unlock(&cfids->cfid_list_lock);
	return cfid;
}

static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
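 * On success the caller holds a reference on *ret_cfid and must drop it
 * with close_cached_dir().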
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;

	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
		return -EOPNOTSUPP;

	ses = tcon->ses;
	server = ses->server;
	cfids = tcon->cfids;

	if (!server->ops->new_lease_key)
		return -EIO;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * At this point we either already have a lease and can just return it,
	 * or we are guaranteed to be the only thread accessing this cfid.
	 */
	if (cfid->has_lease) {
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}

	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry(). Also, do it earlier as we might reconnect
	 * below when trying to send compounded request and then potentially
	 * having a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		kfree(utf16_path);
		return rc;
	}

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
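	/* On encrypted shares the compound request must be transformed (encrypted) before it is sent */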
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES,
		.disposition = FILE_OPEN,
		.fid = pfid,
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->tcon = tcon;
	cfid->is_open = true;

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
		goto oshr_free;

	smb2_parse_contexts(server, o_rsp,
			    &oparms.fid->epoch,
			    oparms.fid->lease_key, &oplock,
			    NULL, NULL);
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE))
		goto oshr_free;
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
		goto oshr_free;
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	if (!npath[0])
		dentry = dget(cifs_sb->root);
	else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto oshr_free;
		}
	}
	cfid->dentry = dentry;
	cfid->time = jiffies;
	cfid->has_lease = true;

oshr_free:
	kfree(utf16_path);
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	spin_lock(&cfids->cfid_list_lock);
	if (rc && !cfid->has_lease) {
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		rc = -ENOENT;
	}
	spin_unlock(&cfids->cfid_list_lock);
	if (!rc && !cfid->has_lease) {
		/*
		 * We are guaranteed to have two references at this point.
		 * One for the caller and one for a potential lease.
		 * Release the Lease-ref so that the directory will be closed
		 * when the caller closes the cached handle.
		 */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	if (rc) {
		if (cfid->is_open)
			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				   cfid->fid.volatile_fid);
		free_cached_dir(cfid);
		cfid = NULL;
	}

	if (rc == 0) {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}

	return rc;
}

int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -ENOENT;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached root file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
			   cfid->fid.volatile_fid);
		atomic_dec(&cfid->tcon->num_remote_opens);
	}

	free_cached_dir(cfid);
}

void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;

	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		list_for_each_entry(cfid, &cfids->entries, entry) {
			dput(cfid->dentry);
			cfid->dentry = NULL;
		}
	}
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
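 * Entries are moved to a private list under the spinlock with an extra
 * reference held, then torn down outside the lock so that we do not
 * deadlock with a concurrent lease-break work item.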
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &entry);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		/* To prevent race with smb2_cached_lease_break() */
		kref_get(&cfid->refcount);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		cancel_work_sync(&cfid->lease_break);
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server so we
			 * need to drop the reference.
			 */
			spin_lock(&cfids->cfid_list_lock);
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		/* Drop the extra reference taken above */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

static void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, lease_break);

	spin_lock(&cfid->cfids->cfid_list_lock);
	cfid->has_lease = false;
	spin_unlock(&cfid->cfids->cfid_list_lock);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no other thread can access it.
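			 * The queued work will drop the lease reference in
			 * smb2_cached_lease_break().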
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			queue_work(cifsiod_wq,
				   &cfid->lease_break);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

static int
cifs_cfids_laundromat_thread(void *p)
{
	struct cached_fids *cfids = p;
	struct cached_fid *cfid, *q;
	struct list_head entry;

	while (!kthread_should_stop()) {
		ssleep(1);
		INIT_LIST_HEAD(&entry);
		if (kthread_should_stop())
			return 0;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
			if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
				list_del(&cfid->entry);
				list_add(&cfid->entry, &entry);
				cfids->num_entries--;
			}
		}
		spin_unlock(&cfids->cfid_list_lock);

		list_for_each_entry_safe(cfid, q, &entry, entry) {
			cfid->on_list = false;
			list_del(&cfid->entry);
			/*
			 * Cancel, and wait for the work to finish in
			 * case we are racing with it.
			 */
			cancel_work_sync(&cfid->lease_break);
			if (cfid->has_lease) {
				/*
				 * The lease has not yet been cancelled from
				 * the server so we need to drop the reference.
				 */
				spin_lock(&cfids->cfid_list_lock);
				cfid->has_lease = false;
				spin_unlock(&cfids->cfid_list_lock);
				kref_put(&cfid->refcount, smb2_close_cached_fid);
			}
		}
	}

	return 0;
}

struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);

	/*
	 * since we're in a cifs function already, we know that
	 * this will succeed. No need for try_module_get().
	 */
	__module_get(THIS_MODULE);
	cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
					cfids, "cifsd-cfid-laundromat");
	if (IS_ERR(cfids->laundromat)) {
		cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
		kfree(cfids);
		module_put(THIS_MODULE);
		return NULL;
	}
	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
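 * The laundromat thread is stopped before the remaining entries are freed.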
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids->laundromat) {
		kthread_stop(cfids->laundromat);
		cfids->laundromat = NULL;
		module_put(THIS_MODULE);
	}

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}