// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				return NULL;
			}
			kref_get(&cfid->refcount);
			return cfid;
		}
	}
	if (lookup_only) {
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	return cfid;
}

static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}
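
/*
 * Illustration (hypothetical values, not part of the driver): with
 * CIFS_MOUNT_USE_PREFIX_PATH set and cifs_sb->prepath == "dir1", the
 * prefix and its trailing separator are skipped, while a path shorter
 * than the prefix is rejected:
 *
 *	path_no_prefix(cifs_sb, "dir1\subdir")  ->  "subdir"
 *	path_no_prefix(cifs_sb, "")             ->  ""
 *	path_no_prefix(cifs_sb, "dir")          ->  ERR_PTR(-EINVAL)
 */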

/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	if (tcon == NULL)
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	spin_lock(&cfids->cfid_list_lock);
	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return the cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or the laundromat worker removed
	 * it from @cfids->entries. Caller will put last reference if the latter.
	 */
	if (cfid->has_lease && cfid->time) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	/*
	 * Skip any prefix paths in @path as lookup_noperm_positive_unlocked()
	 * ends up calling ->lookup() which already adds those through
	 * build_path_from_dentry(). Also, do it earlier as we might reconnect
	 * below when trying to send the compounded request and then
	 * potentially have a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
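	/*
	 * What follows builds a two-request compound: an SMB2 CREATE that
	 * opens the directory and requests a directory lease, chained to an
	 * SMB2 QUERY_INFO (FILE_ALL_INFORMATION) on COMPOUND_FID so the
	 * directory attributes come back in the same round trip:
	 *
	 *	rqst[0]: SMB2_open_init()        (lease key from ->new_lease_key)
	 *	rqst[1]: SMB2_query_info_init()  (related; reuses rqst[0]'s fid)
	 */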
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point. One for the caller and one for a potential
			 * lease. Release one here, and the second below.
			 */
			cfid->has_lease = false;
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		kref_put(&cfid->refcount, smb2_close_cached_fid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}
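
/*
 * Typical caller pattern (sketch; error handling elided and the names
 * are illustrative):
 *
 *	struct cached_fid *cfid;
 *
 *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
 *	if (!rc) {
 *		// use cfid->fid / cfid->file_all_info while the lease holds
 *		close_cached_dir(cfid);	// drop the caller's reference
 *	}
 */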

int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}
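
/*
 * Drop a cached directory handle by name, e.g. before the directory is
 * removed on the server: look the handle up without creating one
 * (lookup_only == true), put the lease reference so the handle gets
 * closed, then put the lookup reference taken above.
 */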
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
			if (tmp_list == NULL)
				break;
			spin_lock(&cfid->fid_lock);
			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;
			spin_unlock(&cfid->fid_lock);

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}
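
/*
 * Lifecycle sketch of an invalidated cfid (illustrative):
 *
 *	cfids->entries --invalidate_all_cached_dirs()--> cfids->dying
 *	cfids->dying --cfids_invalidation_worker()--> kref_put() --> freed
 *
 * If the cfid still holds its lease reference, that reference is stolen
 * rather than taking a new one, since the lease was never cancelled
 * from the server.
 */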

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying
	 * list. They'll be cleaned up later by cfids_invalidation_worker.
	 * Take a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}

static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	kref_put(&cfid->refcount, smb2_close_cached_fid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing it on the server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	struct dentry *dentry;

	spin_lock(&cfid->fid_lock);
	dentry = cfid->dentry;
	cfid->dentry = NULL;
	spin_unlock(&cfid->fid_lock);

	dput(dentry);
	queue_work(serverclose_wq, &cfid->close_work);
}
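
/*
 * Lease-break teardown (sketch): when the server breaks the directory
 * lease, the handle is torn down in stages so that no blocking work
 * runs in the lease-break receive path:
 *
 *	cached_dir_lease_break()      unlink the cfid, grab a tcon ref, queue:
 *	-> cached_dir_put_work()      drop the dentry (cfid_put_wq), queue:
 *	-> cached_dir_offload_close() put the lease ref, which may SMB2_close
 *	                              on the server (serverclose_wq), then
 *	                              put the tcon ref
 */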
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

static void cfids_invalidation_worker(struct work_struct *work)
{
	struct cached_fids *cfids = container_of(work, struct cached_fids,
						 invalidation_work);
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}
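
/*
 * Laundromat sketch: every dir_cache_timeout seconds the delayed work
 * below walks the cache and expires any entry that has been cached for
 * longer than dir_cache_timeout, then re-queues itself:
 *
 *	expired := cfid->time &&
 *		   time_after(jiffies, cfid->time + HZ * dir_cache_timeout)
 *
 * Expired entries that are still open are closed on the server via
 * cached_dir_offload_close(); the rest just drop their reference.
 */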
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	struct dentry *dentry;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from
				 * the server. Steal that reference.
				 */
				cfid->has_lease = false;
			} else
				kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		spin_lock(&cfid->fid_lock);
		dentry = cfid->dentry;
		cfid->dentry = NULL;
		spin_unlock(&cfid->fid_lock);

		dput(dentry);
		if (cfid->is_open) {
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref
			 * (if there was one) or the extra one acquired.
			 */
			kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);
	cancel_work_sync(&cfids->invalidation_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}
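
/*
 * Setup/teardown pairing (sketch; the call sites live in the tcon
 * allocation and teardown code, not in this file):
 *
 *	tcon->cfids = init_cached_dirs();	// when the tcon is created
 *	...
 *	free_cached_dirs(tcon->cfids);		// from tconInfoFree()
 */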