// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				return NULL;
			}
			kref_get(&cfid->refcount);
			return cfid;
		}
	}
	if (lookup_only) {
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	return cfid;
}

static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
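 *
 * The handle is opened with a compounded SMB2_CREATE + SMB2_QUERY_INFO
 * request and is only kept when the server grants a directory lease with
 * at least read caching; otherwise it is torn down again in the error path.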
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;
	__le32 lease_flags = 0;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	if (tcon == NULL)
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	spin_lock(&cfids->cfid_list_lock);
	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return the cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or the laundromat worker has
	 * removed it from @cfids->entries.  Caller will put the last reference
	 * if the latter.
	 */
	if (cfid->has_lease && cfid->time) {
		cfid->last_access_time = jiffies;
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	pfid = &cfid->fid;

	/*
	 * Skip any prefix paths in @path as lookup_noperm_positive_unlocked()
	 * ends up calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
	 * below when trying to send the compounded request and then potentially
	 * have a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
		if (dentry->d_parent && server->dialect >= SMB30_PROT_ID) {
			struct cached_fid *parent_cfid;

			spin_lock(&cfids->cfid_list_lock);
			list_for_each_entry(parent_cfid, &cfids->entries, entry) {
				if (parent_cfid->dentry == dentry->d_parent) {
					cifs_dbg(FYI, "found a parent cached file handle\n");
					if (parent_cfid->has_lease && parent_cfid->time) {
						lease_flags
							|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
						memcpy(pfid->parent_lease_key,
						       parent_cfid->fid.lease_key,
						       SMB2_LEASE_KEY_SIZE);
					}
					break;
				}
			}
			spin_unlock(&cfids->cfid_list_lock);
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;
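
	/*
	 * The cfid now owns the dentry reference taken above; it is dropped
	 * again when the cfid is finally released.
	 */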

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.lease_flags = lease_flags,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	cfid->last_access_time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
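	/*
	 * Error path: the handle was not (or could not remain) cached, so
	 * undo the setup done by find_or_create_cached_dir() above and drop
	 * the caller's reference.
	 */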
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point.  One for the caller and one for a potential
			 * lease.  Release one here, and the second below.
			 */
			cfid->has_lease = false;
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		kref_put(&cfid->refcount, smb2_close_cached_fid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}
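
/*
 * Final release of a cached_fid reference (kref release function): take the
 * cfid off the list if it is still there, drop its dentry, close the handle
 * on the server if it is still open, and free the structure.
 */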
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
			if (tmp_list == NULL) {
				/*
				 * If the malloc() fails, we won't drop all
				 * dentries, and unmounting is likely to trigger
				 * a 'Dentry still in use' error.
				 */
				cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
				spin_unlock(&cfids->cfid_list_lock);
				spin_unlock(&cifs_sb->tlink_tree_lock);
				goto done;
			}
			spin_lock(&cfid->fid_lock);
			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;
			spin_unlock(&cfid->fid_lock);

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

done:
	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying list.
	 * They'll be cleaned up later by cfids_invalidation_worker.  Take
	 * a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
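			 * It becomes the reference that
			 * cfids_invalidation_worker() will put.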
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}

static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	kref_put(&cfid->refcount, smb2_close_cached_fid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing on server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	struct dentry *dentry;

	spin_lock(&cfid->fid_lock);
	dentry = cfid->dentry;
	cfid->dentry = NULL;
	spin_unlock(&cfid->fid_lock);

	dput(dentry);
	queue_work(serverclose_wq, &cfid->close_work);
}

bool cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no threads can access it.
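			 * The lease reference is dropped later by the work
			 * queued below (cached_dir_put_work(), which in turn
			 * queues cached_dir_offload_close()).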
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

static void cfids_invalidation_worker(struct work_struct *work)
{
	struct cached_fids *cfids = container_of(work, struct cached_fids,
						 invalidation_work);
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	struct dentry *dentry;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->last_access_time &&
		    time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from
				 * the server.  Steal that reference.
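				 * It is dropped below, either by the offloaded
				 * close or by the kref_put() at the end of the
				 * loop.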
				 */
				cfid->has_lease = false;
			} else
				kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		spin_lock(&cfid->fid_lock);
		dentry = cfid->dentry;
		cfid->dentry = NULL;
		spin_unlock(&cfid->fid_lock);

		dput(dentry);
		if (cfid->is_open) {
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref
			 * (if there was one) or the extra one acquired.
			 */
			kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);
	cancel_work_sync(&cfids->invalidation_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}