// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
 *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "misc.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "mgmt/user_config.h"
#include "smb_common.h"
#include "server.h"
#include "smb2pdu.h"

#define S_DEL_PENDING		1
#define S_DEL_ON_CLS		2
#define S_DEL_ON_CLS_STREAM	8

static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);

static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;

#define OPLOCK_NONE		0
#define OPLOCK_EXCLUSIVE	1
#define OPLOCK_BATCH		2
#define OPLOCK_READ		3	/* level 2 oplock */

#ifdef CONFIG_PROC_FS

static const struct ksmbd_const_name ksmbd_lease_const_names[] = {
	{le32_to_cpu(SMB2_LEASE_NONE_LE), "LEASE_NONE"},
	{le32_to_cpu(SMB2_LEASE_READ_CACHING_LE), "LEASE_R"},
	{le32_to_cpu(SMB2_LEASE_HANDLE_CACHING_LE), "LEASE_H"},
	{le32_to_cpu(SMB2_LEASE_WRITE_CACHING_LE), "LEASE_W"},
	{le32_to_cpu(SMB2_LEASE_READ_CACHING_LE |
		     SMB2_LEASE_HANDLE_CACHING_LE), "LEASE_RH"},
	{le32_to_cpu(SMB2_LEASE_READ_CACHING_LE |
		     SMB2_LEASE_WRITE_CACHING_LE), "LEASE_RW"},
	{le32_to_cpu(SMB2_LEASE_HANDLE_CACHING_LE |
		     SMB2_LEASE_WRITE_CACHING_LE), "LEASE_WH"},
	{le32_to_cpu(SMB2_LEASE_READ_CACHING_LE |
		     SMB2_LEASE_HANDLE_CACHING_LE |
		     SMB2_LEASE_WRITE_CACHING_LE), "LEASE_RWH"},
};

static const struct ksmbd_const_name ksmbd_oplock_const_names[] = {
	{SMB2_OPLOCK_LEVEL_NONE, "OPLOCK_NONE"},
	{SMB2_OPLOCK_LEVEL_II, "OPLOCK_II"},
	{SMB2_OPLOCK_LEVEL_EXCLUSIVE, "OPLOCK_EXCL"},
	{SMB2_OPLOCK_LEVEL_BATCH, "OPLOCK_BATCH"},
};

static int proc_show_files(struct seq_file *m, void *v)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;
	struct oplock_info *opinfo;

	seq_printf(m, "#%-10s %-10s %-10s %-10s %-15s %-10s %-10s %s\n",
		   "<tree id>", "<pid>", "<vid>", "<refcnt>",
		   "<oplock>", "<daccess>", "<saccess>",
		   "<name>");

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		seq_printf(m, "%#-10x %#-10llx %#-10llx %#-10x",
			   fp->tcon->id,
			   fp->persistent_id,
			   fp->volatile_id,
			   atomic_read(&fp->refcount));

		rcu_read_lock();
		opinfo = rcu_dereference(fp->f_opinfo);
		if (opinfo) {
			const struct ksmbd_const_name *const_names;
			int count;
			unsigned int level;

			if (opinfo->is_lease) {
				const_names = ksmbd_lease_const_names;
				count = ARRAY_SIZE(ksmbd_lease_const_names);
				level = le32_to_cpu(opinfo->o_lease->state);
			} else {
				const_names = ksmbd_oplock_const_names;
				count = ARRAY_SIZE(ksmbd_oplock_const_names);
				level = opinfo->level;
			}
			rcu_read_unlock();
			ksmbd_proc_show_const_name(m, " %-15s",
						   const_names, count, level);
		} else {
			rcu_read_unlock();
			seq_printf(m, " %-15s", " ");
		}

		seq_printf(m, " %#010x %#010x %s\n",
			   le32_to_cpu(fp->daccess),
			   le32_to_cpu(fp->saccess),
			   fp->filp->f_path.dentry->d_name.name);
	}
	read_unlock(&global_ft.lock);
	return 0;
}

static int create_proc_files(void)
{
	ksmbd_proc_create("files", proc_show_files, NULL);
	return 0;
}
#else
static int create_proc_files(void) { return 0; }
#endif

static bool durable_scavenger_running;
static DEFINE_MUTEX(durable_scavenger_lock);
static wait_queue_head_t dh_wq;

void ksmbd_set_fd_limit(unsigned long limit)
{
	limit = min(limit, get_max_files());
	atomic_long_set(&fd_limit, limit);
}

static bool fd_limit_depleted(void)
{
	long v = atomic_long_dec_return(&fd_limit);

	if (v >= 0)
		return false;
	atomic_long_inc(&fd_limit);
	return true;
}

static void fd_limit_close(void)
{
	atomic_long_inc(&fd_limit);
}

/*
 * INODE hash
 */

static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
		L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
	return tmp & inode_hash_mask;
}

static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
{
	struct hlist_head *head = inode_hashtable +
		inode_hash(d_inode(de)->i_sb, (unsigned long)de);
	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;

	hlist_for_each_entry(ci, head, m_hash) {
		if (ci->m_de == de) {
			if (atomic_inc_not_zero(&ci->m_count))
				ret_ci = ci;
			break;
		}
	}
	return ret_ci;
}

static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
	return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
}

struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
{
	struct ksmbd_inode *ci;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(d);
	read_unlock(&inode_hash_lock);

	return ci;
}

int ksmbd_query_inode_status(struct dentry *dentry)
{
	struct ksmbd_inode *ci;
	int ret = KSMBD_INODE_STATUS_UNKNOWN;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(dentry);
	read_unlock(&inode_hash_lock);
	if (!ci)
		return ret;

	down_read(&ci->m_lock);
	if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
		ret = KSMBD_INODE_STATUS_PENDING_DELETE;
	else
		ret = KSMBD_INODE_STATUS_OK;
	up_read(&ci->m_lock);

	atomic_dec(&ci->m_count);
	return ret;
}

bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;
	int ret;

	down_read(&ci->m_lock);
	ret = (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
	up_read(&ci->m_lock);

	return ret;
}

void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	ci->m_flags |= S_DEL_PENDING;
	up_write(&ci->m_lock);
}

void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	ci->m_flags &= ~S_DEL_PENDING;
	up_write(&ci->m_lock);
}

void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
				  int file_info)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	if (ksmbd_stream_fd(fp))
		ci->m_flags |= S_DEL_ON_CLS_STREAM;
	else
		ci->m_flags |= S_DEL_ON_CLS;
	up_write(&ci->m_lock);
}
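
/*
 * Minimal usage sketch for the delete-on-close flags set above, assuming
 * a normal close path: the flags are consumed by __ksmbd_inode_close()
 * when the last ksmbd_inode reference drops, which is where the actual
 * unlink happens.
 *
 *	ksmbd_fd_set_delete_on_close(fp, file_info);
 *	...
 *	ksmbd_close_fd(work, volatile_id);	// last close triggers unlink
 */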

static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
	struct hlist_head *b = inode_hashtable +
		inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);

	hlist_add_head(&ci->m_hash, b);
}

static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
	write_lock(&inode_hash_lock);
	hlist_del_init(&ci->m_hash);
	write_unlock(&inode_hash_lock);
}

static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
	atomic_set(&ci->m_count, 1);
	atomic_set(&ci->op_count, 0);
	atomic_set(&ci->sop_count, 0);
	ci->m_flags = 0;
	ci->m_fattr = 0;
	INIT_LIST_HEAD(&ci->m_fp_list);
	INIT_LIST_HEAD(&ci->m_op_list);
	init_rwsem(&ci->m_lock);
	ci->m_de = fp->filp->f_path.dentry;
	return 0;
}

static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci, *tmpci;
	int rc;

	read_lock(&inode_hash_lock);
	ci = ksmbd_inode_lookup(fp);
	read_unlock(&inode_hash_lock);
	if (ci)
		return ci;

	ci = kmalloc_obj(struct ksmbd_inode, KSMBD_DEFAULT_GFP);
	if (!ci)
		return NULL;

	rc = ksmbd_inode_init(ci, fp);
	if (rc) {
		pr_err("inode initialization failed\n");
		kfree(ci);
		return NULL;
	}

	write_lock(&inode_hash_lock);
	tmpci = ksmbd_inode_lookup(fp);
	if (!tmpci) {
		ksmbd_inode_hash(ci);
	} else {
		kfree(ci);
		ci = tmpci;
	}
	write_unlock(&inode_hash_lock);
	return ci;
}

static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
	ksmbd_inode_unhash(ci);
	kfree(ci);
}

void ksmbd_inode_put(struct ksmbd_inode *ci)
{
	if (atomic_dec_and_test(&ci->m_count))
		ksmbd_inode_free(ci);
}

int __init ksmbd_inode_hash_init(void)
{
	unsigned int loop;
	unsigned long numentries = 16384;
	unsigned long bucketsize = sizeof(struct hlist_head);
	unsigned long size;

	inode_hash_shift = ilog2(numentries);
	inode_hash_mask = (1 << inode_hash_shift) - 1;

	size = bucketsize << inode_hash_shift;

	/* init master fp hash table */
	inode_hashtable = vmalloc(size);
	if (!inode_hashtable)
		return -ENOMEM;

	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
	return 0;
}

void ksmbd_release_inode_hash(void)
{
	vfree(inode_hashtable);
}

static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;
	int err;
	struct file *filp;

	filp = fp->filp;

	if (ksmbd_stream_fd(fp)) {
		bool remove_stream_xattr = false;

		down_write(&ci->m_lock);
		if (ci->m_flags & S_DEL_ON_CLS_STREAM) {
			ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
			remove_stream_xattr = true;
		}
		up_write(&ci->m_lock);

		if (remove_stream_xattr) {
			err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
						     &filp->f_path,
						     fp->stream.name,
						     true);
			if (err)
				pr_err("remove xattr failed : %s\n",
				       fp->stream.name);
		}
	}

	if (atomic_dec_and_test(&ci->m_count)) {
		bool do_unlink = false;

		down_write(&ci->m_lock);
		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
			do_unlink = true;
		}
		up_write(&ci->m_lock);

		if (do_unlink)
			ksmbd_vfs_unlink(filp);

		ksmbd_inode_free(ci);
	}
}
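
/*
 * Reference sketch for ci->m_count as implemented above (a summary, not
 * an exhaustive list of callers):
 *
 *	ksmbd_inode_get()	+1  open path; a freshly hashed ci starts at 1
 *	__ksmbd_inode_lookup()	+1  transient, via atomic_inc_not_zero()
 *	ksmbd_inode_put()	-1  unhashes and frees on the last put
 *	__ksmbd_inode_close()	-1  per-fd ref, dropped at fd close
 */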

static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	if (!has_file_id(fp->persistent_id))
		return;

	idr_remove(global_ft.idr, fp->persistent_id);
	/*
	 * Clear persistent_id so a later __ksmbd_close_fd() that runs from a
	 * delayed putter (e.g. when a concurrent ksmbd_lookup_fd_inode()
	 * walker held the final reference) does not re-issue idr_remove() on
	 * an id that idr_alloc_cyclic() may have already handed out to a new
	 * durable handle.
	 */
	fp->persistent_id = KSMBD_NO_FID;
}

static void ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	write_lock(&global_ft.lock);
	__ksmbd_remove_durable_fd(fp);
	write_unlock(&global_ft.lock);
	if (waitqueue_active(&dh_wq))
		wake_up(&dh_wq);
}

static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	down_write(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	up_write(&fp->f_ci->m_lock);

	if (!has_file_id(fp->volatile_id))
		return;

	write_lock(&ft->lock);
	idr_remove(ft->idr, fp->volatile_id);
	write_unlock(&ft->lock);
}

static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	struct file *filp;
	struct ksmbd_lock *smb_lock, *tmp_lock;

	fd_limit_close();
	ksmbd_remove_durable_fd(fp);
	if (ft)
		__ksmbd_remove_fd(ft, fp);

	close_id_del_oplock(fp);
	filp = fp->filp;

	__ksmbd_inode_close(fp);
	if (!IS_ERR_OR_NULL(filp))
		fput(filp);

	/*
	 * Because fp's reference count is zero, it is guaranteed that
	 * nothing else can access fp->lock_list.
	 */
	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
		if (!list_empty(&smb_lock->clist) && fp->conn) {
			spin_lock(&fp->conn->llist_lock);
			list_del(&smb_lock->clist);
			spin_unlock(&fp->conn->llist_lock);
		}

		list_del(&smb_lock->flist);
		locks_free_lock(smb_lock->fl);
		kfree(smb_lock);
	}

	/*
	 * Drop fp's strong reference on conn (taken in ksmbd_open_fd() /
	 * ksmbd_reopen_durable_fd()). Durable fps that reached the
	 * scavenger have already had fp->conn cleared by session_fd_check(),
	 * in which case there is nothing to drop here.
	 */
	if (fp->conn) {
		ksmbd_conn_put(fp->conn);
		fp->conn = NULL;
	}

	if (ksmbd_stream_fd(fp))
		kfree(fp->stream.name);
	kfree(fp->owner.name);

	kmem_cache_free(filp_cache, fp);
}

static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
	if (fp->f_state != FP_INITED)
		return NULL;

	if (!atomic_inc_not_zero(&fp->refcount))
		return NULL;
	return fp;
}

static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
					    u64 id)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id))
		return NULL;

	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp)
		fp = ksmbd_fp_get(fp);
	read_unlock(&ft->lock);
	return fp;
}
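
/*
 * fp->refcount ownership model assumed by the helpers above and the
 * teardown paths below (a summary of this file's conventions):
 *
 *	1 reference owned by the idr entry while fp->f_state == FP_INITED
 *	+1 transient reference per successful ksmbd_fp_get() lookup
 *
 * Whoever drops the count to zero performs the final close; lookups use
 * atomic_inc_not_zero() so a dying fp can never be revived.
 */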

static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	/*
	 * Detached durable fp -- session_fd_check() cleared fp->conn at
	 * preserve, so this fp is no longer tracked by any conn's
	 * stats.open_files_count. This happens when
	 * ksmbd_scavenger_dispose_dh() hands the final close off to an
	 * m_fp_list walker (e.g. ksmbd_lookup_fd_inode()) whose work->conn
	 * is unrelated to the conn that originally opened the handle; close
	 * via the NULL-ft path so we do not underflow that unrelated
	 * counter.
	 */
	if (!fp->conn) {
		__ksmbd_close_fd(NULL, fp);
		return;
	}
	__ksmbd_close_fd(&work->sess->file_table, fp);
	atomic_dec(&work->conn->stats.open_files_count);
}

static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
	struct ksmbd_work *cancel_work;

	spin_lock(&fp->f_lock);
	list_for_each_entry(cancel_work, &fp->blocked_works,
			    fp_entry) {
		cancel_work->state = KSMBD_WORK_CLOSED;
		cancel_work->cancel_fn(cancel_work->cancel_argv);
	}
	spin_unlock(&fp->f_lock);
}

int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp;
	struct ksmbd_file_table *ft;

	if (!has_file_id(id))
		return 0;

	ft = &work->sess->file_table;
	write_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp) {
		set_close_state_blocked_works(fp);

		if (fp->f_state != FP_INITED)
			fp = NULL;
		else {
			fp->f_state = FP_CLOSED;
			if (!atomic_dec_and_test(&fp->refcount))
				fp = NULL;
		}
	}
	write_unlock(&ft->lock);

	if (!fp)
		return -EINVAL;

	__put_fd_final(work, fp);
	return 0;
}

void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	if (!fp)
		return;

	if (!atomic_dec_and_test(&fp->refcount))
		return;
	__put_fd_final(work, fp);
}

static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
	if (!fp)
		return false;
	if (fp->tcon != tcon)
		return false;
	return true;
}

struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
	return __ksmbd_lookup_fd(&work->sess->file_table, id);
}

struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);

	if (__sanity_check(work->tcon, fp))
		return fp;

	ksmbd_fd_put(work, fp);
	return NULL;
}

struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
					u64 pid)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id)) {
		id = work->compound_fid;
		pid = work->compound_pfid;
	}

	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
	if (!__sanity_check(work->tcon, fp)) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	if (fp->persistent_id != pid) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	return fp;
}

struct ksmbd_file *ksmbd_lookup_global_fd(unsigned long long id)
{
	return __ksmbd_lookup_fd(&global_ft, id);
}

struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
	struct ksmbd_file *fp;

	fp = __ksmbd_lookup_fd(&global_ft, id);
	if (fp && (fp->conn ||
		   (fp->durable_scavenger_timeout &&
		    (fp->durable_scavenger_timeout <
		     jiffies_to_msecs(jiffies))))) {
		ksmbd_put_durable_fd(fp);
		fp = NULL;
	}

	return fp;
}

void ksmbd_put_durable_fd(struct ksmbd_file *fp)
{
	if (!atomic_dec_and_test(&fp->refcount))
		return;

	__ksmbd_close_fd(NULL, fp);
}

struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		if (!memcmp(fp->create_guid,
			    cguid,
			    SMB2_CREATE_GUID_SIZE)) {
			fp = ksmbd_fp_get(fp);
			break;
		}
	}
	read_unlock(&global_ft.lock);

	return fp;
}
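
/*
 * Typical caller pairing for the lookup helpers above (a minimal sketch;
 * request handling and error paths elided):
 *
 *	struct ksmbd_file *fp;
 *
 *	fp = ksmbd_lookup_fd_fast(work, volatile_id);
 *	if (!fp)
 *		return -ENOENT;
 *	// ... operate on fp->filp ...
 *	ksmbd_fd_put(work, fp);		// may run __put_fd_final()
 */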

struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
{
	struct ksmbd_file *lfp;
	struct ksmbd_inode *ci;
	struct inode *inode = d_inode(dentry);

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(dentry);
	read_unlock(&inode_hash_lock);
	if (!ci)
		return NULL;

	down_read(&ci->m_lock);
	list_for_each_entry(lfp, &ci->m_fp_list, node) {
		if (inode == file_inode(lfp->filp)) {
			atomic_dec(&ci->m_count);
			lfp = ksmbd_fp_get(lfp);
			up_read(&ci->m_lock);
			return lfp;
		}
	}
	atomic_dec(&ci->m_count);
	up_read(&ci->m_lock);
	return NULL;
}

#define OPEN_ID_TYPE_VOLATILE_ID	(0)
#define OPEN_ID_TYPE_PERSISTENT_ID	(1)

static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
	if (type == OPEN_ID_TYPE_VOLATILE_ID)
		fp->volatile_id = id;
	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
		fp->persistent_id = id;
}

static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
		     int type)
{
	u64 id = 0;
	int ret;

	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
		__open_id_set(fp, KSMBD_NO_FID, type);
		return -EMFILE;
	}

	idr_preload(KSMBD_DEFAULT_GFP);
	write_lock(&ft->lock);
	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
	if (ret >= 0) {
		id = ret;
		ret = 0;
	} else {
		id = KSMBD_NO_FID;
		fd_limit_close();
	}

	__open_id_set(fp, id, type);
	write_unlock(&ft->lock);
	idr_preload_end();
	return ret;
}

unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
	return fp->persistent_id;
}

struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
	struct ksmbd_file *fp;
	int ret;

	fp = kmem_cache_zalloc(filp_cache, KSMBD_DEFAULT_GFP);
	if (!fp) {
		pr_err("Failed to allocate memory\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&fp->blocked_works);
	INIT_LIST_HEAD(&fp->node);
	INIT_LIST_HEAD(&fp->lock_list);
	spin_lock_init(&fp->f_lock);
	atomic_set(&fp->refcount, 1);

	fp->filp = filp;
	/*
	 * fp owns a strong reference on fp->conn for as long as fp->conn is
	 * non-NULL, so session_fd_check() and __ksmbd_close_fd() never
	 * dereference a dangling pointer. Paired with ksmbd_conn_put() in
	 * session_fd_check() (durable preserve), in __ksmbd_close_fd()
	 * (final close), and on the error paths below.
	 */
	fp->conn = ksmbd_conn_get(work->conn);
	fp->tcon = work->tcon;
	fp->volatile_id = KSMBD_NO_FID;
	fp->persistent_id = KSMBD_NO_FID;
	fp->f_state = FP_NEW;
	fp->f_ci = ksmbd_inode_get(fp);

	if (!fp->f_ci) {
		ret = -ENOMEM;
		goto err_out;
	}

	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (ret) {
		ksmbd_inode_put(fp->f_ci);
		goto err_out;
	}

	atomic_inc(&work->conn->stats.open_files_count);
	return fp;

err_out:
	/* fp->conn was set and refcounted before every branch here. */
	ksmbd_conn_put(fp->conn);
	kmem_cache_free(filp_cache, fp);
	return ERR_PTR(ret);
}
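
/*
 * Open-path lifecycle sketch, condensed from how the SMB2 create handler
 * is assumed to drive this file (not a verbatim excerpt):
 *
 *	fp = ksmbd_open_fd(work, filp);		// refcount 1, FP_NEW
 *	// ... set up oplocks/leases, fill the create response ...
 *	if (ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED))
 *		// teardown raced the open: unwind, drop the initial ref
 *	// ... later ...
 *	ksmbd_close_fd(work, fp->volatile_id);	// FP_CLOSED, final put
 */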

/**
 * ksmbd_update_fstate() - update an fp state under the file-table lock
 * @ft:		file table that publishes @fp's volatile id
 * @fp:		file pointer to update
 * @state:	new state
 *
 * Return: 0 on success. The FP_NEW -> FP_INITED transition is special:
 * -ENOENT if teardown already unpublished @fp by advancing the state or
 * clearing the volatile id. Other state updates preserve the historical
 * fire-and-forget behavior.
 */
int ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
			unsigned int state)
{
	int ret;

	if (!fp)
		return -ENOENT;

	write_lock(&ft->lock);
	if (state == FP_INITED &&
	    (fp->f_state != FP_NEW || !has_file_id(fp->volatile_id))) {
		ret = -ENOENT;
	} else {
		fp->f_state = state;
		ret = 0;
	}
	write_unlock(&ft->lock);

	return ret;
}

/*
 * ksmbd_mark_fp_closed() - mark fp closed under ft->lock and return how many
 * refs the teardown path owns.
 *
 * FP_INITED has a normal idr-owned reference, so teardown owns both that
 * reference and the transient lookup reference. FP_NEW is still owned by the
 * in-flight opener/reopener, which will drop the original reference after
 * ksmbd_update_fstate(..., FP_INITED) observes the cleared volatile id.
 * FP_CLOSED on entry means an earlier ksmbd_close_fd() already consumed the
 * idr-owned ref.
 */
static int ksmbd_mark_fp_closed(struct ksmbd_file *fp)
{
	if (fp->f_state == FP_INITED) {
		set_close_state_blocked_works(fp);
		fp->f_state = FP_CLOSED;
		return 2;
	}

	return 1;
}
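
/*
 * Worked example of the n_to_drop arithmetic, assuming an FP_INITED fp
 * with no concurrent users when teardown finds it:
 *
 *	refcount = 1			idr-owned reference
 *	atomic_inc_not_zero()	-> 2	transient teardown reference
 *	ksmbd_mark_fp_closed()		returns n_to_drop = 2
 *	atomic_sub_and_test(2)	-> 0	teardown performs the final close
 */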

static int
__close_file_table_ids(struct ksmbd_session *sess,
		       struct ksmbd_tree_connect *tcon,
		       bool (*skip)(struct ksmbd_tree_connect *tcon,
				    struct ksmbd_file *fp,
				    struct ksmbd_user *user),
		       bool skip_preserves_fp)
{
	struct ksmbd_file_table *ft = &sess->file_table;
	struct ksmbd_file *fp;
	unsigned int id = 0;
	int num = 0;

	while (1) {
		int n_to_drop;

		write_lock(&ft->lock);
		fp = idr_get_next(ft->idr, &id);
		if (!fp) {
			write_unlock(&ft->lock);
			break;
		}
		if (!atomic_inc_not_zero(&fp->refcount)) {
			id++;
			write_unlock(&ft->lock);
			continue;
		}

		if (skip_preserves_fp) {
			/*
			 * Session teardown: skip() is session_fd_check(),
			 * which may sleep and mutates fp->conn / fp->tcon /
			 * fp->volatile_id when it chooses to preserve fp
			 * for durable reconnect. Unpublish fp from the
			 * session idr here, under ft->lock, so that
			 * __ksmbd_lookup_fd() through this session cannot
			 * grant a new ksmbd_fp_get() reference to an fp
			 * whose fields are about to be rewritten outside
			 * the lock. Durable reconnect still reaches fp via
			 * global_ft.
			 */
			idr_remove(ft->idr, id);
			fp->volatile_id = KSMBD_NO_FID;
			write_unlock(&ft->lock);

			if (skip(tcon, fp, sess->user)) {
				/*
				 * session_fd_check() has converted fp to
				 * durable-preserve state and cleared its
				 * per-conn fields. fp is already unpublished
				 * above; the original idr-owned ref keeps it
				 * alive for the durable scavenger. Drop only
				 * the transient ref. atomic_dec() is safe --
				 * atomic_inc_not_zero() succeeded on a
				 * positive value and we added one more, so
				 * refcount cannot be zero here.
				 */
				atomic_dec(&fp->refcount);
				id++;
				continue;
			}

			/*
			 * Keep the close-state decision under the same lock
			 * observed by ksmbd_update_fstate(), which is how an
			 * in-flight FP_NEW opener learns that teardown has
			 * cleared its volatile id.
			 */
			write_lock(&ft->lock);
			n_to_drop = ksmbd_mark_fp_closed(fp);
			write_unlock(&ft->lock);
		} else {
			/*
			 * Tree teardown: skip() is tree_conn_fd_check(), a
			 * cheap pointer compare that doesn't sleep and has
			 * no side effects, so keep the skip decision plus
			 * the unpublish-and-mark-closed sequence atomic
			 * under ft->lock. fps belonging to other tree
			 * connects (skip() == true) stay fully published in
			 * the session idr with no lock window.
			 */
			if (skip(tcon, fp, sess->user)) {
				atomic_dec(&fp->refcount);
				write_unlock(&ft->lock);
				id++;
				continue;
			}
			idr_remove(ft->idr, id);
			fp->volatile_id = KSMBD_NO_FID;
			n_to_drop = ksmbd_mark_fp_closed(fp);
			write_unlock(&ft->lock);
		}

		/*
		 * fp->volatile_id is already cleared to prevent stale idr
		 * removal from a deferred final close. Remove fp from
		 * m_fp_list here because __ksmbd_remove_fd() will skip the
		 * list unlink when volatile_id is KSMBD_NO_FID.
		 */
		down_write(&fp->f_ci->m_lock);
		list_del_init(&fp->node);
		up_write(&fp->f_ci->m_lock);

		/*
		 * Drop the references this iteration owns:
		 *
		 * n_to_drop == 2: we observed FP_INITED and committed
		 * the FP_CLOSED transition ourselves, so we own the
		 * transient (+1) and the still-intact idr-owned ref.
		 *
		 * n_to_drop == 1: either a prior ksmbd_close_fd()
		 * already consumed the idr-owned ref, or fp was still
		 * FP_NEW and the in-flight opener/reopener must keep
		 * the original reference until ksmbd_update_fstate()
		 * observes the cleared volatile id.
		 *
		 * If we end up as the final putter, finalize fp and
		 * account the open_files_count decrement via the caller's
		 * atomic_sub(num, ...). Otherwise the remaining user's
		 * ksmbd_fd_put() reaches __put_fd_final(), which does its
		 * own atomic_dec(&open_files_count), so we must not count
		 * this fp here -- doing so would double-decrement the
		 * connection-wide counter.
		 */
		if (atomic_sub_and_test(n_to_drop, &fp->refcount)) {
			__ksmbd_close_fd(NULL, fp);
			num++;
		}
		id++;
	}

	return num;
}

static inline bool is_reconnectable(struct ksmbd_file *fp)
{
	struct oplock_info *opinfo = opinfo_get(fp);
	bool reconn = false;

	if (!opinfo)
		return false;

	if (opinfo->op_state != OPLOCK_STATE_NONE) {
		opinfo_put(opinfo);
		return false;
	}

	if (fp->is_resilient || fp->is_persistent)
		reconn = true;
	else if (fp->is_durable && opinfo->is_lease &&
		 opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
		reconn = true;
	else if (fp->is_durable && opinfo->level == SMB2_OPLOCK_LEVEL_BATCH)
		reconn = true;

	opinfo_put(opinfo);
	return reconn;
}

static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
			       struct ksmbd_file *fp,
			       struct ksmbd_user *user)
{
	return fp->tcon != tcon;
}

static bool ksmbd_durable_scavenger_alive(void)
{
	if (!durable_scavenger_running)
		return false;

	if (kthread_should_stop())
		return false;

	if (idr_is_empty(global_ft.idr))
		return false;

	return true;
}
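
/*
 * dh_wq wakeups observed by the scavenger loop below come from
 * ksmbd_remove_durable_fd() (a durable handle left global_ft) and
 * ksmbd_stop_durable_scavenger() (teardown); the loop otherwise sleeps
 * for min_timeout, the shortest remaining durable timeout it has seen.
 */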

static void ksmbd_scavenger_dispose_dh(struct ksmbd_file *fp)
{
	/*
	 * Durable-preserved fp can remain linked on f_ci->m_fp_list for
	 * share-mode checks. Unlink it before final close; fp->node is not
	 * available as a scavenger-private list node because re-adding it to
	 * another list corrupts m_fp_list.
	 */
	down_write(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	up_write(&fp->f_ci->m_lock);

	/*
	 * Drop both the durable lifetime reference and the transient reference
	 * taken by the scavenger under global_ft.lock. If a concurrent
	 * ksmbd_lookup_fd_inode() (or any other m_fp_list walker) snatched fp
	 * before the unlink above, that holder owns the final close via
	 * ksmbd_fd_put() -> __ksmbd_close_fd(). Otherwise the scavenger is
	 * the last putter and finalises fp here.
	 */
	if (atomic_sub_and_test(2, &fp->refcount))
		__ksmbd_close_fd(NULL, fp);
}
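
/*
 * Worked example for the atomic_sub_and_test(2, ...) above, assuming no
 * concurrent m_fp_list walker:
 *
 *	refcount = 1	durable lifetime ref (idr entry already removed)
 *	+1 taken in the scavenger loop under global_ft.lock	-> 2
 *	atomic_sub_and_test(2)					-> 0, close
 *
 * With a concurrent walker holding its own reference, the subtraction
 * leaves the count positive and the walker's ksmbd_fd_put() finalises fp.
 */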

static int ksmbd_durable_scavenger(void *dummy)
{
	struct ksmbd_file *fp = NULL;
	struct ksmbd_file *expired_fp;
	unsigned int id;
	unsigned int min_timeout = 1;
	bool found_fp_timeout;
	unsigned long remaining_jiffies;

	__module_get(THIS_MODULE);

	set_freezable();
	while (ksmbd_durable_scavenger_alive()) {
		if (try_to_freeze())
			continue;

		remaining_jiffies = wait_event_timeout(dh_wq,
				ksmbd_durable_scavenger_alive() == false,
				__msecs_to_jiffies(min_timeout));
		if (remaining_jiffies)
			min_timeout = jiffies_to_msecs(remaining_jiffies);
		else
			min_timeout = DURABLE_HANDLE_MAX_TIMEOUT;

		do {
			expired_fp = NULL;
			found_fp_timeout = false;

			write_lock(&global_ft.lock);
			idr_for_each_entry(global_ft.idr, fp, id) {
				unsigned long durable_timeout;

				if (!fp->durable_timeout)
					continue;

				if (atomic_read(&fp->refcount) > 1 ||
				    fp->conn)
					continue;

				found_fp_timeout = true;
				if (fp->durable_scavenger_timeout <=
				    jiffies_to_msecs(jiffies)) {
					__ksmbd_remove_durable_fd(fp);
					/*
					 * Take a transient reference so fp
					 * cannot be freed by an in-flight
					 * ksmbd_lookup_fd_inode() that found
					 * it through f_ci->m_fp_list while we
					 * drop global_ft.lock and reach the
					 * m_fp_list unlink in
					 * ksmbd_scavenger_dispose_dh().
					 */
					atomic_inc(&fp->refcount);
					expired_fp = fp;
					break;
				}

				durable_timeout =
					fp->durable_scavenger_timeout -
					jiffies_to_msecs(jiffies);

				if (min_timeout > durable_timeout)
					min_timeout = durable_timeout;
			}
			write_unlock(&global_ft.lock);

			if (expired_fp)
				ksmbd_scavenger_dispose_dh(expired_fp);
		} while (expired_fp);

		if (found_fp_timeout == false)
			break;
	}

	durable_scavenger_running = false;

	module_put(THIS_MODULE);

	return 0;
}

void ksmbd_launch_ksmbd_durable_scavenger(void)
{
	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
		return;

	mutex_lock(&durable_scavenger_lock);
	if (durable_scavenger_running == true) {
		mutex_unlock(&durable_scavenger_lock);
		return;
	}

	durable_scavenger_running = true;

	server_conf.dh_task = kthread_run(ksmbd_durable_scavenger,
					  (void *)NULL,
					  "ksmbd-durable-scavenger");
	if (IS_ERR(server_conf.dh_task))
		pr_err("cannot start durable scavenger thread, err : %ld\n",
		       PTR_ERR(server_conf.dh_task));
	mutex_unlock(&durable_scavenger_lock);
}

void ksmbd_stop_durable_scavenger(void)
{
	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
		return;

	mutex_lock(&durable_scavenger_lock);
	if (!durable_scavenger_running) {
		mutex_unlock(&durable_scavenger_lock);
		return;
	}

	durable_scavenger_running = false;
	if (waitqueue_active(&dh_wq))
		wake_up(&dh_wq);
	mutex_unlock(&durable_scavenger_lock);
	kthread_stop(server_conf.dh_task);
}

/**
 * ksmbd_vfs_copy_durable_owner - Copy owner info for durable reconnect
 * @fp:		ksmbd file pointer to store owner info
 * @user:	user pointer to copy from
 *
 * This function binds the current user's identity to the file handle
 * to satisfy MS-SMB2 Step 8 (SecurityContext matching) during reconnect.
 *
 * Return: 0 on success, or negative error code on failure
 */
static int ksmbd_vfs_copy_durable_owner(struct ksmbd_file *fp,
					struct ksmbd_user *user)
{
	if (!user)
		return -EINVAL;

	/* Duplicate the user name to ensure identity persistence */
	fp->owner.name = kstrdup(user->name, GFP_KERNEL);
	if (!fp->owner.name)
		return -ENOMEM;

	fp->owner.uid = user->uid;
	fp->owner.gid = user->gid;

	return 0;
}

/**
 * ksmbd_vfs_compare_durable_owner - Verify if the requester is the original owner
 * @fp:		existing ksmbd file pointer
 * @user:	user pointer of the reconnect requester
 *
 * Compares the UID, GID, and name of the current requester against the
 * original owner stored in the file handle.
 *
 * Return: true if the user matches, false otherwise
 */
bool ksmbd_vfs_compare_durable_owner(struct ksmbd_file *fp,
				     struct ksmbd_user *user)
{
	if (!user || !fp->owner.name)
		return false;

	/* Check if the UID and GID match first (fast path) */
	if (fp->owner.uid != user->uid || fp->owner.gid != user->gid)
		return false;

	/* Validate the account name to ensure the same SecurityContext */
	if (strcmp(fp->owner.name, user->name))
		return false;

	return true;
}
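
/*
 * The two owner helpers pair across a disconnect: session_fd_check()
 * below calls ksmbd_vfs_copy_durable_owner() when it preserves a durable
 * handle, and the SMB2 reconnect path is assumed to gate
 * ksmbd_reopen_durable_fd() on ksmbd_vfs_compare_durable_owner()
 * succeeding for the requesting user.
 */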

static bool session_fd_check(struct ksmbd_tree_connect *tcon,
			     struct ksmbd_file *fp, struct ksmbd_user *user)
{
	struct ksmbd_inode *ci;
	struct oplock_info *op;
	struct ksmbd_conn *conn;
	struct ksmbd_lock *smb_lock, *tmp_lock;

	if (!is_reconnectable(fp))
		return false;

	if (fp->f_state != FP_INITED)
		return false;

	if (WARN_ON_ONCE(!fp->conn))
		return false;

	if (ksmbd_vfs_copy_durable_owner(fp, user))
		return false;

	/*
	 * fp owns a strong reference on fp->conn (taken in ksmbd_open_fd()
	 * / ksmbd_reopen_durable_fd()), so conn stays valid for the whole
	 * body of this function regardless of any op->conn puts below.
	 */
	conn = fp->conn;
	ci = fp->f_ci;
	down_write(&ci->m_lock);
	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
		if (op->conn != conn)
			continue;
		ksmbd_conn_put(op->conn);
		op->conn = NULL;
	}
	up_write(&ci->m_lock);

	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
		spin_lock(&conn->llist_lock);
		list_del_init(&smb_lock->clist);
		spin_unlock(&conn->llist_lock);
	}

	fp->conn = NULL;
	fp->tcon = NULL;
	fp->volatile_id = KSMBD_NO_FID;

	if (fp->durable_timeout)
		fp->durable_scavenger_timeout =
			jiffies_to_msecs(jiffies) + fp->durable_timeout;

	/* Drop fp's own reference on conn. */
	ksmbd_conn_put(conn);
	return true;
}

void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(work->sess,
					 work->tcon,
					 tree_conn_fd_check,
					 false);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

void ksmbd_close_session_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(work->sess,
					 work->tcon,
					 session_fd_check,
					 true);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

int ksmbd_init_global_file_table(void)
{
	create_proc_files();
	return ksmbd_init_file_table(&global_ft);
}

void ksmbd_free_global_file_table(void)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;

	idr_for_each_entry(global_ft.idr, fp, id) {
		ksmbd_remove_durable_fd(fp);
		__ksmbd_close_fd(NULL, fp);
	}

	idr_destroy(global_ft.idr);
	kfree(global_ft.idr);
}

int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
				  struct ksmbd_file *fp, char *name)
{
	char *pathname, *ab_pathname;
	int ret = 0;

	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
	if (!pathname)
		return -EACCES;

	ab_pathname = d_path(&fp->filp->f_path, pathname, PATH_MAX);
	if (IS_ERR(ab_pathname)) {
		kfree(pathname);
		return -EACCES;
	}

	if (name && strcmp(&ab_pathname[share->path_sz + 1], name)) {
		ksmbd_debug(SMB, "invalid name reconnect %s\n", name);
		ret = -EINVAL;
	}

	kfree(pathname);

	return ret;
}
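
/*
 * Durable reconnect sketch, condensed from how the SMB2 create handler
 * is assumed to drive the helpers in this file (not a verbatim excerpt):
 *
 *	fp = ksmbd_lookup_durable_fd(persistent_id);
 *	if (!fp ||
 *	    !ksmbd_vfs_compare_durable_owner(fp, sess->user) ||
 *	    ksmbd_validate_name_reconnect(share, fp, name))
 *		// reject the reconnect
 *	ksmbd_reopen_durable_fd(work, fp);	// rebind conn/tcon, new vid
 */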

int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci;
	struct oplock_info *op;
	struct ksmbd_conn *conn = work->conn;
	struct ksmbd_lock *smb_lock;
	unsigned int old_f_state;

	if (!fp->is_durable || fp->conn || fp->tcon) {
		pr_err("Invalid durable fd [%p:%p]\n", fp->conn, fp->tcon);
		return -EBADF;
	}

	if (has_file_id(fp->volatile_id)) {
		pr_err("Still in use durable fd: %llu\n", fp->volatile_id);
		return -EBADF;
	}

	old_f_state = fp->f_state;
	fp->f_state = FP_NEW;

	/*
	 * Initialize fp's connection binding before publishing fp into the
	 * session's file table. If __open_id() is ordered first, a
	 * concurrent teardown that iterates the table can observe a valid
	 * volatile_id with fp->conn == NULL and preserve a
	 * partially-initialized fp. fp owns a strong reference on the new
	 * conn (see ksmbd_open_fd()); undo it on __open_id() failure.
	 */
	fp->conn = ksmbd_conn_get(conn);
	fp->tcon = work->tcon;

	__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (!has_file_id(fp->volatile_id)) {
		fp->conn = NULL;
		fp->tcon = NULL;
		ksmbd_conn_put(conn);
		fp->f_state = old_f_state;
		return -EBADF;
	}

	list_for_each_entry(smb_lock, &fp->lock_list, flist) {
		spin_lock(&conn->llist_lock);
		list_add_tail(&smb_lock->clist, &conn->lock_list);
		spin_unlock(&conn->llist_lock);
	}

	ci = fp->f_ci;
	down_write(&ci->m_lock);
	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
		if (op->conn)
			continue;
		op->conn = ksmbd_conn_get(fp->conn);
	}
	up_write(&ci->m_lock);

	fp->owner.uid = fp->owner.gid = 0;
	kfree(fp->owner.name);
	fp->owner.name = NULL;

	return 0;
}

int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
	ft->idr = kzalloc_obj(struct idr, KSMBD_DEFAULT_GFP);
	if (!ft->idr)
		return -ENOMEM;

	idr_init(ft->idr);
	rwlock_init(&ft->lock);
	return 0;
}

void ksmbd_destroy_file_table(struct ksmbd_session *sess)
{
	struct ksmbd_file_table *ft = &sess->file_table;

	if (!ft->idr)
		return;

	__close_file_table_ids(sess, NULL, session_fd_check, true);
	idr_destroy(ft->idr);
	kfree(ft->idr);
	ft->idr = NULL;
}

int ksmbd_init_file_cache(void)
{
	filp_cache = kmem_cache_create("ksmbd_file_cache",
				       sizeof(struct ksmbd_file), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!filp_cache)
		goto out;

	init_waitqueue_head(&dh_wq);

	return 0;

out:
	pr_err("failed to allocate file cache\n");
	return -ENOMEM;
}

void ksmbd_exit_file_cache(void)
{
	kmem_cache_destroy(filp_cache);
}