1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T. 26 * All rights reserved. 27 */ 28 /* 29 * Copyright (c) 2017 by Delphix. All rights reserved. 30 */ 31 32 /* 33 * Node hash implementation initially borrowed from NFS (nfs_subr.c) 34 * but then heavily modified. It's no longer an array of hash lists, 35 * but an AVL tree per mount point. More on this below. 36 */ 37 38 #include <sys/param.h> 39 #include <sys/systm.h> 40 #include <sys/time.h> 41 #include <sys/vnode.h> 42 #include <sys/bitmap.h> 43 #include <sys/dnlc.h> 44 #include <sys/kmem.h> 45 #include <sys/sunddi.h> 46 #include <sys/sysmacros.h> 47 48 #include <netsmb/smb_osdep.h> 49 50 #include <netsmb/smb.h> 51 #include <netsmb/smb_conn.h> 52 #include <netsmb/smb_subr.h> 53 #include <netsmb/smb_rq.h> 54 55 #include <smbfs/smbfs.h> 56 #include <smbfs/smbfs_node.h> 57 #include <smbfs/smbfs_subr.h> 58 59 /* 60 * The AVL trees (now per-mount) allow finding an smbfs node by its 61 * full remote path name. 
It also allows easy traversal of all nodes
 * below (path wise) any given node.  A reader/writer lock for each
 * (per mount) AVL tree is used to control access and to synchronize
 * lookups, additions, and deletions from that AVL tree.
 *
 * Previously, this code used a global array of hash chains, each with
 * its own rwlock.  A few struct members, functions, and comments may
 * still refer to a "hash", and those should all now be considered to
 * refer to the per-mount AVL tree that replaced the old hash chains.
 * (i.e. member smi_hash_lk, function sn_hashfind, etc.)
 *
 * The smbnode freelist is organized as a doubly linked list with
 * a head pointer.  Additions and deletions are synchronized via
 * a single mutex.
 *
 * In order to add an smbnode to the free list, it must be linked into
 * the mount's AVL tree and the exclusive lock for the AVL must be held.
 * If an smbnode is not linked into the AVL tree, then it is destroyed
 * because it represents no valuable information that can be reused
 * about the file.  The exclusive lock for the AVL tree must be held
 * in order to prevent a lookup in the AVL tree from finding the
 * smbnode and using it and assuming that the smbnode is not on the
 * freelist.  The lookup in the AVL tree will have the AVL tree lock
 * held, either exclusive or shared.
 *
 * The vnode reference count for each smbnode is not allowed to drop
 * below 1.  This prevents external entities, such as the VM
 * subsystem, from acquiring references to vnodes already on the
 * freelist and then trying to place them back on the freelist
 * when their reference is released.  This means that when an
 * smbnode is looked up in the AVL tree, then either the smbnode
 * is removed from the freelist and that reference is transferred to
 * the new reference or the vnode reference count must be incremented
 * accordingly.
The mutex for the freelist must be held in order to
 * accurately test to see if the smbnode is on the freelist or not.
 * The AVL tree lock might be held shared and it is possible that
 * two different threads may race to remove the smbnode from the
 * freelist.  This race can be resolved by holding the mutex for the
 * freelist.  Please note that the mutex for the freelist does not
 * need to be held if the smbnode is not on the freelist.  It can not
 * be placed on the freelist due to the requirement that the thread
 * putting the smbnode on the freelist must hold the exclusive lock
 * for the AVL tree and the thread doing the lookup in the AVL tree
 * is holding either a shared or exclusive lock for the AVL tree.
 *
 * The lock ordering is:
 *
 *	AVL tree lock -> vnode lock
 *	AVL tree lock -> freelist lock
 */

/* Protects smbfreelist; smbnodenew is updated with atomics. */
static kmutex_t smbfreelist_lock;
/* Head of the circular, doubly-linked list of reclaimable smbnodes. */
static smbnode_t *smbfreelist = NULL;
/* Number of smbnodes currently allocated (see make_smbnode). */
static ulong_t smbnodenew = 0;
/* Soft limit on allocated smbnodes; tunable, set in smbfs_subrinit(). */
long nsmbnode = 0;

/* kmem cache for smbnode_t allocations; created in smbfs_subrinit(). */
static struct kmem_cache *smbnode_cache;

/* All-zero vsecattr used to clear np->r_secattr in sn_inactive(). */
static const vsecattr_t smbfs_vsa0 = { 0 };

/*
 * Mutex to protect the following variables:
 *	smbfs_major
 *	smbfs_minor
 */
kmutex_t smbfs_minor_lock;
int smbfs_major;
int smbfs_minor;

/* See smbfs_node_findcreate() */
struct smbfattr smbfs_fattr0;

/*
 * Local functions.
 * SN for Smb Node
 */
static void sn_rmfree(smbnode_t *);
static void sn_inactive(smbnode_t *);
static void sn_addhash_locked(smbnode_t *, avl_index_t);
static void sn_rmhash_locked(smbnode_t *);
static void sn_destroy_node(smbnode_t *);
void smbfs_kmem_reclaim(void *cdrarg);

static smbnode_t *
sn_hashfind(smbmntinfo_t *, const char *, int, avl_index_t *);

static smbnode_t *
make_smbnode(smbmntinfo_t *, const char *, int, int *);

/*
 * Free the resources associated with an smbnode.
 * Note: This is different from smbfs_inactive
 *
 * NFS: nfs_subr.c:rinactive
 */
static void
sn_inactive(smbnode_t *np)
{
	vsecattr_t ovsa;
	cred_t *oldcr;
	char *orpath;
	int orplen;

	/*
	 * Flush and invalidate all pages (todo)
	 * Free any held credentials and caches...
	 * etc. (See NFS code)
	 */
	mutex_enter(&np->r_statelock);

	/*
	 * Take local copies of (and clear) the fields that hold
	 * allocated resources, so the actual frees can be done
	 * after r_statelock is dropped.
	 */
	ovsa = np->r_secattr;
	np->r_secattr = smbfs_vsa0;
	np->r_sectime = 0;

	oldcr = np->r_cred;
	np->r_cred = NULL;

	orpath = np->n_rpath;
	orplen = np->n_rplen;
	np->n_rpath = NULL;
	np->n_rplen = 0;

	mutex_exit(&np->r_statelock);

	if (ovsa.vsa_aclentp != NULL)
		kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);

	if (oldcr != NULL)
		crfree(oldcr);

	/* n_rpath was allocated with n_rplen + 1 bytes; see make_smbnode */
	if (orpath != NULL)
		kmem_free(orpath, orplen + 1);
}

/*
 * Find and optionally create an smbnode for the passed
 * mountinfo, directory, separator, and name.  If the
 * desired smbnode already exists, return a reference.
 * If the file attributes pointer is non-null, the node
 * is created if necessary and linked into the AVL tree.
 *
 * Callers that need a node created but don't have the
 * real attributes pass smbfs_fattr0 to force creation.
 *
 * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
 *
 * NFS: nfs_subr.c:makenfsnode
 */
smbnode_t *
smbfs_node_findcreate(
	smbmntinfo_t *mi,
	const char *dirnm,
	int dirlen,
	const char *name,
	int nmlen,
	char sep,
	struct smbfattr *fap)
{
	char tmpbuf[256];
	size_t rpalloc;
	char *p, *rpath;
	int rplen;
	smbnode_t *np;
	vnode_t *vp;
	int newnode;

	/*
	 * Build the search string, either in tmpbuf or
	 * in allocated memory if larger than tmpbuf.
	 * The search string is dirnm + sep + name, and is
	 * NOT null-terminated here (length is rplen).
	 */
	rplen = dirlen;
	if (sep != '\0')
		rplen++;
	rplen += nmlen;
	if (rplen < sizeof (tmpbuf)) {
		/* use tmpbuf */
		rpalloc = 0;
		rpath = tmpbuf;
	} else {
		rpalloc = rplen + 1;
		rpath = kmem_alloc(rpalloc, KM_SLEEP);
	}
	p = rpath;
	bcopy(dirnm, p, dirlen);
	p += dirlen;
	if (sep != '\0')
		*p++ = sep;
	if (name != NULL) {
		bcopy(name, p, nmlen);
		p += nmlen;
	}
	ASSERT(p == rpath + rplen);

	/*
	 * Find or create a node with this path.
	 */
	rw_enter(&mi->smi_hash_lk, RW_READER);
	if (fap == NULL)
		np = sn_hashfind(mi, rpath, rplen, NULL);
	else
		np = make_smbnode(mi, rpath, rplen, &newnode);
	rw_exit(&mi->smi_hash_lk);

	if (rpalloc)
		kmem_free(rpath, rpalloc);

	if (fap == NULL) {
		/*
		 * Caller is "just looking" (no create)
		 * so np may or may not be NULL here.
		 * Either way, we're done.
		 */
		return (np);
	}

	/*
	 * We should have a node, possibly created.
	 * Do we have (real) attributes to apply?
	 */
	ASSERT(np != NULL);
	if (fap == &smbfs_fattr0)
		return (np);

	/*
	 * Apply the given attributes to this node,
	 * dealing with any cache impact, etc.
	 */
	vp = SMBTOV(np);
	if (!newnode) {
		/*
		 * Found an existing node.
		 * Maybe purge caches...
		 */
		smbfs_cache_check(vp, fap);
	}
	smbfs_attrcache_fa(vp, fap);

	/*
	 * Note NFS sets vp->v_type here, assuming it
	 * can never change for the life of a node.
	 * We allow v_type to change, and set it in
	 * smbfs_attrcache().  Also: mode, uid, gid
	 */
	return (np);
}

/*
 * NFS: nfs_subr.c:rtablehash
 * We use smbfs_hash().
 */

/*
 * Find or create an smbnode.
 * NFS: nfs_subr.c:make_rnode
 */
static smbnode_t *
make_smbnode(
	smbmntinfo_t *mi,
	const char *rpath,
	int rplen,
	int *newnode)
{
	smbnode_t *np;
	smbnode_t *tnp;
	vnode_t *vp;
	vfs_t *vfsp;
	avl_index_t where;
	char *new_rpath = NULL;

	ASSERT(RW_READ_HELD(&mi->smi_hash_lk));
	vfsp = mi->smi_vfsp;

start:
	np = sn_hashfind(mi, rpath, rplen, NULL);
	if (np != NULL) {
		*newnode = 0;
		return (np);
	}

	/* Note: will retake this lock below. */
	rw_exit(&mi->smi_hash_lk);

	/*
	 * see if we can find something on the freelist
	 * (only recycle when we're at/over the nsmbnode limit)
	 */
	mutex_enter(&smbfreelist_lock);
	if (smbfreelist != NULL && smbnodenew >= nsmbnode) {
		np = smbfreelist;
		sn_rmfree(np);
		mutex_exit(&smbfreelist_lock);

		vp = SMBTOV(np);

		if (np->r_flags & RHASHED) {
			/*
			 * The recycled node may belong to some other
			 * mount; use its own AVL lock, not ours.
			 */
			smbmntinfo_t *tmp_mi = np->n_mount;
			ASSERT(tmp_mi != NULL);
			rw_enter(&tmp_mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/* Someone else re-referenced it. */
				VN_RELE_LOCKED(vp);
				mutex_exit(&vp->v_lock);
				rw_exit(&tmp_mi->smi_hash_lk);
				/* start over */
				rw_enter(&mi->smi_hash_lk, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			sn_rmhash_locked(np);
			rw_exit(&tmp_mi->smi_hash_lk);
		}

		sn_inactive(np);

		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			VN_RELE_LOCKED(vp);
			mutex_exit(&vp->v_lock);
			rw_enter(&mi->smi_hash_lk, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);
		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		smbfs_rw_destroy(&np->r_rwlock);
		smbfs_rw_destroy(&np->r_lkserlock);
		mutex_destroy(&np->r_statelock);
		cv_destroy(&np->r_cv);
		/*
		 * Make sure that if smbnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		/*
		 * allocate and initialize a new smbnode
		 */
		vnode_t *new_vp;

		mutex_exit(&smbfreelist_lock);

		np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_inc_ulong((ulong_t *)&smbnodenew);
		vp = new_vp;
	}

	/*
	 * Allocate and copy the rpath we'll need below.
	 * (Null-terminated copy; freed by sn_inactive.)
	 */
	new_rpath = kmem_alloc(rplen + 1, KM_SLEEP);
	bcopy(rpath, new_rpath, rplen);
	new_rpath[rplen] = '\0';

	/* Initialize smbnode_t */
	bzero(np, sizeof (*np));

	smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
	smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
	/* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */

	np->r_vnode = vp;
	np->n_mount = mi;

	np->n_fid = SMB_FID_UNUSED;
	np->n_uid = mi->smi_uid;
	np->n_gid = mi->smi_gid;
	/* Leave attributes "stale." */

#if 0 /* XXX dircache */
	/*
	 * We don't know if it's a directory yet.
	 * Let the caller do this?  XXX
	 */
	avl_create(&np->r_dir, compar, sizeof (rddir_cache),
	    offsetof(rddir_cache, tree));
#endif

	/* Now fill in the vnode. */
	vn_setops(vp, smbfs_vnodeops);
	vp->v_data = (caddr_t)np;
	VFS_HOLD(vfsp);	/* Matching VFS_RELE is in sn_destroy_node. */
	vp->v_vfsp = vfsp;
	vp->v_type = VNON;

	/*
	 * We entered with mi->smi_hash_lk held (reader).
	 * Retake it now, (as the writer).
	 * Will return with it held.
	 */
	rw_enter(&mi->smi_hash_lk, RW_WRITER);

	/*
	 * There is a race condition where someone else
	 * may alloc the smbnode while no locks are held,
	 * so check again and recover if found.
	 */
	tnp = sn_hashfind(mi, rpath, rplen, &where);
	if (tnp != NULL) {
		/*
		 * Lost the race.  Put the node we were building
		 * on the free list and return the one we found.
		 */
		rw_exit(&mi->smi_hash_lk);
		kmem_free(new_rpath, rplen + 1);
		smbfs_addfree(np);
		rw_enter(&mi->smi_hash_lk, RW_READER);
		*newnode = 0;
		return (tnp);
	}

	/*
	 * Hash search identifies nodes by the remote path
	 * (n_rpath) so fill that in now, before linking
	 * this node into the node cache (AVL tree).
	 */
	np->n_rpath = new_rpath;
	np->n_rplen = rplen;
	np->n_ino = smbfs_gethash(new_rpath, rplen);

	sn_addhash_locked(np, where);
	*newnode = 1;
	return (np);
}

/*
 * smbfs_addfree
 * Put an smbnode on the free list, or destroy it immediately
 * if it offers no value were it to be reclaimed later.  Also
 * destroy immediately when we have too many smbnodes, etc.
 *
 * Normally called by smbfs_inactive, but also
 * called in here during cleanup operations.
 *
 * NFS: nfs_subr.c:rp_addfree
 */
void
smbfs_addfree(smbnode_t *np)
{
	vnode_t *vp;
	struct vfs *vfsp;
	smbmntinfo_t *mi;

	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);

	vp = SMBTOV(np);
	ASSERT(vp->v_count >= 1);

	vfsp = vp->v_vfsp;
	mi = VFTOSMI(vfsp);

	/*
	 * If there are no more references to this smbnode and:
	 * we have too many smbnodes allocated, or if the node
	 * is no longer accessible via the AVL tree (!RHASHED),
	 * or an i/o error occurred while writing to the file,
	 * or it's part of an unmounted FS, then try to destroy
	 * it instead of putting it on the smbnode freelist.
	 */
	if (np->r_count == 0 && (
	    (np->r_flags & RHASHED) == 0 ||
	    (np->r_error != 0) ||
	    (vfsp->vfs_flag & VFS_UNMOUNTED) ||
	    (smbnodenew > nsmbnode))) {

		/* Try to destroy this node. */

		if (np->r_flags & RHASHED) {
			rw_enter(&mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				VN_RELE_LOCKED(vp);
				mutex_exit(&vp->v_lock);
				rw_exit(&mi->smi_hash_lk);
				return;
				/*
				 * Will get another call later,
				 * via smbfs_inactive.
				 */
			}
			mutex_exit(&vp->v_lock);
			sn_rmhash_locked(np);
			rw_exit(&mi->smi_hash_lk);
		}

		sn_inactive(np);

		/*
		 * Recheck the vnode reference count.  We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock.  The
		 * smbnode is not in the smbnode "hash" AVL tree, so
		 * the only way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the smbnode was marked
		 * with RDIRTY or for a modified page.  This vnode
		 * reference may have been acquired before our call
		 * to sn_inactive.  The i/o may have been completed,
		 * thus allowing sn_inactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet.  In any case, the smbnode can not be destroyed
		 * until the other references to this vnode have been
		 * released.  The other references will take care of
		 * either destroying the smbnode or placing it on the
		 * smbnode freelist.  If there are no other references,
		 * then the smbnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			VN_RELE_LOCKED(vp);
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		sn_destroy_node(np);
		return;
	}

	/*
	 * Lock the AVL tree and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * to indicate that the smbnode should not be placed on the
	 * freelist.  If another reference has been acquired, then
	 * just release this one and let the other thread complete
	 * the processing of adding this smbnode to the freelist.
	 */
	rw_enter(&mi->smi_hash_lk, RW_WRITER);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		VN_RELE_LOCKED(vp);
		mutex_exit(&vp->v_lock);
		rw_exit(&mi->smi_hash_lk);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Put this node on the free list.
	 * (Insert at the tail of the circular list.)
	 */
	mutex_enter(&smbfreelist_lock);
	if (smbfreelist == NULL) {
		np->r_freef = np;
		np->r_freeb = np;
		smbfreelist = np;
	} else {
		np->r_freef = smbfreelist;
		np->r_freeb = smbfreelist->r_freeb;
		smbfreelist->r_freeb->r_freef = np;
		smbfreelist->r_freeb = np;
	}
	mutex_exit(&smbfreelist_lock);

	rw_exit(&mi->smi_hash_lk);
}

/*
 * Remove an smbnode from the free list.
 *
 * The caller must be holding smbfreelist_lock and the smbnode
 * must be on the freelist.
 *
 * NFS: nfs_subr.c:rp_rmfree
 */
static void
sn_rmfree(smbnode_t *np)
{

	ASSERT(MUTEX_HELD(&smbfreelist_lock));
	ASSERT(np->r_freef != NULL && np->r_freeb != NULL);

	/* Advance the head; if np was the only node, empty the list. */
	if (np == smbfreelist) {
		smbfreelist = np->r_freef;
		if (np == smbfreelist)
			smbfreelist = NULL;
	}

	np->r_freeb->r_freef = np->r_freef;
	np->r_freef->r_freeb = np->r_freeb;

	np->r_freef = np->r_freeb = NULL;
}

/*
 * Put an smbnode in the "hash" AVL tree.
 *
 * The caller must hold the rwlock as writer.
 *
 * NFS: nfs_subr.c:rp_addhash
 */
static void
sn_addhash_locked(smbnode_t *np, avl_index_t where)
{
	smbmntinfo_t *mi = np->n_mount;

	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
	ASSERT(!(np->r_flags & RHASHED));

	avl_insert(&mi->smi_hash_avl, np, where);

	mutex_enter(&np->r_statelock);
	np->r_flags |= RHASHED;
	mutex_exit(&np->r_statelock);
}

/*
 * Remove an smbnode from the "hash" AVL tree.
 *
 * The caller must hold the rwlock as writer.
 *
 * NFS: nfs_subr.c:rp_rmhash_locked
 */
static void
sn_rmhash_locked(smbnode_t *np)
{
	smbmntinfo_t *mi = np->n_mount;

	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
	ASSERT(np->r_flags & RHASHED);

	avl_remove(&mi->smi_hash_avl, np);

	mutex_enter(&np->r_statelock);
	np->r_flags &= ~RHASHED;
	mutex_exit(&np->r_statelock);
}

/*
 * Remove an smbnode from the "hash" AVL tree.
 *
 * The caller must not be holding the rwlock.
 */
void
smbfs_rmhash(smbnode_t *np)
{
	smbmntinfo_t *mi = np->n_mount;

	rw_enter(&mi->smi_hash_lk, RW_WRITER);
	sn_rmhash_locked(np);
	rw_exit(&mi->smi_hash_lk);
}

/*
 * Lookup an smbnode by remote pathname
 *
 * The caller must be holding the AVL rwlock, either shared or exclusive.
 *
 * On success the node is returned held: either the freelist's
 * reference is transferred to the caller, or a new VN_HOLD is taken.
 * If not found and pwhere is non-NULL, *pwhere is the AVL insertion
 * point (for a later avl_insert).
 *
 * NFS: nfs_subr.c:rfind
 */
static smbnode_t *
sn_hashfind(
	smbmntinfo_t *mi,
	const char *rpath,
	int rplen,
	avl_index_t *pwhere) /* optional */
{
	smbfs_node_hdr_t nhdr;
	smbnode_t *np;
	vnode_t *vp;

	ASSERT(RW_LOCK_HELD(&mi->smi_hash_lk));

	/* Build a stack search key holding just (rpath, rplen). */
	bzero(&nhdr, sizeof (nhdr));
	nhdr.hdr_n_rpath = (char *)rpath;
	nhdr.hdr_n_rplen = rplen;

	/* See smbfs_node_cmp below. */
	np = avl_find(&mi->smi_hash_avl, &nhdr, pwhere);

	if (np == NULL)
		return (NULL);

	/*
	 * Found it in the "hash" AVL tree.
	 * Remove from free list, if necessary.
	 */
	vp = SMBTOV(np);
	if (np->r_freef != NULL) {
		mutex_enter(&smbfreelist_lock);
		/*
		 * If the smbnode is on the freelist,
		 * then remove it and use that reference
		 * as the new reference.  Otherwise,
		 * need to increment the reference count.
		 * (Recheck r_freef under the freelist lock;
		 * another thread may have raced us here.)
		 */
		if (np->r_freef != NULL) {
			sn_rmfree(np);
			mutex_exit(&smbfreelist_lock);
		} else {
			mutex_exit(&smbfreelist_lock);
			VN_HOLD(vp);
		}
	} else
		VN_HOLD(vp);

	return (np);
}

/*
 * AVL comparator for the node cache, keyed on the remote path.
 * Operates on the common smbfs_node_hdr_t prefix so a stack-built
 * key (see sn_hashfind) compares correctly against real smbnodes.
 */
static int
smbfs_node_cmp(const void *va, const void *vb)
{
	const smbfs_node_hdr_t *a = va;
	const smbfs_node_hdr_t *b = vb;
	int clen, diff;

	/*
	 * Same semantics as strcmp, but does not
	 * assume the strings are null terminated.
	 */
	clen = (a->hdr_n_rplen < b->hdr_n_rplen) ?
	    a->hdr_n_rplen : b->hdr_n_rplen;
	diff = strncmp(a->hdr_n_rpath, b->hdr_n_rpath, clen);
	if (diff < 0)
		return (-1);
	if (diff > 0)
		return (1);
	/* they match through clen; the shorter path sorts first */
	if (b->hdr_n_rplen > clen)
		return (-1);
	if (a->hdr_n_rplen > clen)
		return (1);
	return (0);
}

/*
 * Setup the "hash" AVL tree used for our node cache.
 * See: smbfs_mount, smbfs_destroy_table.
 */
void
smbfs_init_hash_avl(avl_tree_t *avl)
{
	avl_create(avl, smbfs_node_cmp, sizeof (smbnode_t),
	    offsetof(smbnode_t, r_avl_node));
}

/*
 * Invalidate the cached attributes for all nodes "under" the
 * passed-in node.  Note: the passed-in node is NOT affected by
 * this call.  This is used both for files under some directory
 * after the directory is deleted or renamed, and for extended
 * attribute files (named streams) under a plain file after that
 * file is renamed or deleted.
 *
 * Do this by walking the AVL tree starting at the passed in node,
 * and continuing while the visited nodes have a path prefix matching
 * the entire path of the passed-in node, and a separator just after
 * that matching path prefix.  Watch out for cases where the AVL tree
 * order may not exactly match the order of an FS walk, i.e.
 * consider this sequence:
 *	"foo"		(directory)
 *	"foo bar"	(name containing a space)
 *	"foo/bar"
 * The walk needs to skip "foo bar" and keep going until it finds
 * something that doesn't match the "foo" name prefix.
 */
void
smbfs_attrcache_prune(smbnode_t *top_np)
{
	smbmntinfo_t *mi;
	smbnode_t *np;
	char *rpath;
	int rplen;

	mi = top_np->n_mount;
	rw_enter(&mi->smi_hash_lk, RW_READER);

	np = top_np;
	rpath = top_np->n_rpath;
	rplen = top_np->n_rplen;
	for (;;) {
		np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER);
		if (np == NULL)
			break;
		/* Shorter paths sort before top_np; we're past the subtree. */
		if (np->n_rplen < rplen)
			break;
		if (0 != strncmp(np->n_rpath, rpath, rplen))
			break;
		/*
		 * Only prune when the prefix is followed by a
		 * separator: ':' (named stream) or '\\' (directory).
		 * This is what skips "foo bar" in the example above.
		 */
		if (np->n_rplen > rplen && (
		    np->n_rpath[rplen] == ':' ||
		    np->n_rpath[rplen] == '\\'))
			smbfs_attrcache_remove(np);
	}

	rw_exit(&mi->smi_hash_lk);
}

#ifdef SMB_VNODE_DEBUG
int smbfs_check_table_debug = 1;
#else /* SMB_VNODE_DEBUG */
int smbfs_check_table_debug = 0;
#endif /* SMB_VNODE_DEBUG */


/*
 * Return 1 if there is an active vnode belonging to this vfs in the
 * smbnode cache.
 *
 * Several of these checks are done without holding the usual
 * locks.  This is safe because destroy_smbtable(), smbfs_addfree(),
 * etc. will redo the necessary checks before actually destroying
 * any smbnodes.
 *
 * NFS: nfs_subr.c:check_rtable
 *
 * Debugging changes here relative to NFS.
 * Relatively harmless, so left 'em in.
 */
int
smbfs_check_table(struct vfs *vfsp, smbnode_t *rtnp)
{
	smbmntinfo_t *mi;
	smbnode_t *np;
	vnode_t *vp;
	int busycnt = 0;

	mi = VFTOSMI(vfsp);
	rw_enter(&mi->smi_hash_lk, RW_READER);
	for (np = avl_first(&mi->smi_hash_avl); np != NULL;
	    np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) {

		if (np == rtnp)
			continue; /* skip the root */
		vp = SMBTOV(np);

		/* Now the 'busy' checks: */
		/* Not on the free list? */
		if (np->r_freef == NULL) {
			SMBVDEBUG("!r_freef: node=0x%p, rpath=%s\n",
			    (void *)np, np->n_rpath);
			busycnt++;
		}

		/* Has dirty pages? */
		if (vn_has_cached_data(vp) &&
		    (np->r_flags & RDIRTY)) {
			SMBVDEBUG("is dirty: node=0x%p, rpath=%s\n",
			    (void *)np, np->n_rpath);
			busycnt++;
		}

		/* Other refs? (not reflected in v_count) */
		if (np->r_count > 0) {
			SMBVDEBUG("+r_count: node=0x%p, rpath=%s\n",
			    (void *)np, np->n_rpath);
			busycnt++;
		}

		/* With debug off, one busy node is enough; stop early. */
		if (busycnt && !smbfs_check_table_debug)
			break;

	}
	rw_exit(&mi->smi_hash_lk);

	return (busycnt);
}

/*
 * Destroy inactive vnodes from the AVL tree which belong to this
 * vfs.  It is essential that we destroy all inactive vnodes during a
 * forced unmount as well as during a normal unmount.
 *
 * NFS: nfs_subr.c:destroy_rtable
 *
 * In here, we're normally destroying all or most of the AVL tree,
 * so the natural choice is to use avl_destroy_nodes.  However,
 * there may be a few busy nodes that should remain in the AVL
 * tree when we're done.  The solution: use a temporary tree to
 * hold the busy nodes until we're done destroying the old tree,
 * then copy the temporary tree over the (now empty) real tree.
 */
void
smbfs_destroy_table(struct vfs *vfsp)
{
	avl_tree_t tmp_avl;
	smbmntinfo_t *mi;
	smbnode_t *np;
	smbnode_t *rlist;
	void *v;

	mi = VFTOSMI(vfsp);
	rlist = NULL;
	smbfs_init_hash_avl(&tmp_avl);

	rw_enter(&mi->smi_hash_lk, RW_WRITER);
	v = NULL;
	while ((np = avl_destroy_nodes(&mi->smi_hash_avl, &v)) != NULL) {

		mutex_enter(&smbfreelist_lock);
		if (np->r_freef == NULL) {
			/*
			 * Busy node (not on the free list).
			 * Will keep in the final AVL tree.
			 */
			mutex_exit(&smbfreelist_lock);
			avl_add(&tmp_avl, np);
		} else {
			/*
			 * It's on the free list.  Remove and
			 * arrange for it to be destroyed.
			 */
			sn_rmfree(np);
			mutex_exit(&smbfreelist_lock);

			/*
			 * Last part of sn_rmhash_locked().
			 * NB: avl_destroy_nodes has already
			 * removed this from the "hash" AVL.
			 */
			mutex_enter(&np->r_statelock);
			np->r_flags &= ~RHASHED;
			mutex_exit(&np->r_statelock);

			/*
			 * Add to the list of nodes to destroy.
			 * Borrowing avl_child[0] for this list.
			 * (Safe: the node is out of the AVL tree.)
			 */
			np->r_avl_node.avl_child[0] =
			    (struct avl_node *)rlist;
			rlist = np;
		}
	}
	avl_destroy(&mi->smi_hash_avl);

	/*
	 * Replace the (now destroyed) "hash" AVL with the
	 * temporary AVL, which restores the busy nodes.
	 */
	mi->smi_hash_avl = tmp_avl;
	rw_exit(&mi->smi_hash_lk);

	/*
	 * Now destroy the nodes on our temporary list (rlist).
	 * This call to smbfs_addfree will end up destroying the
	 * smbnode, but in a safe way with the appropriate set
	 * of checks done.
	 */
	while ((np = rlist) != NULL) {
		rlist = (smbnode_t *)np->r_avl_node.avl_child[0];
		smbfs_addfree(np);
	}
}

/*
 * This routine destroys all the resources associated with the smbnode
 * and then the smbnode itself.  Note: sn_inactive has been called.
 *
 * NFS: nfs_subr.c:destroy_rnode
 */
static void
sn_destroy_node(smbnode_t *np)
{
	vnode_t *vp;
	vfs_t *vfsp;

	vp = SMBTOV(np);
	vfsp = vp->v_vfsp;

	/*
	 * Caller holds the only remaining reference, and
	 * sn_inactive has already released the node's resources.
	 */
	ASSERT(vp->v_count == 1);
	ASSERT(np->r_count == 0);
	ASSERT(np->r_mapcnt == 0);
	ASSERT(np->r_secattr.vsa_aclentp == NULL);
	ASSERT(np->r_cred == NULL);
	ASSERT(np->n_rpath == NULL);
	ASSERT(!(np->r_flags & RHASHED));
	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
	atomic_dec_ulong((ulong_t *)&smbnodenew);
	vn_invalid(vp);
	vn_free(vp);
	kmem_cache_free(smbnode_cache, np);
	/* Drop the hold taken by VFS_HOLD in make_smbnode(). */
	VFS_RELE(vfsp);
}

/*
 * Flush all vnodes in this (or every) vfs.
 * Used by nfs_sync and by nfs_unmount.
 */
/*ARGSUSED*/
void
smbfs_rflush(struct vfs *vfsp, cred_t *cr)
{
	/* Todo: mmap support. */
}

/* access cache */
/* client handles */

/*
 * initialize resources that are used by smbfs_subr.c
 * this is called from the _init() routine (by the way of smbfs_clntinit())
 *
 * NFS: nfs_subr.c:nfs_subrinit
 */
int
smbfs_subrinit(void)
{
	ulong_t nsmbnode_max;

	/*
	 * Allocate and initialize the smbnode cache
	 */
	if (nsmbnode <= 0)
		nsmbnode = ncsize; /* dnlc.h */
	/* Cap nsmbnode so the node cache uses at most 1/4 of free memory. */
	nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
	    sizeof (struct smbnode));
	if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
		    "setting nsmbnode to max value of %ld", nsmbnode_max);
		nsmbnode = nsmbnode_max;
	}

	smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
	    0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0);

	/*
	 * Initialize the various mutexes and reader/writer locks
	 */
	mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Assign unique major number for all smbfs mounts
	 */
	if ((smbfs_major = getudev()) == -1) {
		zcmn_err(GLOBAL_ZONEID, CE_WARN,
		    "smbfs: init: can't get unique device number");
		smbfs_major = 0;
	}
	smbfs_minor = 0;

	return (0);
}

/*
 * free smbfs hash table, etc.
 * NFS: nfs_subr.c:nfs_subrfini
 */
void
smbfs_subrfini(void)
{

	/*
	 * Destroy the smbnode cache
	 */
	kmem_cache_destroy(smbnode_cache);

	/*
	 * Destroy the various mutexes and reader/writer locks
	 */
	mutex_destroy(&smbfreelist_lock);
	mutex_destroy(&smbfs_minor_lock);
}

/* rddir_cache ? */

/*
 * Support functions for smbfs_kmem_reclaim
 */

static void
smbfs_node_reclaim(void)
{
	smbmntinfo_t *mi;
	smbnode_t *np;
	vnode_t *vp;

	mutex_enter(&smbfreelist_lock);
	while ((np = smbfreelist) != NULL) {
		sn_rmfree(np);
		/* Drop the freelist lock before taking AVL/vnode locks. */
		mutex_exit(&smbfreelist_lock);
		if (np->r_flags & RHASHED) {
			vp = SMBTOV(np);
			mi = np->n_mount;
			rw_enter(&mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/* Node was re-referenced; leave it alone. */
				VN_RELE_LOCKED(vp);
				mutex_exit(&vp->v_lock);
				rw_exit(&mi->smi_hash_lk);
				mutex_enter(&smbfreelist_lock);
				continue;
			}
			mutex_exit(&vp->v_lock);
			sn_rmhash_locked(np);
			rw_exit(&mi->smi_hash_lk);
		}
		/*
		 * This call to smbfs_addfree will end up destroying the
		 * smbnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		smbfs_addfree(np);
		mutex_enter(&smbfreelist_lock);
	}
	mutex_exit(&smbfreelist_lock);
}

/*
 * Called by the kmem allocator to ask us if we could
 * "Please give back some memory!"
 *
 * Todo: dump nodes from the free list?
 */
/*ARGSUSED*/
void
smbfs_kmem_reclaim(void *cdrarg)
{
	/* Reap every node currently on the freelist; cdrarg is unused. */
	smbfs_node_reclaim();
}

/* nfs failover stuff */
/* nfs_rw_xxx - see smbfs_rwlock.c */