1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * Copyright (c) 1983,1984,1985,1986,1987,1988,1989 AT&T. 26 * All rights reserved. 27 */ 28 29 /* 30 * Node hash implementation initially borrowed from NFS (nfs_subr.c) 31 * but then heavily modified. It's no longer an array of hash lists, 32 * but an AVL tree per mount point. More on this below. 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/time.h> 38 #include <sys/vnode.h> 39 #include <sys/bitmap.h> 40 #include <sys/dnlc.h> 41 #include <sys/kmem.h> 42 #include <sys/sunddi.h> 43 #include <sys/sysmacros.h> 44 45 #include <netsmb/smb_osdep.h> 46 47 #include <netsmb/smb.h> 48 #include <netsmb/smb_conn.h> 49 #include <netsmb/smb_subr.h> 50 #include <netsmb/smb_rq.h> 51 52 #include <smbfs/smbfs.h> 53 #include <smbfs/smbfs_node.h> 54 #include <smbfs/smbfs_subr.h> 55 56 /* 57 * The AVL trees (now per-mount) allow finding an smbfs node by its 58 * full remote path name. It also allows easy traversal of all nodes 59 * below (path wise) any given node. 
A reader/writer lock for each 60 * (per mount) AVL tree is used to control access and to synchronize 61 * lookups, additions, and deletions from that AVL tree. 62 * 63 * Previously, this code use a global array of hash chains, each with 64 * its own rwlock. A few struct members, functions, and comments may 65 * still refer to a "hash", and those should all now be considered to 66 * refer to the per-mount AVL tree that replaced the old hash chains. 67 * (i.e. member smi_hash_lk, function sn_hashfind, etc.) 68 * 69 * The smbnode freelist is organized as a doubly linked list with 70 * a head pointer. Additions and deletions are synchronized via 71 * a single mutex. 72 * 73 * In order to add an smbnode to the free list, it must be linked into 74 * the mount's AVL tree and the exclusive lock for the AVL must be held. 75 * If an smbnode is not linked into the AVL tree, then it is destroyed 76 * because it represents no valuable information that can be reused 77 * about the file. The exclusive lock for the AVL tree must be held 78 * in order to prevent a lookup in the AVL tree from finding the 79 * smbnode and using it and assuming that the smbnode is not on the 80 * freelist. The lookup in the AVL tree will have the AVL tree lock 81 * held, either exclusive or shared. 82 * 83 * The vnode reference count for each smbnode is not allowed to drop 84 * below 1. This prevents external entities, such as the VM 85 * subsystem, from acquiring references to vnodes already on the 86 * freelist and then trying to place them back on the freelist 87 * when their reference is released. This means that the when an 88 * smbnode is looked up in the AVL tree, then either the smbnode 89 * is removed from the freelist and that reference is tranfered to 90 * the new reference or the vnode reference count must be incremented 91 * accordingly. The mutex for the freelist must be held in order to 92 * accurately test to see if the smbnode is on the freelist or not. 
 * The AVL tree lock might be held shared and it is possible that
 * two different threads may race to remove the smbnode from the
 * freelist.  This race can be resolved by holding the mutex for the
 * freelist.  Please note that the mutex for the freelist does not
 * need to be held if the smbnode is not on the freelist.  It can not
 * be placed on the freelist due to the requirement that the thread
 * putting the smbnode on the freelist must hold the exclusive lock
 * for the AVL tree and the thread doing the lookup in the AVL tree
 * is holding either a shared or exclusive lock for the AVL tree.
 *
 * The lock ordering is:
 *
 *	AVL tree lock -> vnode lock
 *	AVL tree lock -> freelist lock
 */

/* Protects smbfreelist and (with atomics) smbnodenew. */
static kmutex_t smbfreelist_lock;
/* Head of the doubly-linked (circular) list of reclaimable smbnodes. */
static smbnode_t *smbfreelist = NULL;
/* Count of allocated smbnodes; compared against nsmbnode to limit growth. */
static ulong_t smbnodenew = 0;
/* Tunable upper bound on smbnodes; defaulted in smbfs_subrinit(). */
long nsmbnode = 0;

/* kmem cache for smbnode_t allocations (see smbfs_subrinit). */
static struct kmem_cache *smbnode_cache;

/* All-zero vsecattr used to clear a node's cached ACL in sn_inactive(). */
static const vsecattr_t smbfs_vsa0 = { 0 };

/*
 * Mutex to protect the following variables:
 *	smbfs_major
 *	smbfs_minor
 */
kmutex_t smbfs_minor_lock;
int smbfs_major;
int smbfs_minor;

/*
 * All-zero fattr passed by callers of smbfs_node_findcreate() that
 * want a node created but have no real attributes to apply.
 * See smbfs_node_findcreate()
 */
struct smbfattr smbfs_fattr0;

/*
 * Local functions.
 * SN for Smb Node
 */
static void sn_rmfree(smbnode_t *);
static void sn_inactive(smbnode_t *);
static void sn_addhash_locked(smbnode_t *, avl_index_t);
static void sn_rmhash_locked(smbnode_t *);
static void sn_destroy_node(smbnode_t *);
void smbfs_kmem_reclaim(void *cdrarg);

static smbnode_t *
sn_hashfind(smbmntinfo_t *, const char *, int, avl_index_t *);

static smbnode_t *
make_smbnode(smbmntinfo_t *, const char *, int, int *);

/*
 * Free the resources associated with an smbnode.
 * Note: This is different from smbfs_inactive
 *
 * Drops the node's cached security attributes, credential, and
 * remote path under r_statelock, then frees them after the lock
 * is released (no allocator calls are made while holding the lock).
 *
 * NFS: nfs_subr.c:rinactive
 */
static void
sn_inactive(smbnode_t *np)
{
	vsecattr_t	ovsa;
	cred_t		*oldcr;
	char		*orpath;
	int		orplen;

	/*
	 * Flush and invalidate all pages (todo)
	 * Free any held credentials and caches...
	 * etc. (See NFS code)
	 */
	mutex_enter(&np->r_statelock);

	/* Detach everything while holding r_statelock... */
	ovsa = np->r_secattr;
	np->r_secattr = smbfs_vsa0;
	np->r_sectime = 0;

	oldcr = np->r_cred;
	np->r_cred = NULL;

	orpath = np->n_rpath;
	orplen = np->n_rplen;
	np->n_rpath = NULL;
	np->n_rplen = 0;

	mutex_exit(&np->r_statelock);

	/* ... and free it all after dropping the lock. */
	if (ovsa.vsa_aclentp != NULL)
		kmem_free(ovsa.vsa_aclentp, ovsa.vsa_aclentsz);

	if (oldcr != NULL)
		crfree(oldcr);

	if (orpath != NULL)
		kmem_free(orpath, orplen + 1);	/* +1 for the NUL terminator */
}

/*
 * Find and optionally create an smbnode for the passed
 * mountinfo, directory, separator, and name.  If the
 * desired smbnode already exists, return a reference.
 * If the file attributes pointer is non-null, the node
 * is created if necessary and linked into the AVL tree.
 *
 * Callers that need a node created but don't have the
 * real attributes pass smbfs_fattr0 to force creation.
 *
 * Note: make_smbnode() may upgrade the "hash" lock to exclusive.
 *
 * NFS: nfs_subr.c:makenfsnode
 */
smbnode_t *
smbfs_node_findcreate(
	smbmntinfo_t	*mi,
	const char	*dirnm,
	int		dirlen,
	const char	*name,
	int		nmlen,
	char		sep,
	struct smbfattr	*fap)
{
	char		tmpbuf[256];
	size_t		rpalloc;
	char		*p, *rpath;
	int		rplen;
	smbnode_t	*np;
	vnode_t		*vp;
	int		newnode;

	/*
	 * Build the search string, either in tmpbuf or
	 * in allocated memory if larger than tmpbuf.
	 * Layout: dirnm [sep] [name]  (not NUL terminated;
	 * lookups use the explicit length rplen).
	 */
	rplen = dirlen;
	if (sep != '\0')
		rplen++;
	rplen += nmlen;
	if (rplen < sizeof (tmpbuf)) {
		/* use tmpbuf */
		rpalloc = 0;
		rpath = tmpbuf;
	} else {
		rpalloc = rplen + 1;
		rpath = kmem_alloc(rpalloc, KM_SLEEP);
	}
	p = rpath;
	bcopy(dirnm, p, dirlen);
	p += dirlen;
	if (sep != '\0')
		*p++ = sep;
	if (name != NULL) {
		bcopy(name, p, nmlen);
		p += nmlen;
	}
	ASSERT(p == rpath + rplen);

	/*
	 * Find or create a node with this path.
	 * (make_smbnode may temporarily drop and retake the lock.)
	 */
	rw_enter(&mi->smi_hash_lk, RW_READER);
	if (fap == NULL)
		np = sn_hashfind(mi, rpath, rplen, NULL);
	else
		np = make_smbnode(mi, rpath, rplen, &newnode);
	rw_exit(&mi->smi_hash_lk);

	if (rpalloc)
		kmem_free(rpath, rpalloc);

	if (fap == NULL) {
		/*
		 * Caller is "just looking" (no create)
		 * so np may or may not be NULL here.
		 * Either way, we're done.
		 */
		return (np);
	}

	/*
	 * We should have a node, possibly created.
	 * Do we have (real) attributes to apply?
	 */
	ASSERT(np != NULL);
	if (fap == &smbfs_fattr0)
		return (np);

	/*
	 * Apply the given attributes to this node,
	 * dealing with any cache impact, etc.
	 */
	vp = SMBTOV(np);
	if (!newnode) {
		/*
		 * Found an existing node.
		 * Maybe purge caches...
		 */
		smbfs_cache_check(vp, fap);
	}
	smbfs_attrcache_fa(vp, fap);

	/*
	 * Note NFS sets vp->v_type here, assuming it
	 * can never change for the life of a node.
	 * We allow v_type to change, and set it in
	 * smbfs_attrcache().  Also: mode, uid, gid
	 */
	return (np);
}

/*
 * NFS: nfs_subr.c:rtablehash
 * We use smbfs_hash().
 */

/*
 * Find or create an smbnode.
 * NFS: nfs_subr.c:make_rnode
 *
 * Entered (and returns) with mi->smi_hash_lk held, though the lock
 * is dropped and retaken internally, so the found/created node is
 * the only state a caller may rely on.  *newnode is set to 1 when
 * a node was created, 0 when an existing one was found.
 */
static smbnode_t *
make_smbnode(
	smbmntinfo_t	*mi,
	const char	*rpath,
	int		rplen,
	int		*newnode)
{
	smbnode_t	*np;
	smbnode_t	*tnp;
	vnode_t		*vp;
	vfs_t		*vfsp;
	avl_index_t	where;
	char		*new_rpath = NULL;

	ASSERT(RW_READ_HELD(&mi->smi_hash_lk));
	vfsp = mi->smi_vfsp;

start:
	np = sn_hashfind(mi, rpath, rplen, NULL);
	if (np != NULL) {
		*newnode = 0;
		return (np);
	}

	/* Note: will retake this lock below. */
	rw_exit(&mi->smi_hash_lk);

	/*
	 * see if we can find something on the freelist
	 * (only recycle when we're at/over the nsmbnode limit)
	 */
	mutex_enter(&smbfreelist_lock);
	if (smbfreelist != NULL && smbnodenew >= nsmbnode) {
		np = smbfreelist;
		sn_rmfree(np);
		mutex_exit(&smbfreelist_lock);

		vp = SMBTOV(np);

		if (np->r_flags & RHASHED) {
			/*
			 * The recycled node may belong to some other
			 * mount, so lock THAT mount's AVL tree.
			 */
			smbmntinfo_t *tmp_mi = np->n_mount;
			ASSERT(tmp_mi != NULL);
			rw_enter(&tmp_mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/* Someone found it; not ours to recycle. */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&tmp_mi->smi_hash_lk);
				/* start over */
				rw_enter(&mi->smi_hash_lk, RW_READER);
				goto start;
			}
			mutex_exit(&vp->v_lock);
			sn_rmhash_locked(np);
			rw_exit(&tmp_mi->smi_hash_lk);
		}

		sn_inactive(np);

		/* Re-check for a new reference gained during sn_inactive. */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			rw_enter(&mi->smi_hash_lk, RW_READER);
			goto start;
		}
		mutex_exit(&vp->v_lock);
		vn_invalid(vp);
		/*
		 * destroy old locks before bzero'ing and
		 * recreating the locks below.
		 */
		smbfs_rw_destroy(&np->r_rwlock);
		smbfs_rw_destroy(&np->r_lkserlock);
		mutex_destroy(&np->r_statelock);
		cv_destroy(&np->r_cv);
		/*
		 * Make sure that if smbnode is recycled then
		 * VFS count is decremented properly before
		 * reuse.
		 */
		VFS_RELE(vp->v_vfsp);
		vn_reinit(vp);
	} else {
		/*
		 * allocate and initialize a new smbnode
		 */
		vnode_t *new_vp;

		mutex_exit(&smbfreelist_lock);

		np = kmem_cache_alloc(smbnode_cache, KM_SLEEP);
		new_vp = vn_alloc(KM_SLEEP);

		atomic_add_long((ulong_t *)&smbnodenew, 1);
		vp = new_vp;
	}

	/*
	 * Allocate and copy the rpath we'll need below.
	 */
	new_rpath = kmem_alloc(rplen + 1, KM_SLEEP);
	bcopy(rpath, new_rpath, rplen);
	new_rpath[rplen] = '\0';

	/* Initialize smbnode_t */
	bzero(np, sizeof (*np));

	smbfs_rw_init(&np->r_rwlock, NULL, RW_DEFAULT, NULL);
	smbfs_rw_init(&np->r_lkserlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&np->r_statelock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&np->r_cv, NULL, CV_DEFAULT, NULL);
	/* cv_init(&np->r_commit.c_cv, NULL, CV_DEFAULT, NULL); */

	np->r_vnode = vp;
	np->n_mount = mi;

	np->n_fid = SMB_FID_UNUSED;
	np->n_uid = mi->smi_uid;
	np->n_gid = mi->smi_gid;
	/* Leave attributes "stale." */

#if 0 /* XXX dircache */
	/*
	 * We don't know if it's a directory yet.
	 * Let the caller do this? XXX
	 */
	avl_create(&np->r_dir, compar, sizeof (rddir_cache),
	    offsetof(rddir_cache, tree));
#endif

	/* Now fill in the vnode. */
	vn_setops(vp, smbfs_vnodeops);
	vp->v_data = (caddr_t)np;
	VFS_HOLD(vfsp);
	vp->v_vfsp = vfsp;
	vp->v_type = VNON;

	/*
	 * We entered with mi->smi_hash_lk held (reader).
	 * Retake it now, (as the writer).
	 * Will return with it held.
	 */
	rw_enter(&mi->smi_hash_lk, RW_WRITER);

	/*
	 * There is a race condition where someone else
	 * may alloc the smbnode while no locks are held,
	 * so check again and recover if found.
	 */
	tnp = sn_hashfind(mi, rpath, rplen, &where);
	if (tnp != NULL) {
		/*
		 * Lost the race.  Put the node we were building
		 * on the free list and return the one we found.
		 */
		rw_exit(&mi->smi_hash_lk);
		kmem_free(new_rpath, rplen + 1);
		smbfs_addfree(np);
		rw_enter(&mi->smi_hash_lk, RW_READER);
		*newnode = 0;
		return (tnp);
	}

	/*
	 * Hash search identifies nodes by the remote path
	 * (n_rpath) so fill that in now, before linking
	 * this node into the node cache (AVL tree).
	 */
	np->n_rpath = new_rpath;
	np->n_rplen = rplen;
	np->n_ino = smbfs_gethash(new_rpath, rplen);

	sn_addhash_locked(np, where);
	*newnode = 1;
	return (np);
}

/*
 * smbfs_addfree
 * Put an smbnode on the free list, or destroy it immediately
 * if it offers no value were it to be reclaimed later.  Also
 * destroy immediately when we have too many smbnodes, etc.
 *
 * Normally called by smbfs_inactive, but also
 * called in here during cleanup operations.
 *
 * NFS: nfs_subr.c:rp_addfree
 */
void
smbfs_addfree(smbnode_t *np)
{
	vnode_t *vp;
	struct vfs *vfsp;
	smbmntinfo_t *mi;

	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);

	vp = SMBTOV(np);
	ASSERT(vp->v_count >= 1);

	vfsp = vp->v_vfsp;
	mi = VFTOSMI(vfsp);

	/*
	 * If there are no more references to this smbnode and:
	 * we have too many smbnodes allocated, or if the node
	 * is no longer accessible via the AVL tree (!RHASHED),
	 * or an i/o error occurred while writing to the file,
	 * or it's part of an unmounted FS, then try to destroy
	 * it instead of putting it on the smbnode freelist.
	 */
	if (np->r_count == 0 && (
	    (np->r_flags & RHASHED) == 0 ||
	    (np->r_error != 0) ||
	    (vfsp->vfs_flag & VFS_UNMOUNTED) ||
	    (smbnodenew > nsmbnode))) {

		/* Try to destroy this node. */

		if (np->r_flags & RHASHED) {
			rw_enter(&mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&mi->smi_hash_lk);
				return;
				/*
				 * Will get another call later,
				 * via smbfs_inactive.
				 */
			}
			mutex_exit(&vp->v_lock);
			sn_rmhash_locked(np);
			rw_exit(&mi->smi_hash_lk);
		}

		sn_inactive(np);

		/*
		 * Recheck the vnode reference count.  We need to
		 * make sure that another reference has not been
		 * acquired while we were not holding v_lock.  The
		 * smbnode is not in the smbnode "hash" AVL tree, so
		 * the only way for a reference to have been acquired
		 * is for a VOP_PUTPAGE because the smbnode was marked
		 * with RDIRTY or for a modified page.  This vnode
		 * reference may have been acquired before our call
		 * to sn_inactive.  The i/o may have been completed,
		 * thus allowing sn_inactive to complete, but the
		 * reference to the vnode may not have been released
		 * yet.  In any case, the smbnode can not be destroyed
		 * until the other references to this vnode have been
		 * released.  The other references will take care of
		 * either destroying the smbnode or placing it on the
		 * smbnode freelist.  If there are no other references,
		 * then the smbnode may be safely destroyed.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_count > 1) {
			vp->v_count--;
			mutex_exit(&vp->v_lock);
			return;
		}
		mutex_exit(&vp->v_lock);

		sn_destroy_node(np);
		return;
	}

	/*
	 * Lock the AVL tree and then recheck the reference count
	 * to ensure that no other threads have acquired a reference
	 * to indicate that the smbnode should not be placed on the
	 * freelist.  If another reference has been acquired, then
	 * just release this one and let the other thread complete
	 * the processing of adding this smbnode to the freelist.
	 */
	rw_enter(&mi->smi_hash_lk, RW_WRITER);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;
		mutex_exit(&vp->v_lock);
		rw_exit(&mi->smi_hash_lk);
		return;
	}
	mutex_exit(&vp->v_lock);

	/*
	 * Put this node on the free list.
	 * (Insert at the tail of the circular list.)
	 */
	mutex_enter(&smbfreelist_lock);
	if (smbfreelist == NULL) {
		np->r_freef = np;
		np->r_freeb = np;
		smbfreelist = np;
	} else {
		np->r_freef = smbfreelist;
		np->r_freeb = smbfreelist->r_freeb;
		smbfreelist->r_freeb->r_freef = np;
		smbfreelist->r_freeb = np;
	}
	mutex_exit(&smbfreelist_lock);

	rw_exit(&mi->smi_hash_lk);
}

/*
 * Remove an smbnode from the free list.
 *
 * The caller must be holding smbfreelist_lock and the smbnode
 * must be on the freelist.
 *
 * NFS: nfs_subr.c:rp_rmfree
 */
static void
sn_rmfree(smbnode_t *np)
{

	ASSERT(MUTEX_HELD(&smbfreelist_lock));
	ASSERT(np->r_freef != NULL && np->r_freeb != NULL);

	if (np == smbfreelist) {
		smbfreelist = np->r_freef;
		/* If np was the only entry, the list is now empty. */
		if (np == smbfreelist)
			smbfreelist = NULL;
	}

	np->r_freeb->r_freef = np->r_freef;
	np->r_freef->r_freeb = np->r_freeb;

	np->r_freef = np->r_freeb = NULL;
}

/*
 * Put an smbnode in the "hash" AVL tree.
 *
 * The caller must be hold the rwlock as writer.
 *
 * NFS: nfs_subr.c:rp_addhash
 */
static void
sn_addhash_locked(smbnode_t *np, avl_index_t where)
{
	smbmntinfo_t *mi = np->n_mount;

	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
	ASSERT(!(np->r_flags & RHASHED));

	avl_insert(&mi->smi_hash_avl, np, where);

	mutex_enter(&np->r_statelock);
	np->r_flags |= RHASHED;
	mutex_exit(&np->r_statelock);
}

/*
 * Remove an smbnode from the "hash" AVL tree.
 *
 * The caller must hold the rwlock as writer.
 *
 * NFS: nfs_subr.c:rp_rmhash_locked
 */
static void
sn_rmhash_locked(smbnode_t *np)
{
	smbmntinfo_t *mi = np->n_mount;

	ASSERT(RW_WRITE_HELD(&mi->smi_hash_lk));
	ASSERT(np->r_flags & RHASHED);

	avl_remove(&mi->smi_hash_avl, np);

	mutex_enter(&np->r_statelock);
	np->r_flags &= ~RHASHED;
	mutex_exit(&np->r_statelock);
}

/*
 * Remove an smbnode from the "hash" AVL tree.
 *
 * The caller must not be holding the rwlock.
 */
void
smbfs_rmhash(smbnode_t *np)
{
	smbmntinfo_t *mi = np->n_mount;

	rw_enter(&mi->smi_hash_lk, RW_WRITER);
	sn_rmhash_locked(np);
	rw_exit(&mi->smi_hash_lk);
}

/*
 * Lookup an smbnode by remote pathname.
 *
 * The caller must be holding the AVL rwlock, either shared or exclusive.
 * On success, returns the node holding a reference for the caller
 * (either the reference taken off the freelist, or a new VN_HOLD).
 * If pwhere is non-NULL and the node is not found, it receives the
 * AVL insertion point for a later avl_insert().
 *
 * NFS: nfs_subr.c:rfind
 */
static smbnode_t *
sn_hashfind(
	smbmntinfo_t	*mi,
	const char	*rpath,
	int		rplen,
	avl_index_t	*pwhere) /* optional */
{
	smbfs_node_hdr_t nhdr;
	smbnode_t	*np;
	vnode_t		*vp;

	ASSERT(RW_LOCK_HELD(&mi->smi_hash_lk));

	/* Build a search key; only the path fields matter. */
	bzero(&nhdr, sizeof (nhdr));
	nhdr.hdr_n_rpath = (char *)rpath;
	nhdr.hdr_n_rplen = rplen;

	/* See smbfs_node_cmp below. */
	np = avl_find(&mi->smi_hash_avl, &nhdr, pwhere);

	if (np == NULL)
		return (NULL);

	/*
	 * Found it in the "hash" AVL tree.
	 * Remove from free list, if necessary.
	 */
	vp = SMBTOV(np);
	if (np->r_freef != NULL) {
		mutex_enter(&smbfreelist_lock);
		/*
		 * If the smbnode is on the freelist,
		 * then remove it and use that reference
		 * as the new reference.  Otherwise,
		 * need to increment the reference count.
		 * (Re-test r_freef under the mutex: another
		 * thread may have raced us off the freelist.)
		 */
		if (np->r_freef != NULL) {
			sn_rmfree(np);
			mutex_exit(&smbfreelist_lock);
		} else {
			mutex_exit(&smbfreelist_lock);
			VN_HOLD(vp);
		}
	} else
		VN_HOLD(vp);

	return (np);
}

/*
 * AVL comparator for the per-mount node cache, keyed on the
 * remote path (n_rpath/n_rplen).  Works on smbfs_node_hdr_t,
 * the common header of both search keys and smbnode_t.
 */
static int
smbfs_node_cmp(const void *va, const void *vb)
{
	const smbfs_node_hdr_t *a = va;
	const smbfs_node_hdr_t *b = vb;
	int clen, diff;

	/*
	 * Same semantics as strcmp, but does not
	 * assume the strings are null terminated.
	 */
	clen = (a->hdr_n_rplen < b->hdr_n_rplen) ?
	    a->hdr_n_rplen : b->hdr_n_rplen;
	diff = strncmp(a->hdr_n_rpath, b->hdr_n_rpath, clen);
	if (diff < 0)
		return (-1);
	if (diff > 0)
		return (1);
	/* they match through clen; the shorter path sorts first */
	if (b->hdr_n_rplen > clen)
		return (-1);
	if (a->hdr_n_rplen > clen)
		return (1);
	return (0);
}

/*
 * Setup the "hash" AVL tree used for our node cache.
 * See: smbfs_mount, smbfs_destroy_table.
 */
void
smbfs_init_hash_avl(avl_tree_t *avl)
{
	avl_create(avl, smbfs_node_cmp, sizeof (smbnode_t),
	    offsetof(smbnode_t, r_avl_node));
}

/*
 * Invalidate the cached attributes for all nodes "under" the
 * passed-in node.  Note: the passed-in node is NOT affected by
 * this call.  This is used both for files under some directory
 * after the directory is deleted or renamed, and for extended
 * attribute files (named streams) under a plain file after that
 * file is renamed or deleted.
 *
 * Do this by walking the AVL tree starting at the passed in node,
 * and continuing while the visited nodes have a path prefix matching
 * the entire path of the passed-in node, and a separator just after
 * that matching path prefix.  Watch out for cases where the AVL tree
 * order may not exactly match the order of an FS walk, i.e.
813 * consider this sequence: 814 * "foo" (directory) 815 * "foo bar" (name containing a space) 816 * "foo/bar" 817 * The walk needs to skip "foo bar" and keep going until it finds 818 * something that doesn't match the "foo" name prefix. 819 */ 820 void 821 smbfs_attrcache_prune(smbnode_t *top_np) 822 { 823 smbmntinfo_t *mi; 824 smbnode_t *np; 825 char *rpath; 826 int rplen; 827 828 mi = top_np->n_mount; 829 rw_enter(&mi->smi_hash_lk, RW_READER); 830 831 np = top_np; 832 rpath = top_np->n_rpath; 833 rplen = top_np->n_rplen; 834 for (;;) { 835 np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER); 836 if (np == NULL) 837 break; 838 if (np->n_rplen < rplen) 839 break; 840 if (0 != strncmp(np->n_rpath, rpath, rplen)) 841 break; 842 if (np->n_rplen > rplen && ( 843 np->n_rpath[rplen] == ':' || 844 np->n_rpath[rplen] == '\\')) 845 smbfs_attrcache_remove(np); 846 } 847 848 rw_exit(&mi->smi_hash_lk); 849 } 850 851 #ifdef SMB_VNODE_DEBUG 852 int smbfs_check_table_debug = 1; 853 #else /* SMB_VNODE_DEBUG */ 854 int smbfs_check_table_debug = 0; 855 #endif /* SMB_VNODE_DEBUG */ 856 857 858 /* 859 * Return 1 if there is a active vnode belonging to this vfs in the 860 * smbnode cache. 861 * 862 * Several of these checks are done without holding the usual 863 * locks. This is safe because destroy_smbtable(), smbfs_addfree(), 864 * etc. will redo the necessary checks before actually destroying 865 * any smbnodes. 866 * 867 * NFS: nfs_subr.c:check_rtable 868 * 869 * Debugging changes here relative to NFS. 870 * Relatively harmless, so left 'em in. 
871 */ 872 int 873 smbfs_check_table(struct vfs *vfsp, smbnode_t *rtnp) 874 { 875 smbmntinfo_t *mi; 876 smbnode_t *np; 877 vnode_t *vp; 878 int busycnt = 0; 879 880 mi = VFTOSMI(vfsp); 881 rw_enter(&mi->smi_hash_lk, RW_READER); 882 for (np = avl_first(&mi->smi_hash_avl); np != NULL; 883 np = avl_walk(&mi->smi_hash_avl, np, AVL_AFTER)) { 884 885 if (np == rtnp) 886 continue; /* skip the root */ 887 vp = SMBTOV(np); 888 889 /* Now the 'busy' checks: */ 890 /* Not on the free list? */ 891 if (np->r_freef == NULL) { 892 SMBVDEBUG("!r_freef: node=0x%p, rpath=%s\n", 893 (void *)np, np->n_rpath); 894 busycnt++; 895 } 896 897 /* Has dirty pages? */ 898 if (vn_has_cached_data(vp) && 899 (np->r_flags & RDIRTY)) { 900 SMBVDEBUG("is dirty: node=0x%p, rpath=%s\n", 901 (void *)np, np->n_rpath); 902 busycnt++; 903 } 904 905 /* Other refs? (not reflected in v_count) */ 906 if (np->r_count > 0) { 907 SMBVDEBUG("+r_count: node=0x%p, rpath=%s\n", 908 (void *)np, np->n_rpath); 909 busycnt++; 910 } 911 912 if (busycnt && !smbfs_check_table_debug) 913 break; 914 915 } 916 rw_exit(&mi->smi_hash_lk); 917 918 return (busycnt); 919 } 920 921 /* 922 * Destroy inactive vnodes from the AVL tree which belong to this 923 * vfs. It is essential that we destroy all inactive vnodes during a 924 * forced unmount as well as during a normal unmount. 925 * 926 * NFS: nfs_subr.c:destroy_rtable 927 * 928 * In here, we're normally destrying all or most of the AVL tree, 929 * so the natural choice is to use avl_destroy_nodes. However, 930 * there may be a few busy nodes that should remain in the AVL 931 * tree when we're done. The solution: use a temporary tree to 932 * hold the busy nodes until we're done destroying the old tree, 933 * then copy the temporary tree over the (now emtpy) real tree. 
 */
void
smbfs_destroy_table(struct vfs *vfsp)
{
	avl_tree_t	tmp_avl;
	smbmntinfo_t	*mi;
	smbnode_t	*np;
	smbnode_t	*rlist;
	void		*v;

	mi = VFTOSMI(vfsp);
	rlist = NULL;
	smbfs_init_hash_avl(&tmp_avl);

	rw_enter(&mi->smi_hash_lk, RW_WRITER);
	v = NULL;
	while ((np = avl_destroy_nodes(&mi->smi_hash_avl, &v)) != NULL) {

		mutex_enter(&smbfreelist_lock);
		if (np->r_freef == NULL) {
			/*
			 * Busy node (not on the free list).
			 * Will keep in the final AVL tree.
			 */
			mutex_exit(&smbfreelist_lock);
			avl_add(&tmp_avl, np);
		} else {
			/*
			 * It's on the free list.  Remove and
			 * arrange for it to be destroyed.
			 */
			sn_rmfree(np);
			mutex_exit(&smbfreelist_lock);

			/*
			 * Last part of sn_rmhash_locked().
			 * NB: avl_destroy_nodes has already
			 * removed this from the "hash" AVL.
			 */
			mutex_enter(&np->r_statelock);
			np->r_flags &= ~RHASHED;
			mutex_exit(&np->r_statelock);

			/*
			 * Add to the list of nodes to destroy.
			 * Borrowing avl_child[0] for this list.
			 * (Safe: the node is out of both the AVL
			 * tree and the freelist at this point.)
			 */
			np->r_avl_node.avl_child[0] =
			    (struct avl_node *)rlist;
			rlist = np;
		}
	}
	avl_destroy(&mi->smi_hash_avl);

	/*
	 * Replace the (now destroyed) "hash" AVL with the
	 * temporary AVL, which restores the busy nodes.
	 */
	mi->smi_hash_avl = tmp_avl;
	rw_exit(&mi->smi_hash_lk);

	/*
	 * Now destroy the nodes on our temporary list (rlist).
	 * This call to smbfs_addfree will end up destroying the
	 * smbnode, but in a safe way with the appropriate set
	 * of checks done.
	 */
	while ((np = rlist) != NULL) {
		rlist = (smbnode_t *)np->r_avl_node.avl_child[0];
		smbfs_addfree(np);
	}
}

/*
 * This routine destroys all the resources associated with the smbnode
 * and then the smbnode itself.  Note: sn_inactive has been called.
 *
 * NFS: nfs_subr.c:destroy_rnode
 */
static void
sn_destroy_node(smbnode_t *np)
{
	vnode_t *vp;
	vfs_t *vfsp;

	vp = SMBTOV(np);
	vfsp = vp->v_vfsp;

	/* The node must be fully quiesced and already inactivated. */
	ASSERT(vp->v_count == 1);
	ASSERT(np->r_count == 0);
	ASSERT(np->r_mapcnt == 0);
	ASSERT(np->r_secattr.vsa_aclentp == NULL);
	ASSERT(np->r_cred == NULL);
	ASSERT(np->n_rpath == NULL);
	ASSERT(!(np->r_flags & RHASHED));
	ASSERT(np->r_freef == NULL && np->r_freeb == NULL);
	atomic_add_long((ulong_t *)&smbnodenew, -1);
	vn_invalid(vp);
	vn_free(vp);
	kmem_cache_free(smbnode_cache, np);
	VFS_RELE(vfsp);
}

/*
 * Flush all vnodes in this (or every) vfs.
 * Used by nfs_sync and by nfs_unmount.
 */
/*ARGSUSED*/
void
smbfs_rflush(struct vfs *vfsp, cred_t *cr)
{
	/* Todo: mmap support. */
}

/* access cache */
/* client handles */

/*
 * initialize resources that are used by smbfs_subr.c
 * this is called from the _init() routine (by the way of smbfs_clntinit())
 *
 * NFS: nfs_subr.c:nfs_subrinit
 */
int
smbfs_subrinit(void)
{
	ulong_t nsmbnode_max;

	/*
	 * Allocate and initialize the smbnode cache
	 */
	if (nsmbnode <= 0)
		nsmbnode = ncsize; /* dnlc.h */
	/* Cap at 1/4 of available kernel memory worth of smbnodes. */
	nsmbnode_max = (ulong_t)((kmem_maxavail() >> 2) /
	    sizeof (struct smbnode));
	if (nsmbnode > nsmbnode_max || (nsmbnode == 0 && ncsize == 0)) {
		zcmn_err(GLOBAL_ZONEID, CE_NOTE,
		    "setting nsmbnode to max value of %ld", nsmbnode_max);
		nsmbnode = nsmbnode_max;
	}

	smbnode_cache = kmem_cache_create("smbnode_cache", sizeof (smbnode_t),
	    0, NULL, NULL, smbfs_kmem_reclaim, NULL, NULL, 0);

	/*
	 * Initialize the various mutexes and reader/writer locks
	 */
	mutex_init(&smbfreelist_lock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&smbfs_minor_lock, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Assign unique major number for all smbfs mounts
	 */
	if ((smbfs_major = getudev()) == -1) {
		zcmn_err(GLOBAL_ZONEID, CE_WARN,
		    "smbfs: init: can't get unique device number");
		smbfs_major = 0;
	}
	smbfs_minor = 0;

	return (0);
}

/*
 * free smbfs hash table, etc.
 * NFS: nfs_subr.c:nfs_subrfini
 */
void
smbfs_subrfini(void)
{

	/*
	 * Destroy the smbnode cache
	 */
	kmem_cache_destroy(smbnode_cache);

	/*
	 * Destroy the various mutexes and reader/writer locks
	 */
	mutex_destroy(&smbfreelist_lock);
	mutex_destroy(&smbfs_minor_lock);
}

/* rddir_cache ? */

/*
 * Support functions for smbfs_kmem_reclaim
 */

static void
smbfs_node_reclaim(void)
{
	smbmntinfo_t	*mi;
	smbnode_t	*np;
	vnode_t		*vp;

	mutex_enter(&smbfreelist_lock);
	while ((np = smbfreelist) != NULL) {
		sn_rmfree(np);
		mutex_exit(&smbfreelist_lock);
		if (np->r_flags & RHASHED) {
			vp = SMBTOV(np);
			mi = np->n_mount;
			rw_enter(&mi->smi_hash_lk, RW_WRITER);
			mutex_enter(&vp->v_lock);
			if (vp->v_count > 1) {
				/*
				 * Node was re-referenced; drop our
				 * hold and leave it in the AVL tree.
				 */
				vp->v_count--;
				mutex_exit(&vp->v_lock);
				rw_exit(&mi->smi_hash_lk);
				mutex_enter(&smbfreelist_lock);
				continue;
			}
			mutex_exit(&vp->v_lock);
			sn_rmhash_locked(np);
			rw_exit(&mi->smi_hash_lk);
		}
		/*
		 * This call to smbfs_addfree will end up destroying the
		 * smbnode, but in a safe way with the appropriate set
		 * of checks done.
		 */
		smbfs_addfree(np);
		mutex_enter(&smbfreelist_lock);
	}
	mutex_exit(&smbfreelist_lock);
}

/*
 * Called by kmem_cache_alloc ask us if we could
 * "Please give back some memory!"
 *
 * Todo: dump nodes from the free list?
 */
/*ARGSUSED*/
void
smbfs_kmem_reclaim(void *cdrarg)
{
	/* Tear down everything currently on the freelist. */
	smbfs_node_reclaim();
}

/* nfs failover stuff */
/* nfs_rw_xxx - see smbfs_rwlock.c */