/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * The idea behind composition-based stacked filesystems is to add a
 * vnode to the stack of vnodes for each mount. These vnodes have their
 * own set of mount options and filesystem-specific functions, so they
 * can modify data or operations before they are passed along. Such a
 * filesystem must maintain a mapping from the underlying vnodes to its
 * interposing vnodes.
 *
 * In lofs, this mapping is implemented by a hashtable. Each bucket
 * contains a count of the number of nodes currently contained, the
 * chain of vnodes, and a lock to protect the list of vnodes. The
 * hashtable dynamically grows if the number of vnodes in the table as a
 * whole exceeds the size of the table left-shifted by
 * lo_resize_threshold. In order to minimize lock contention, there is
 * no global lock protecting the hashtable, hence obtaining the
 * per-bucket locks consists of a dance to make sure we've actually
 * locked the correct bucket. Acquiring a bucket lock doesn't involve
 * locking the hashtable itself, so we refrain from freeing old
 * hashtables, and store them in a linked list of retired hashtables;
 * the list is freed when the filesystem is unmounted.
 */

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/t_lock.h>
#include <sys/debug.h>
#include <sys/atomic.h>

#include <sys/fs/lofs_node.h>
#include <sys/fs/lofs_info.h>

/*
 * Due to the hashing algorithm, the size of the hash table needs to be a
 * power of 2.
 */
#define	LOFS_DEFAULT_HTSIZE	(1 << 6)

#define	ltablehash(vp, tblsz)	((((intptr_t)(vp)) >> 10) & ((tblsz) - 1))

/*
 * The following macros can only be safely used when the desired bucket
 * is already locked.
 */
/*
 * The lock in the hashtable associated with the given vnode.
 */
#define	TABLE_LOCK(vp, li) \
	(&(li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_lock)

/*
 * The bucket in the hashtable that the given vnode hashes to.
 */
#define	TABLE_BUCKET(vp, li) \
	((li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_chain)

/*
 * Number of elements currently in the bucket that the vnode hashes to.
 */
#define	TABLE_COUNT(vp, li) \
	((li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_count)
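/*
 * Worked example (illustrative, not from the original source): since
 * the table size is a power of two, "& ((tblsz) - 1)" reduces the
 * shifted address modulo the table size with a single mask rather than
 * a division.  With the default 64-bucket table, ltablehash(vp, 64)
 * thus selects bits 10..15 of the vnode address; the low ten bits are
 * discarded first, presumably because kmem-allocated vnodes share much
 * of their low-order address bits (object size and alignment), which
 * would otherwise cluster entries into a few buckets.
 */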
/*
 * Grab/Drop the lock for the bucket this vnode hashes to.
 */
#define	TABLE_LOCK_ENTER(vp, li)	table_lock_enter(vp, li)
#define	TABLE_LOCK_EXIT(vp, li) \
	mutex_exit(&(li)->li_hashtable[ltablehash((vp), \
	    (li)->li_htsize)].lh_lock)

static lnode_t *lfind(struct vnode *, struct loinfo *);
static void lsave(lnode_t *, struct loinfo *);
static struct vfs *makelfsnode(struct vfs *, struct loinfo *);
static struct lfsnode *lfsfind(struct vfs *, struct loinfo *);

uint_t lo_resize_threshold = 1;
uint_t lo_resize_factor = 2;

static kmem_cache_t *lnode_cache;

/*
 * Since the hashtable itself isn't protected by a lock, obtaining a
 * per-bucket lock proceeds as follows:
 *
 * (a) li->li_htlock protects li->li_hashtable, li->li_htsize, and
 * li->li_retired.
 *
 * (b) Per-bucket locks (lh_lock) protect the contents of the bucket.
 *
 * (c) Locking order for resizing the hashtable is li_htlock then
 * lh_lock.
 *
 * To grab the bucket lock we:
 *
 * (1) Stash away the htsize and the pointer to the hashtable to make
 * sure neither changes while we're using them.
 *
 * (2) lgrow() updates the pointer to the hashtable before it updates
 * the size: the worst case scenario is that we have the wrong size (but
 * the correct table), so we hash to the wrong bucket, grab the wrong
 * lock, and then realize that things have changed, rewind and start
 * again. If both the size and the table changed since we loaded them,
 * we'll realize that too and restart.
 *
 * (3) The protocol for growing the hashtable involves holding *all* the
 * locks in the table, hence the unlocking code (TABLE_LOCK_EXIT())
 * doesn't need to do any dances, since neither the table nor the size
 * can change while any bucket lock is held.
 *
 * (4) If the hashtable is growing (by thread t1) while another thread
 * (t2) is trying to grab a bucket lock, t2 might have a stale reference
 * to li->li_htsize:
 *
 * - t1 grabs all locks in lgrow()
 * - t2 loads li->li_htsize and li->li_hashtable
 * - t1 changes li->li_hashtable
 * - t2 loads from an offset in the "stale" hashtable and tries to grab
 * the relevant mutex.
 *
 * If t1 had freed the stale hashtable, t2 would be in trouble. Hence,
 * stale hashtables are not freed but stored in a list of "retired"
 * hashtables, which is emptied when the filesystem is unmounted.
 */
static void
table_lock_enter(vnode_t *vp, struct loinfo *li)
{
	struct lobucket *chain;
	uint_t htsize;
	uint_t hash;

	for (;;) {
		htsize = li->li_htsize;
		membar_consumer();
		chain = (struct lobucket *)li->li_hashtable;
		hash = ltablehash(vp, htsize);
		mutex_enter(&chain[hash].lh_lock);
		if (li->li_hashtable == chain && li->li_htsize == htsize)
			break;
		mutex_exit(&chain[hash].lh_lock);
	}
}
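/*
 * Illustrative walkthrough (not from the original source): suppose the
 * table is growing from 64 to 256 buckets.  t2 loads htsize (64) and
 * is preempted; t1 installs the 256-bucket table and then updates
 * li_htsize.  When t2 resumes, chain points at the *new* table, but
 * the stale mask (64 - 1) may select the wrong bucket.  Locking that
 * bucket is still safe, since the new table is fully allocated and its
 * bucket locks are usable before the table is published; the
 * revalidation after mutex_enter() then fails (li_htsize != htsize),
 * so t2 drops the lock and retries with a consistent snapshot.
 */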
void
lofs_subrinit(void)
{
	/*
	 * Initialize the cache.
	 */
	lnode_cache = kmem_cache_create("lnode_cache", sizeof (lnode_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
lofs_subrfini(void)
{
	kmem_cache_destroy(lnode_cache);
}

/*
 * Initialize a (struct loinfo), and initialize the hashtable to have
 * htsize buckets.
 */
void
lsetup(struct loinfo *li, uint_t htsize)
{
	li->li_refct = 0;
	li->li_lfs = NULL;
	if (htsize == 0)
		htsize = LOFS_DEFAULT_HTSIZE;
	li->li_htsize = htsize;
	li->li_hashtable = kmem_zalloc(htsize * sizeof (*li->li_hashtable),
	    KM_SLEEP);
	mutex_init(&li->li_lfslock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&li->li_htlock, NULL, MUTEX_DEFAULT, NULL);
	li->li_retired = NULL;
}

/*
 * Destroy a (struct loinfo).
 */
void
ldestroy(struct loinfo *li)
{
	uint_t i, htsize;
	struct lobucket *table;
	struct lo_retired_ht *lrhp, *trhp;

	mutex_destroy(&li->li_htlock);
	mutex_destroy(&li->li_lfslock);
	htsize = li->li_htsize;
	table = li->li_hashtable;
	for (i = 0; i < htsize; i++)
		mutex_destroy(&table[i].lh_lock);
	kmem_free(table, htsize * sizeof (*li->li_hashtable));

	/*
	 * Free the retired hashtables.
	 */
	lrhp = li->li_retired;
	while (lrhp != NULL) {
		trhp = lrhp;
		lrhp = lrhp->lrh_next;
		kmem_free(trhp->lrh_table,
		    trhp->lrh_size * sizeof (*li->li_hashtable));
		kmem_free(trhp, sizeof (*trhp));
	}
	li->li_retired = NULL;
}

/*
 * Return a looped-back vnode for the given vnode.  If no lnode exists
 * for this vnode, create one and put it in a table hashed by vnode.
 * If the lnode for this vnode is already in the table, return it (the
 * ref count is incremented by lfind).  The lnode will be flushed from
 * the table when lo_inactive calls freelonode.
 *
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li)
{
	lnode_t *lp, *tlp;
	struct vfs *vfsp;
	vnode_t *nvp;

	TABLE_LOCK_ENTER(vp, li);
	if ((lp = lfind(vp, li)) == NULL) {
		/*
		 * Optimistically assume that we won't need to sleep.
		 */
		lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
		nvp = vn_alloc(KM_NOSLEEP);
		if (lp == NULL || nvp == NULL) {
			TABLE_LOCK_EXIT(vp, li);
			/* The lnode allocation may have succeeded, save it */
			tlp = lp;
			if (tlp == NULL) {
				tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
			}
			if (nvp == NULL) {
				nvp = vn_alloc(KM_SLEEP);
			}
			TABLE_LOCK_ENTER(vp, li);
			if ((lp = lfind(vp, li)) != NULL) {
				kmem_cache_free(lnode_cache, tlp);
				vn_free(nvp);
				VN_RELE(vp);
				goto found_lnode;
			}
			lp = tlp;
		}
		atomic_add_32(&li->li_refct, 1);
		vfsp = makelfsnode(vp->v_vfsp, li);
		lp->lo_vnode = nvp;
		VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
		nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
		vn_setops(nvp, lo_vnodeops);
		nvp->v_data = (caddr_t)lp;
		lp->lo_vp = vp;
		lp->lo_looping = 0;
		lsave(lp, li);
		vn_exists(vp);
	} else {
		VN_RELE(vp);
	}

found_lnode:
	TABLE_LOCK_EXIT(vp, li);
	return (ltov(lp));
}
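/*
 * Hypothetical caller sketch (not part of the original file; the
 * LO_EXAMPLE guard and lo_example_wrap() are invented for
 * illustration).  It shows the contract documented above: the caller
 * passes a held vnode, and makelonode() either keeps that hold in a
 * new shadow vnode or, if a shadow already exists, releases the hold
 * and returns the existing one (which lfind() has VN_HOLD()ed).
 */
#ifdef LO_EXAMPLE
static vnode_t *
lo_example_wrap(vnode_t *realvp, struct loinfo *li)
{
	VN_HOLD(realvp);	/* makelonode() consumes this hold */
	return (makelonode(realvp, li));
}
#endif /* LO_EXAMPLE */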
/*
 * Get/Make vfs structure for given real vfs
 */
static struct vfs *
makelfsnode(struct vfs *vfsp, struct loinfo *li)
{
	struct lfsnode *lfs;
	struct lfsnode *tlfs;

	/*
	 * Don't grab any locks for the fast (common) case.
	 */
	if (vfsp == li->li_realvfs)
		return (li->li_mountvfs);
	ASSERT(li->li_refct > 0);
	mutex_enter(&li->li_lfslock);
	if ((lfs = lfsfind(vfsp, li)) == NULL) {
		mutex_exit(&li->li_lfslock);
		lfs = kmem_zalloc(sizeof (*lfs), KM_SLEEP);
		mutex_enter(&li->li_lfslock);
		if ((tlfs = lfsfind(vfsp, li)) != NULL) {
			kmem_free(lfs, sizeof (*lfs));
			lfs = tlfs;
			goto found_lfs;
		}
		lfs->lfs_realvfs = vfsp;

		/*
		 * Even though the lfsnode is strictly speaking a private
		 * implementation detail of lofs, it should behave as a
		 * regular vfs_t for the benefit of the rest of the kernel.
		 */
		VFS_INIT(&lfs->lfs_vfs, lo_vfsops, (caddr_t)li);
		lfs->lfs_vfs.vfs_fstype = li->li_mountvfs->vfs_fstype;
		lfs->lfs_vfs.vfs_flag =
		    ((vfsp->vfs_flag | li->li_mflag) & ~li->li_dflag) &
		    INHERIT_VFS_FLAG;
		lfs->lfs_vfs.vfs_bsize = vfsp->vfs_bsize;
		lfs->lfs_vfs.vfs_dev = vfsp->vfs_dev;
		lfs->lfs_vfs.vfs_fsid = vfsp->vfs_fsid;

		if (vfsp->vfs_mntpt != NULL) {
			lfs->lfs_vfs.vfs_mntpt = vfs_getmntpoint(vfsp);
			/* Leave a reference to the mountpoint */
		}

		(void) VFS_ROOT(vfsp, &lfs->lfs_realrootvp);

		/*
		 * We use 1 instead of 0 as the value to associate with
		 * an idle lfs_vfs.  This is to prevent VFS_RELE()
		 * trying to kmem_free() our lfs_t (which is the wrong
		 * size).
		 */
		VFS_HOLD(&lfs->lfs_vfs);
		lfs->lfs_next = li->li_lfs;
		li->li_lfs = lfs;
	}

found_lfs:
	VFS_HOLD(&lfs->lfs_vfs);
	mutex_exit(&li->li_lfslock);
	return (&lfs->lfs_vfs);
}

/*
 * Free lfs node since no longer in use
 */
static void
freelfsnode(struct lfsnode *lfs, struct loinfo *li)
{
	struct lfsnode *prev = NULL;
	struct lfsnode *this;

	ASSERT(MUTEX_HELD(&li->li_lfslock));
	ASSERT(li->li_refct > 0);
	for (this = li->li_lfs; this != NULL; this = this->lfs_next) {
		if (this == lfs) {
			ASSERT(lfs->lfs_vfs.vfs_count == 1);
			if (prev == NULL)
				li->li_lfs = lfs->lfs_next;
			else
				prev->lfs_next = lfs->lfs_next;
			if (lfs->lfs_realrootvp != NULL) {
				VN_RELE(lfs->lfs_realrootvp);
			}
			if (lfs->lfs_vfs.vfs_mntpt != NULL)
				refstr_rele(lfs->lfs_vfs.vfs_mntpt);
			sema_destroy(&lfs->lfs_vfs.vfs_reflock);
			kmem_free(lfs, sizeof (struct lfsnode));
			return;
		}
		prev = this;
	}
	panic("freelfsnode");
	/*NOTREACHED*/
}

/*
 * Find lfs given real vfs and mount instance (li)
 */
static struct lfsnode *
lfsfind(struct vfs *vfsp, struct loinfo *li)
{
	struct lfsnode *lfs;

	ASSERT(MUTEX_HELD(&li->li_lfslock));

	/*
	 * We need to handle the case where a UFS filesystem was forced
	 * unmounted and then a subsequent mount got the same vfs
	 * structure.  If the new mount lies in the lofs hierarchy, then
	 * this will confuse lofs, because the original vfsp (of the
	 * forced-unmounted filesystem) is still around.  We check for
	 * this condition here.
	 *
	 * If we find a cached vfsp hit, we check whether the cached
	 * filesystem was forced unmounted, and skip all such entries.
	 * This should be safe to do since no
	 * makelonode()->makelfsnode()->lfsfind() calls should be
	 * generated for such force-unmounted filesystems (because (ufs)
	 * lookup would've returned an error).
	 */
	for (lfs = li->li_lfs; lfs != NULL; lfs = lfs->lfs_next) {
		if (lfs->lfs_realvfs == vfsp) {
			struct vnode *realvp;

			realvp = lfs->lfs_realrootvp;
			if (realvp == NULL)
				continue;
			if (realvp->v_vfsp == NULL || realvp->v_type == VBAD)
				continue;
			return (lfs);
		}
	}
	return (NULL);
}

/*
 * Find real vfs given loopback vfs
 */
struct vfs *
lo_realvfs(struct vfs *vfsp, struct vnode **realrootvpp)
{
	struct loinfo *li = vtoli(vfsp);
	struct lfsnode *lfs;

	ASSERT(li->li_refct > 0);
	if (vfsp == li->li_mountvfs) {
		if (realrootvpp != NULL)
			*realrootvpp = vtol(li->li_rootvp)->lo_vp;
		return (li->li_realvfs);
	}
	mutex_enter(&li->li_lfslock);
	for (lfs = li->li_lfs; lfs != NULL; lfs = lfs->lfs_next) {
		if (vfsp == &lfs->lfs_vfs) {
			if (realrootvpp != NULL)
				*realrootvpp = lfs->lfs_realrootvp;
			mutex_exit(&li->li_lfslock);
			return (lfs->lfs_realvfs);
		}
	}
	panic("lo_realvfs");
	/*NOTREACHED*/
}

/*
 * Lnode lookup stuff.
 * These routines maintain a table of lnodes hashed by vp so
 * that the lnode for a vp can be found if it already exists.
 *
 * NB: A lofs shadow vnode causes exactly one VN_HOLD() on the
 * underlying vnode.
 */

/*
 * Retire old hashtables.
 */
static void
lretire(struct loinfo *li, struct lobucket *table, uint_t size)
{
	struct lo_retired_ht *lrhp;

	lrhp = kmem_alloc(sizeof (*lrhp), KM_SLEEP);
	lrhp->lrh_table = table;
	lrhp->lrh_size = size;

	mutex_enter(&li->li_htlock);
	lrhp->lrh_next = li->li_retired;
	li->li_retired = lrhp;
	mutex_exit(&li->li_htlock);
}

/*
 * Grow the hashtable.
 */
static void
lgrow(struct loinfo *li, uint_t newsize)
{
	uint_t oldsize;
	uint_t i;
	struct lobucket *oldtable, *newtable;

	/*
	 * It's OK to not have enough memory to resize the hashtable.
	 * We'll go down this path the next time we add something to the
	 * table, and retry the allocation then.
	 */
	if ((newtable = kmem_zalloc(newsize * sizeof (*li->li_hashtable),
	    KM_NOSLEEP)) == NULL)
		return;

	mutex_enter(&li->li_htlock);
	if (newsize <= li->li_htsize) {
		mutex_exit(&li->li_htlock);
		kmem_free(newtable, newsize * sizeof (*li->li_hashtable));
		return;
	}
	oldsize = li->li_htsize;
	oldtable = li->li_hashtable;

	/*
	 * Grab all locks so TABLE_LOCK_ENTER() calls block until the
	 * resize is complete.
	 */
	for (i = 0; i < oldsize; i++)
		mutex_enter(&oldtable[i].lh_lock);
	/*
	 * li->li_hashtable gets set before li->li_htsize, so in the
	 * time between the two assignments, callers of
	 * TABLE_LOCK_ENTER() cannot hash to a bucket beyond oldsize,
	 * hence we only need to grab the locks up to oldsize.
	 */
	for (i = 0; i < oldsize; i++)
		mutex_enter(&newtable[i].lh_lock);
	/*
	 * Rehash.
	 */
	for (i = 0; i < oldsize; i++) {
		lnode_t *tlp, *nlp;

		for (tlp = oldtable[i].lh_chain; tlp != NULL; tlp = nlp) {
			uint_t hash = ltablehash(tlp->lo_vp, newsize);

			nlp = tlp->lo_next;
			tlp->lo_next = newtable[hash].lh_chain;
			newtable[hash].lh_chain = tlp;
			newtable[hash].lh_count++;
		}
	}
	/*
	 * As soon as we store the new hashtable, future locking
	 * operations will use it.  Therefore, we must ensure that all
	 * the state we've just established reaches global visibility
	 * before the new hashtable does.
	 */
	membar_producer();
	li->li_hashtable = newtable;

	/*
	 * table_lock_enter() relies on the fact that li->li_hashtable
	 * is set to its new value before li->li_htsize.
	 */
	membar_producer();
	li->li_htsize = newsize;

	/*
	 * The new state is consistent now, so we can drop all the locks.
	 */
	for (i = 0; i < oldsize; i++) {
		mutex_exit(&newtable[i].lh_lock);
		mutex_exit(&oldtable[i].lh_lock);
	}
	mutex_exit(&li->li_htlock);

	lretire(li, oldtable, oldsize);
}

/*
 * Put a lnode in the table
 */
static void
lsave(lnode_t *lp, struct loinfo *li)
{
	ASSERT(lp->lo_vp);
	ASSERT(MUTEX_HELD(TABLE_LOCK(lp->lo_vp, li)));

#ifdef LODEBUG
	lo_dprint(4, "lsave lp %p hash %d\n",
	    lp, ltablehash(lp->lo_vp, li->li_htsize));
#endif

	TABLE_COUNT(lp->lo_vp, li)++;
	lp->lo_next = TABLE_BUCKET(lp->lo_vp, li);
	TABLE_BUCKET(lp->lo_vp, li) = lp;

	if (li->li_refct > (li->li_htsize << lo_resize_threshold)) {
		TABLE_LOCK_EXIT(lp->lo_vp, li);
		lgrow(li, li->li_htsize << lo_resize_factor);
		TABLE_LOCK_ENTER(lp->lo_vp, li);
	}
}
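/*
 * Worked example (illustrative, not from the original source): with
 * the defaults above (LOFS_DEFAULT_HTSIZE = 64, lo_resize_threshold = 1,
 * lo_resize_factor = 2), the first grow is triggered once li_refct
 * exceeds 64 << 1 = 128 lnodes, i.e. at an average chain depth of two,
 * and the table grows to 64 << 2 = 256 buckets.  The bucket lock is
 * dropped across the lgrow() call because lgrow() acquires li_htlock
 * and then *every* bucket lock; holding a bucket lock here would
 * violate locking order (c) above and deadlock against the resize.
 */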
/*
 * Our version of vfs_rele() that stops at 1 instead of 0, and calls
 * freelfsnode() instead of kmem_free().
 */
static void
lfs_rele(struct lfsnode *lfs, struct loinfo *li)
{
	vfs_t *vfsp = &lfs->lfs_vfs;

	ASSERT(MUTEX_HELD(&li->li_lfslock));
	ASSERT(vfsp->vfs_count > 1);
	if (atomic_add_32_nv(&vfsp->vfs_count, -1) == 1)
		freelfsnode(lfs, li);
}

/*
 * Remove a lnode from the table
 */
void
freelonode(lnode_t *lp)
{
	lnode_t *lt;
	lnode_t *ltprev = NULL;
	struct lfsnode *lfs, *nextlfs;
	struct vfs *vfsp;
	struct vnode *vp = ltov(lp);
	struct vnode *realvp = realvp(vp);
	struct loinfo *li = vtoli(vp->v_vfsp);

#ifdef LODEBUG
	lo_dprint(4, "freelonode lp %p hash %d\n",
	    lp, ltablehash(lp->lo_vp, li->li_htsize));
#endif
	TABLE_LOCK_ENTER(lp->lo_vp, li);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;	/* release our hold from vn_rele */
		mutex_exit(&vp->v_lock);
		TABLE_LOCK_EXIT(lp->lo_vp, li);
		return;
	}
	mutex_exit(&vp->v_lock);

	for (lt = TABLE_BUCKET(lp->lo_vp, li); lt != NULL;
	    ltprev = lt, lt = lt->lo_next) {
		if (lt == lp) {
#ifdef LODEBUG
			lo_dprint(4, "freeing %p, vfsp %p\n",
			    vp, vp->v_vfsp);
#endif
			atomic_add_32(&li->li_refct, -1);
			vfsp = vp->v_vfsp;
			vn_invalid(vp);
			if (vfsp != li->li_mountvfs) {
				mutex_enter(&li->li_lfslock);
				/*
				 * Check for unused lfs
				 */
				lfs = li->li_lfs;
				while (lfs != NULL) {
					nextlfs = lfs->lfs_next;
					if (vfsp == &lfs->lfs_vfs) {
						lfs_rele(lfs, li);
						break;
					}
					if (lfs->lfs_vfs.vfs_count == 1) {
						/*
						 * Lfs is idle
						 */
						freelfsnode(lfs, li);
					}
					lfs = nextlfs;
				}
				mutex_exit(&li->li_lfslock);
			}
			if (ltprev == NULL) {
				TABLE_BUCKET(lt->lo_vp, li) = lt->lo_next;
			} else {
				ltprev->lo_next = lt->lo_next;
			}
			TABLE_COUNT(lt->lo_vp, li)--;
			TABLE_LOCK_EXIT(lt->lo_vp, li);
			kmem_cache_free(lnode_cache, lt);
			vn_free(vp);
			VN_RELE(realvp);
			return;
		}
	}
	panic("freelonode");
	/*NOTREACHED*/
}

/*
 * Lookup a lnode by vp
 */
static lnode_t *
lfind(struct vnode *vp, struct loinfo *li)
{
	lnode_t *lt;

	ASSERT(MUTEX_HELD(TABLE_LOCK(vp, li)));

	lt = TABLE_BUCKET(vp, li);
	while (lt != NULL) {
		if (lt->lo_vp == vp) {
			VN_HOLD(ltov(lt));
			return (lt);
		}
		lt = lt->lo_next;
	}
	return (NULL);
}

#ifdef LODEBUG
static int lofsdebug;
#endif /* LODEBUG */

/*
 * Utilities used by both client and server
 * Standard levels:
 * 0) no debugging
 * 1) hard failures
 * 2) soft failures
 * 3) current test software
 * 4) main procedure entry points
 * 5) main procedure exit points
 * 6) utility procedure entry points
 * 7) utility procedure exit points
 * 8) obscure procedure entry points
 * 9) obscure procedure exit points
 * 10) random stuff
 * 11) all <= 1
 * 12) all <= 2
 * 13) all <= 3
 * ...
 */

#ifdef LODEBUG
/*VARARGS2*/
lo_dprint(level, str, a1, a2, a3, a4, a5, a6, a7, a8, a9)
	int level;
	char *str;
	int a1, a2, a3, a4, a5, a6, a7, a8, a9;
{
	if (lofsdebug == level || (lofsdebug > 10 && (lofsdebug - 10) >= level))
		printf(str, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
#endif