/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * The idea behind composition-based stacked filesystems is to add a
 * vnode to the stack of vnodes for each mount. These vnodes have their
 * own set of mount options and filesystem-specific functions, so they
 * can modify data or operations before they are passed along. Such a
 * filesystem must maintain a mapping from the underlying vnodes to its
 * interposing vnodes.
 *
 * In lofs, this mapping is implemented by a hashtable. Each bucket
 * contains a count of the number of nodes currently contained, the
 * chain of vnodes, and a lock to protect the list of vnodes. The
 * hashtable dynamically grows if the number of vnodes in the table as a
 * whole exceeds the size of the table left-shifted by
 * lo_resize_threshold. In order to minimize lock contention, there is
 * no global lock protecting the hashtable, so obtaining a per-bucket
 * lock is a small dance to make sure we've actually locked the correct
 * bucket. Since acquiring a bucket lock never locks the hashtable
 * itself, a thread may briefly be using a pointer to a hashtable that
 * has since been replaced; old hashtables are therefore never freed
 * immediately, but are kept on a linked list of retired hashtables that
 * is freed when the filesystem is unmounted.
 */

#include <sys/param.h>
#include <sys/kmem.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/t_lock.h>
#include <sys/debug.h>
#include <sys/atomic.h>

#include <sys/fs/lofs_node.h>
#include <sys/fs/lofs_info.h>

/*
 * Due to the hashing algorithm, the size of the hash table needs to be a
 * power of 2.
 */
#define	LOFS_DEFAULT_HTSIZE	(1 << 6)

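/*
 * Hash a vnode to a bucket index: shift out the low-order bits of the
 * vnode address and mask with (table size - 1), which is why the table
 * size must be a power of 2.
 */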
#define	ltablehash(vp, tblsz)	((((intptr_t)(vp))>>10) & ((tblsz)-1))

/*
 * The following macros can only be safely used when the desired bucket
 * is already locked.
 */
/*
 * The lock in the hashtable associated with the given vnode.
 */
#define	TABLE_LOCK(vp, li)	\
	(&(li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_lock)

/*
 * The bucket in the hashtable that the given vnode hashes to.
 */
#define	TABLE_BUCKET(vp, li)	\
	((li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_chain)

/*
 * Number of elements currently in the bucket that the vnode hashes to.
 */
#define	TABLE_COUNT(vp, li)	\
	((li)->li_hashtable[ltablehash((vp), (li)->li_htsize)].lh_count)

/*
 * Grab/Drop the lock for the bucket this vnode hashes to.
 */
#define	TABLE_LOCK_ENTER(vp, li)	table_lock_enter(vp, li)
#define	TABLE_LOCK_EXIT(vp, li)		\
	mutex_exit(&(li)->li_hashtable[ltablehash((vp),	\
	    (li)->li_htsize)].lh_lock)

static lnode_t *lfind(struct vnode *, struct loinfo *);
static void lsave(lnode_t *, struct loinfo *);
static struct vfs *makelfsnode(struct vfs *, struct loinfo *);
static struct lfsnode *lfsfind(struct vfs *, struct loinfo *);

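/*
 * Resize tunables: lsave() grows the hashtable once the number of lnodes
 * in the table exceeds li_htsize << lo_resize_threshold, and each resize
 * multiplies the table size by 1 << lo_resize_factor (see lgrow()).
 */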
uint_t lo_resize_threshold = 1;
uint_t lo_resize_factor = 2;

static kmem_cache_t *lnode_cache;

/*
 * Since the hashtable itself isn't protected by a lock, obtaining a
 * per-bucket lock proceeds as follows:
 *
 * (a) li->li_htlock protects li->li_hashtable, li->li_htsize, and
 * li->li_retired.
 *
 * (b) Per-bucket locks (lh_lock) protect the contents of the bucket.
 *
 * (c) Locking order for resizing the hashtable is li_htlock then
 * lh_lock.
 *
 * To grab the bucket lock we:
 *
 * (1) Stash away the htsize and the pointer to the hashtable to make
 * sure neither changes while we're using them.
 *
 * (2) lgrow() updates the pointer to the hashtable before it updates
 * the size: the worst case scenario is that we have the wrong size (but
 * the correct table), so we hash to the wrong bucket, grab the wrong
 * lock, and then realize that things have changed, rewind and start
 * again. If both the size and the table changed since we loaded them,
 * we'll realize that too and restart.
 *
 * (3) The protocol for growing the hashtable involves holding *all* the
 * locks in the table, hence the unlocking code (TABLE_LOCK_EXIT())
 * doesn't need to do any dances, since neither the table nor the size
 * can change while any bucket lock is held.
 *
 * (4) If the hashtable is growing (by thread t1) while another thread
 * (t2) is trying to grab a bucket lock, t2 might have a stale reference
 * to li->li_htsize:
 *
 * - t1 grabs all locks in lgrow()
 * 	- t2 loads li->li_htsize and li->li_hashtable
 * - t1 changes li->li_hashtable
 * 	- t2 loads from an offset in the "stale" hashtable and tries to grab
 * 	the relevant mutex.
 *
 * If t1 had freed the stale hashtable, t2 would be in trouble. Hence,
 * stale hashtables are not freed but stored in a list of "retired"
 * hashtables, which is emptied when the filesystem is unmounted.
 */
static void
table_lock_enter(vnode_t *vp, struct loinfo *li)
{
	struct lobucket *chain;
	uint_t htsize;
	uint_t hash;

	for (;;) {
		htsize = li->li_htsize;
		membar_consumer();
		chain = (struct lobucket *)li->li_hashtable;
		hash = ltablehash(vp, htsize);
		mutex_enter(&chain[hash].lh_lock);
		if (li->li_hashtable == chain && li->li_htsize == htsize)
			break;
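		/*
		 * The table or its size changed between our loads above
		 * and the mutex_enter(); drop the stale bucket lock and
		 * retry.
		 */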
		mutex_exit(&chain[hash].lh_lock);
	}
}

void
lofs_subrinit(void)
{
	/*
	 * Initialize the cache.
	 */
	lnode_cache = kmem_cache_create("lnode_cache", sizeof (lnode_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
lofs_subrfini(void)
{
	kmem_cache_destroy(lnode_cache);
}

/*
 * Initialize a (struct loinfo), and initialize the hashtable to have
 * htsize buckets.
 */
void
lsetup(struct loinfo *li, uint_t htsize)
{
	li->li_refct = 0;
	li->li_lfs = NULL;
	if (htsize == 0)
		htsize = LOFS_DEFAULT_HTSIZE;
	li->li_htsize = htsize;
	li->li_hashtable = kmem_zalloc(htsize * sizeof (*li->li_hashtable),
	    KM_SLEEP);
	mutex_init(&li->li_lfslock, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&li->li_htlock, NULL, MUTEX_DEFAULT, NULL);
	li->li_retired = NULL;
}

/*
 * Destroy a (struct loinfo)
 */
void
ldestroy(struct loinfo *li)
{
	uint_t i, htsize;
	struct lobucket *table;
	struct lo_retired_ht *lrhp, *trhp;

	mutex_destroy(&li->li_htlock);
	mutex_destroy(&li->li_lfslock);
	htsize = li->li_htsize;
	table = li->li_hashtable;
	for (i = 0; i < htsize; i++)
		mutex_destroy(&table[i].lh_lock);
	kmem_free(table, htsize * sizeof (*li->li_hashtable));

	/*
	 * Free the retired hashtables.
	 */
	lrhp = li->li_retired;
	while (lrhp != NULL) {
		trhp = lrhp;
		lrhp = lrhp->lrh_next;
		kmem_free(trhp->lrh_table,
		    trhp->lrh_size * sizeof (*li->li_hashtable));
		kmem_free(trhp, sizeof (*trhp));
	}
	li->li_retired = NULL;
}

/*
 * Return a looped-back vnode for the given vnode.  If no lnode exists
 * for this vnode, create one and put it in a table hashed by vnode; if
 * the lnode for this vnode is already in the table, return it (its
 * reference count is incremented by lfind()).  The lnode will be
 * flushed from the table when lo_inactive() calls freelonode().
 *
 * The creation of a new lnode can be forced via the LOF_FORCE flag even
 * if the vnode already exists in the table.  This is used in the
 * creation of a terminating lnode when looping is detected; a unique
 * lnode is required for the correct evaluation of the current working
 * directory.
 *
 * NOTE: vp is assumed to be a held vnode.
 */
struct vnode *
makelonode(struct vnode *vp, struct loinfo *li, int flag)
{
	lnode_t *lp, *tlp;
	struct vfs *vfsp;
	vnode_t *nvp;

	lp = NULL;
	TABLE_LOCK_ENTER(vp, li);
	if (flag != LOF_FORCE)
		lp = lfind(vp, li);
	if ((flag == LOF_FORCE) || (lp == NULL)) {
		/*
		 * Optimistically assume that we won't need to sleep.
		 */
		lp = kmem_cache_alloc(lnode_cache, KM_NOSLEEP);
		nvp = vn_alloc(KM_NOSLEEP);
		if (lp == NULL || nvp == NULL) {
			TABLE_LOCK_EXIT(vp, li);
			/* The lnode allocation may have succeeded, save it */
			tlp = lp;
			if (tlp == NULL) {
				tlp = kmem_cache_alloc(lnode_cache, KM_SLEEP);
			}
			if (nvp == NULL) {
				nvp = vn_alloc(KM_SLEEP);
			}
			lp = NULL;
			TABLE_LOCK_ENTER(vp, li);
			if (flag != LOF_FORCE)
				lp = lfind(vp, li);
			if (lp != NULL) {
				kmem_cache_free(lnode_cache, tlp);
				vn_free(nvp);
				VN_RELE(vp);
				goto found_lnode;
			}
			lp = tlp;
		}
		atomic_add_32(&li->li_refct, 1);
		vfsp = makelfsnode(vp->v_vfsp, li);
		lp->lo_vnode = nvp;
		VN_SET_VFS_TYPE_DEV(nvp, vfsp, vp->v_type, vp->v_rdev);
		nvp->v_flag |= (vp->v_flag & (VNOMOUNT|VNOMAP|VDIROPEN));
		vn_setops(nvp, lo_vnodeops);
		nvp->v_data = (caddr_t)lp;
		lp->lo_vp = vp;
		lp->lo_looping = 0;
		lsave(lp, li);
		vn_exists(vp);
	} else {
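		/*
		 * lfind() returned an existing lnode, which already accounts
		 * for the single hold lofs keeps on the underlying vnode;
		 * drop the hold our caller passed in with vp.
		 */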
		VN_RELE(vp);
	}

found_lnode:
	TABLE_LOCK_EXIT(vp, li);
	return (ltov(lp));
}

/*
 * Get/Make vfs structure for given real vfs
 */
static struct vfs *
makelfsnode(struct vfs *vfsp, struct loinfo *li)
{
	struct lfsnode *lfs;
	struct lfsnode *tlfs;

	/*
	 * Don't grab any locks for the fast (common) case.
	 */
	if (vfsp == li->li_realvfs)
		return (li->li_mountvfs);
	ASSERT(li->li_refct > 0);
	mutex_enter(&li->li_lfslock);
	if ((lfs = lfsfind(vfsp, li)) == NULL) {
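		/*
		 * Drop li_lfslock across the sleeping allocation, then
		 * re-check under the lock in case another thread created
		 * the lfsnode in the meantime.
		 */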
		mutex_exit(&li->li_lfslock);
		lfs = kmem_zalloc(sizeof (*lfs), KM_SLEEP);
		mutex_enter(&li->li_lfslock);
		if ((tlfs = lfsfind(vfsp, li)) != NULL) {
			kmem_free(lfs, sizeof (*lfs));
			lfs = tlfs;
			goto found_lfs;
		}
		lfs->lfs_realvfs = vfsp;

		/*
		 * Even though the lfsnode is strictly speaking a private
		 * implementation detail of lofs, it should behave as a regular
		 * vfs_t for the benefit of the rest of the kernel.
		 */
		VFS_INIT(&lfs->lfs_vfs, lo_vfsops, (caddr_t)li);
		lfs->lfs_vfs.vfs_fstype = li->li_mountvfs->vfs_fstype;
		lfs->lfs_vfs.vfs_flag =
		    ((vfsp->vfs_flag | li->li_mflag) & ~li->li_dflag) &
		    INHERIT_VFS_FLAG;
		lfs->lfs_vfs.vfs_bsize = vfsp->vfs_bsize;
		lfs->lfs_vfs.vfs_dev = vfsp->vfs_dev;
		lfs->lfs_vfs.vfs_fsid = vfsp->vfs_fsid;

		if (vfsp->vfs_mntpt != NULL) {
			lfs->lfs_vfs.vfs_mntpt = vfs_getmntpoint(vfsp);
			/* Leave a reference to the mountpoint */
		}

		(void) VFS_ROOT(vfsp, &lfs->lfs_realrootvp);

		/*
		 * We use 1 instead of 0 as the value to associate with
		 * an idle lfs_vfs.  This is to prevent VFS_RELE()
		 * trying to kmem_free() our lfs_t (which is the wrong
		 * size).
		 */
		VFS_HOLD(&lfs->lfs_vfs);
		lfs->lfs_next = li->li_lfs;
		li->li_lfs = lfs;
	}

found_lfs:
	VFS_HOLD(&lfs->lfs_vfs);
	mutex_exit(&li->li_lfslock);
	return (&lfs->lfs_vfs);
}

/*
 * Free lfs node since no longer in use
 */
static void
freelfsnode(struct lfsnode *lfs, struct loinfo *li)
{
	struct lfsnode *prev = NULL;
	struct lfsnode *this;

	ASSERT(MUTEX_HELD(&li->li_lfslock));
	ASSERT(li->li_refct > 0);
	for (this = li->li_lfs; this != NULL; this = this->lfs_next) {
		if (this == lfs) {
			ASSERT(lfs->lfs_vfs.vfs_count == 1);
			if (prev == NULL)
				li->li_lfs = lfs->lfs_next;
			else
				prev->lfs_next = lfs->lfs_next;
			if (lfs->lfs_realrootvp != NULL) {
				VN_RELE(lfs->lfs_realrootvp);
			}
			if (lfs->lfs_vfs.vfs_mntpt != NULL)
				refstr_rele(lfs->lfs_vfs.vfs_mntpt);
			sema_destroy(&lfs->lfs_vfs.vfs_reflock);
			kmem_free(lfs, sizeof (struct lfsnode));
			return;
		}
		prev = this;
	}
	panic("freelfsnode");
	/*NOTREACHED*/
}

/*
 * Find lfs given real vfs and mount instance (li)
 */
static struct lfsnode *
lfsfind(struct vfs *vfsp, struct loinfo *li)
{
	struct lfsnode *lfs;

	ASSERT(MUTEX_HELD(&li->li_lfslock));

	/*
	 * We need to handle the case where a UFS filesystem was forced
	 * unmounted and then a subsequent mount got the same vfs
	 * structure.  If the new mount lies in the lofs hierarchy, then
	 * this will confuse lofs, because the original vfsp (of the
	 * forced unmounted filesystem) is still around.  We check for
	 * this condition here.
	 *
	 * If we find a cached vfsp hit, then we check to see if the
	 * cached filesystem was forced unmounted, and skip all such
	 * entries.  This should be safe to do since no
	 * makelonode()->makelfsnode()->lfsfind() calls should be
	 * generated for such force-unmounted filesystems (because (ufs)
	 * lookup would've returned an error).
	 */
	for (lfs = li->li_lfs; lfs != NULL; lfs = lfs->lfs_next) {
		if (lfs->lfs_realvfs == vfsp) {
			struct vnode *realvp;

			realvp = lfs->lfs_realrootvp;
			if (realvp == NULL)
				continue;
			if (realvp->v_vfsp == NULL || realvp->v_type == VBAD)
				continue;
			return (lfs);
		}
	}
	return (NULL);
}

/*
 * Find real vfs given loopback vfs
 */
struct vfs *
lo_realvfs(struct vfs *vfsp, struct vnode **realrootvpp)
{
	struct loinfo *li = vtoli(vfsp);
	struct lfsnode *lfs;

	ASSERT(li->li_refct > 0);
	if (vfsp == li->li_mountvfs) {
		if (realrootvpp != NULL)
			*realrootvpp = vtol(li->li_rootvp)->lo_vp;
		return (li->li_realvfs);
	}
	mutex_enter(&li->li_lfslock);
	for (lfs = li->li_lfs; lfs != NULL; lfs = lfs->lfs_next) {
		if (vfsp == &lfs->lfs_vfs) {
			if (realrootvpp != NULL)
				*realrootvpp = lfs->lfs_realrootvp;
			mutex_exit(&li->li_lfslock);
			return (lfs->lfs_realvfs);
		}
	}
	panic("lo_realvfs");
	/*NOTREACHED*/
}

/*
 * Lnode lookup stuff.
 * These routines maintain a table of lnodes hashed by vp so
 * that the lnode for a vp can be found if it already exists.
 *
 * NB: A lofs shadow vnode causes exactly one VN_HOLD() on the
 * underlying vnode.
 */

/*
 * Retire old hashtables.
 */
static void
lretire(struct loinfo *li, struct lobucket *table, uint_t size)
{
	struct lo_retired_ht *lrhp;

	lrhp = kmem_alloc(sizeof (*lrhp), KM_SLEEP);
	lrhp->lrh_table = table;
	lrhp->lrh_size = size;

	mutex_enter(&li->li_htlock);
	lrhp->lrh_next = li->li_retired;
	li->li_retired = lrhp;
	mutex_exit(&li->li_htlock);
}

/*
 * Grow the hashtable.
 */
static void
lgrow(struct loinfo *li, uint_t newsize)
{
	uint_t oldsize;
	uint_t i;
	struct lobucket *oldtable, *newtable;

	/*
	 * It's OK to not have enough memory to resize the hashtable.
	 * We'll go down this path the next time we add something to the
	 * table, and retry the allocation then.
	 */
	if ((newtable = kmem_zalloc(newsize * sizeof (*li->li_hashtable),
	    KM_NOSLEEP)) == NULL)
		return;

	mutex_enter(&li->li_htlock);
	if (newsize <= li->li_htsize) {
		mutex_exit(&li->li_htlock);
		kmem_free(newtable, newsize * sizeof (*li->li_hashtable));
		return;
	}
	oldsize = li->li_htsize;
	oldtable = li->li_hashtable;

	/*
	 * Grab all locks so TABLE_LOCK_ENTER() calls block until the
	 * resize is complete.
	 */
	for (i = 0; i < oldsize; i++)
		mutex_enter(&oldtable[i].lh_lock);
	/*
	 * li->li_hashtable gets set before li->li_htsize, so in the
	 * time between the two assignments, callers of
	 * TABLE_LOCK_ENTER() cannot hash to a bucket beyond oldsize,
	 * hence we only need to grab the locks up to oldsize.
	 */
	for (i = 0; i < oldsize; i++)
		mutex_enter(&newtable[i].lh_lock);
	/*
	 * Rehash.
	 */
	for (i = 0; i < oldsize; i++) {
		lnode_t *tlp, *nlp;

		for (tlp = oldtable[i].lh_chain; tlp != NULL; tlp = nlp) {
			uint_t hash = ltablehash(tlp->lo_vp, newsize);

			nlp = tlp->lo_next;
			tlp->lo_next = newtable[hash].lh_chain;
			newtable[hash].lh_chain = tlp;
			newtable[hash].lh_count++;
		}
	}

	/*
	 * As soon as we store the new hashtable, future locking operations
	 * will use it.  Therefore, we must ensure that all the state we've
	 * just established reaches global visibility before the new hashtable
	 * does.
	 */
	membar_producer();
	li->li_hashtable = newtable;

	/*
	 * table_lock_enter() relies on the fact that li->li_hashtable
	 * is set to its new value before li->li_htsize.
	 */
	membar_producer();
	li->li_htsize = newsize;

	/*
	 * The new state is consistent now, so we can drop all the locks.
	 */
	for (i = 0; i < oldsize; i++) {
		mutex_exit(&newtable[i].lh_lock);
		mutex_exit(&oldtable[i].lh_lock);
	}
	mutex_exit(&li->li_htlock);

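	/*
	 * Racing callers of table_lock_enter() may still hold a pointer
	 * to the old table, so retire it rather than freeing it.
	 */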
	lretire(li, oldtable, oldsize);
}

/*
 * Put a lnode in the table
 */
static void
lsave(lnode_t *lp, struct loinfo *li)
{
	ASSERT(lp->lo_vp);
	ASSERT(MUTEX_HELD(TABLE_LOCK(lp->lo_vp, li)));

#ifdef LODEBUG
	lo_dprint(4, "lsave lp %p hash %d\n",
	    lp, ltablehash(lp->lo_vp, li->li_htsize));
#endif

	TABLE_COUNT(lp->lo_vp, li)++;
	lp->lo_next = TABLE_BUCKET(lp->lo_vp, li);
	TABLE_BUCKET(lp->lo_vp, li) = lp;

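	/*
	 * If the lnode count for this mount now exceeds the resize
	 * threshold, drop our bucket lock (lgrow() takes li_htlock before
	 * any lh_lock) and grow the table.
	 */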
	if (li->li_refct > (li->li_htsize << lo_resize_threshold)) {
		TABLE_LOCK_EXIT(lp->lo_vp, li);
		lgrow(li, li->li_htsize << lo_resize_factor);
		TABLE_LOCK_ENTER(lp->lo_vp, li);
	}
}

/*
 * Our version of vfs_rele() that stops at 1 instead of 0, and calls
 * freelfsnode() instead of kmem_free().
 */
static void
lfs_rele(struct lfsnode *lfs, struct loinfo *li)
{
	vfs_t *vfsp = &lfs->lfs_vfs;

	ASSERT(MUTEX_HELD(&li->li_lfslock));
	ASSERT(vfsp->vfs_count > 1);
	if (atomic_add_32_nv(&vfsp->vfs_count, -1) == 1)
		freelfsnode(lfs, li);
}

/*
 * Remove a lnode from the table
 */
void
freelonode(lnode_t *lp)
{
	lnode_t *lt;
	lnode_t *ltprev = NULL;
	struct lfsnode *lfs, *nextlfs;
	struct vfs *vfsp;
	struct vnode *vp = ltov(lp);
	struct vnode *realvp = realvp(vp);
	struct loinfo *li = vtoli(vp->v_vfsp);

#ifdef LODEBUG
	lo_dprint(4, "freelonode lp %p hash %d\n",
	    lp, ltablehash(lp->lo_vp, li->li_htsize));
#endif
	TABLE_LOCK_ENTER(lp->lo_vp, li);

	mutex_enter(&vp->v_lock);
	if (vp->v_count > 1) {
		vp->v_count--;	/* release our hold from vn_rele */
		mutex_exit(&vp->v_lock);
		TABLE_LOCK_EXIT(lp->lo_vp, li);
		return;
	}
	mutex_exit(&vp->v_lock);

	for (lt = TABLE_BUCKET(lp->lo_vp, li); lt != NULL;
	    ltprev = lt, lt = lt->lo_next) {
		if (lt == lp) {
#ifdef LODEBUG
			lo_dprint(4, "freeing %p, vfsp %p\n",
			    vp, vp->v_vfsp);
#endif
			atomic_add_32(&li->li_refct, -1);
			vfsp = vp->v_vfsp;
			vn_invalid(vp);
			if (vfsp != li->li_mountvfs) {
				mutex_enter(&li->li_lfslock);
				/*
				 * Check for unused lfs
				 */
				lfs = li->li_lfs;
				while (lfs != NULL) {
					nextlfs = lfs->lfs_next;
					if (vfsp == &lfs->lfs_vfs) {
						lfs_rele(lfs, li);
						break;
					}
					if (lfs->lfs_vfs.vfs_count == 1) {
						/*
						 * Lfs is idle
						 */
						freelfsnode(lfs, li);
					}
					lfs = nextlfs;
				}
				mutex_exit(&li->li_lfslock);
			}
			if (ltprev == NULL) {
				TABLE_BUCKET(lt->lo_vp, li) = lt->lo_next;
			} else {
				ltprev->lo_next = lt->lo_next;
			}
			TABLE_COUNT(lt->lo_vp, li)--;
			TABLE_LOCK_EXIT(lt->lo_vp, li);
			kmem_cache_free(lnode_cache, lt);
			vn_free(vp);
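			/*
			 * Drop the shadow vnode's single hold on the
			 * underlying vnode.
			 */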
			VN_RELE(realvp);
			return;
		}
	}
	panic("freelonode");
	/*NOTREACHED*/
}

/*
 * Lookup a lnode by vp
 */
static lnode_t *
lfind(struct vnode *vp, struct loinfo *li)
{
	lnode_t *lt;

	ASSERT(MUTEX_HELD(TABLE_LOCK(vp, li)));

	lt = TABLE_BUCKET(vp, li);
	while (lt != NULL) {
		if (lt->lo_vp == vp) {
			VN_HOLD(ltov(lt));
			return (lt);
		}
		lt = lt->lo_next;
	}
	return (NULL);
}

#ifdef	LODEBUG
static int lofsdebug;
#endif	/* LODEBUG */

/*
 * Debug printing levels for lo_dprint():
 * 0) no debugging
 * 1) hard failures
 * 2) soft failures
 * 3) current test software
 * 4) main procedure entry points
 * 5) main procedure exit points
 * 6) utility procedure entry points
 * 7) utility procedure exit points
 * 8) obscure procedure entry points
 * 9) obscure procedure exit points
 * 10) random stuff
 * 11) all <= 1
 * 12) all <= 2
 * 13) all <= 3
 * ...
 */

#ifdef LODEBUG
/*VARARGS2*/
lo_dprint(level, str, a1, a2, a3, a4, a5, a6, a7, a8, a9)
	int level;
	char *str;
	int a1, a2, a3, a4, a5, a6, a7, a8, a9;
{

	if (lofsdebug == level || (lofsdebug > 10 && (lofsdebug - 10) >= level))
		printf(str, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
#endif
767