xref: /freebsd/sys/kern/vfs_subr.c (revision d37ea99837e6ad50837fd9fe1771ddf1c3ba6002)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  */
36 
37 /*
38  * External virtual filesystem routines
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/eventhandler.h>
53 #include <sys/extattr.h>
54 #include <sys/fcntl.h>
55 #include <sys/kernel.h>
56 #include <sys/kthread.h>
57 #include <sys/mac.h>
58 #include <sys/malloc.h>
59 #include <sys/mount.h>
60 #include <sys/namei.h>
61 #include <sys/sleepqueue.h>
62 #include <sys/stat.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
65 #include <sys/vmmeter.h>
66 #include <sys/vnode.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_extern.h>
71 #include <vm/pmap.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_kern.h>
75 #include <vm/uma.h>
76 
77 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
78 
79 static void	addalias(struct vnode *vp, struct cdev *nvp_rdev);
80 static void	insmntque(struct vnode *vp, struct mount *mp);
81 static void	vclean(struct vnode *vp, int flags, struct thread *td);
82 static void	vlruvp(struct vnode *vp);
83 static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
84 		    int slpflag, int slptimeo, int *errorp);
85 static void	syncer_shutdown(void *arg, int howto);
86 static int	vtryrecycle(struct vnode *vp);
87 static void	vx_lock(struct vnode *vp);
88 static void	vx_unlock(struct vnode *vp);
89 static void	vgonechrl(struct vnode *vp, struct thread *td);
90 
91 
92 /*
93  * Number of vnodes in existence.  Increased whenever getnewvnode()
94  * allocates a new vnode, never decreased.
95  */
96 static unsigned long	numvnodes;
97 
98 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
99 
100 /*
101  * Conversion tables for conversion from vnode types to inode formats
102  * and back.
103  */
104 enum vtype iftovt_tab[16] = {
105 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
106 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
107 };
108 int vttoif_tab[9] = {
109 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
110 	S_IFSOCK, S_IFIFO, S_IFMT,
111 };
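
/*
 * These tables back the IFTOVT() and VTTOIF() conversion macros in
 * <sys/vnode.h>: IFTOVT() indexes iftovt_tab with the file type bits of
 * an inode mode ((mode & S_IFMT) >> 12), while VTTOIF() indexes
 * vttoif_tab with a vnode type.
 */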
112 
113 /*
114  * List of vnodes that are ready for recycling.
115  */
116 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
117 
118 /*
119  * Minimum number of free vnodes.  If there are fewer free vnodes than this,
120  * getnewvnode() will return a newly allocated vnode.
121  */
122 static u_long wantfreevnodes = 25;
123 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
124 /* Number of vnodes in the free list. */
125 static u_long freevnodes;
126 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
127 
128 /*
129  * Various variables used for debugging the new implementation of
130  * reassignbuf().
131  * XXX these are probably of (very) limited utility now.
132  */
133 static int reassignbufcalls;
134 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
135 static int nameileafonly;
136 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
137 
138 /*
139  * Cache for the mount type id assigned to NFS.  This is used for
140  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
141  */
142 int	nfs_mount_type = -1;
143 
144 /* To keep more than one thread at a time from running vfs_getnewfsid */
145 static struct mtx mntid_mtx;
146 
147 /*
148  * Lock for any access to the following:
149  *	vnode_free_list
150  *	numvnodes
151  *	freevnodes
152  */
153 static struct mtx vnode_free_list_mtx;
154 
155 /*
156  * For any iteration/modification of dev->si_hlist (linked through
157  * v_specnext)
158  */
159 static struct mtx spechash_mtx;
160 
161 /* Publicly exported FS */
162 struct nfs_public nfs_pub;
163 
164 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
165 static uma_zone_t vnode_zone;
166 static uma_zone_t vnodepoll_zone;
167 
168 /* Set to 1 to print out reclaim of active vnodes */
169 int	prtactive;
170 
171 /*
172  * The workitem queue.
173  *
174  * It is useful to delay writes of file data and filesystem metadata
175  * for tens of seconds so that quickly created and deleted files need
176  * not waste disk bandwidth being created and removed. To realize this,
177  * we append vnodes to a "workitem" queue. When running with a soft
178  * updates implementation, most pending metadata dependencies should
179  * not wait for more than a few seconds. Thus, metadata updates on block
180  * devices are delayed only about half the time that file data is delayed.
181  * Similarly, directory updates are more critical, so they are delayed only
182  * about a third of the time that file data is delayed. Thus, there are
183  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
184  * one each second (driven off the filesystem syncer process). The
185  * syncer_delayno variable indicates the next queue that is to be processed.
186  * Items that need to be processed soon are placed in this queue:
187  *
188  *	syncer_workitem_pending[syncer_delayno]
189  *
190  * A delay of fifteen seconds is done by placing the request fifteen
191  * entries later in the queue:
192  *
193  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
194  *
195  */
196 static int syncer_delayno;
197 static long syncer_mask;
198 LIST_HEAD(synclist, vnode);
199 static struct synclist *syncer_workitem_pending;
200 /*
201  * The sync_mtx protects:
202  *	vp->v_synclist
203  *	sync_vnode_count
204  *	syncer_delayno
205  *	syncer_shutdown_iter
206  *	syncer_workitem_pending
207  *	syncer_worklist_len
208  *	rushjob
209  */
210 static struct mtx sync_mtx;
211 
212 #define SYNCER_MAXDELAY		32
213 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
214 static int syncdelay = 30;		/* max time to delay syncing data */
215 static int filedelay = 30;		/* time to delay syncing files */
216 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
217 static int dirdelay = 29;		/* time to delay syncing directories */
218 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
219 static int metadelay = 28;		/* time to delay syncing metadata */
220 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
221 static int rushjob;		/* number of slots to run ASAP */
222 static int stat_rush_requests;	/* number of times I/O speeded up */
223 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
224 
225 /*
226  * Tell the syncer to make three passes through the work list before
227  * shutting down (unless it runs out of work and shuts down sooner).
228  *
229  * Run at 8 times normal speed when shutting down the syncer.  With
230  * the default settings, the syncer will take approximately 12
231  * seconds to shut down, which is less than the default 60-second timeout
232  * in kproc_shutdown().
233  */
234 #define SYNCER_SHUTDOWN_ITER_LIMIT	(3*SYNCER_MAXDELAY)
235 #define SYNCER_SHUTDOWN_SPEEDUP		7
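/*
 * With these defaults a shutdown pass covers 3 * 32 = 96 queue slots at
 * roughly 8 slots per second (one normal slot plus SYNCER_SHUTDOWN_SPEEDUP
 * rushed slots), which is where the approximately 12 second figure above
 * comes from.
 */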
236 static int sync_vnode_count;
237 static int syncer_shutdown_iter;
238 static int syncer_worklist_len;
239 
240 /*
241  * Number of vnodes we want to exist at any one time.  This is mostly used
242  * to size hash tables in vnode-related code.  It is normally not used in
243  * getnewvnode(), as wantfreevnodes is normally nonzero.
244  *
245  * XXX desiredvnodes is historical cruft and should not exist.
246  */
247 int desiredvnodes;
248 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
249     &desiredvnodes, 0, "Maximum number of vnodes");
250 static int minvnodes;
251 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
252     &minvnodes, 0, "Minimum number of vnodes");
253 static int vnlru_nowhere;
254 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
255     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
256 
257 /* Hook for calling soft updates. */
258 int (*softdep_process_worklist_hook)(struct mount *);
259 
260 /*
261  * Initialize the vnode management data structures.
262  */
263 static void
264 vntblinit(void *dummy __unused)
265 {
266 
267 	/*
268 	 * Desiredvnodes is a function of the physical memory size and
269 	 * the kernel's heap size.  Specifically, desiredvnodes scales
270 	 * in proportion to the physical memory size until two fifths
271 	 * of the kernel's heap size is consumed by vnodes and vm
272 	 * objects.
273 	 */
274 	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
275 	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
276 	minvnodes = desiredvnodes / 4;
277 	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
278 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
279 	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
280 	TAILQ_INIT(&vnode_free_list);
281 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
282 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
283 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
284 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
285 	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
286 	/*
287 	 * Initialize the filesystem syncer.
288 	 */
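	/*
	 * hashinit() allocates a power-of-two number of list heads and
	 * stores (size - 1) in syncer_mask, so syncer_maxdelay is
	 * re-derived below as the actual number of queues (32 with the
	 * default SYNCER_MAXDELAY).
	 */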
289 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
290 		&syncer_mask);
291 	syncer_maxdelay = syncer_mask + 1;
292 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
293 }
294 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
295 
296 
297 /*
298  * Mark a mount point as busy. Used to synchronize access and to delay
299  * unmounting. Interlock is not released on failure.
300  */
301 int
302 vfs_busy(mp, flags, interlkp, td)
303 	struct mount *mp;
304 	int flags;
305 	struct mtx *interlkp;
306 	struct thread *td;
307 {
308 	int lkflags;
309 
310 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
311 		if (flags & LK_NOWAIT)
312 			return (ENOENT);
313 		mp->mnt_kern_flag |= MNTK_MWAIT;
314 		/*
315 		 * Since all busy locks are shared except the exclusive
316 		 * lock granted when unmounting, the only place that a
317 		 * wakeup needs to be done is at the release of the
318 		 * exclusive lock at the end of dounmount.
319 		 */
320 		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
321 		return (ENOENT);
322 	}
323 	lkflags = LK_SHARED | LK_NOPAUSE;
324 	if (interlkp)
325 		lkflags |= LK_INTERLOCK;
326 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
327 		panic("vfs_busy: unexpected lock failure");
328 	return (0);
329 }
330 
331 /*
332  * Free a busy filesystem.
333  */
334 void
335 vfs_unbusy(mp, td)
336 	struct mount *mp;
337 	struct thread *td;
338 {
339 
340 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
341 }
342 
343 /*
344  * Lookup a mount point by filesystem identifier.
345  */
346 struct mount *
347 vfs_getvfs(fsid)
348 	fsid_t *fsid;
349 {
350 	register struct mount *mp;
351 
352 	mtx_lock(&mountlist_mtx);
353 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
354 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
355 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
356 			mtx_unlock(&mountlist_mtx);
357 			return (mp);
358 		}
359 	}
360 	mtx_unlock(&mountlist_mtx);
361 	return ((struct mount *) 0);
362 }
363 
364 /*
365  * Get a new unique fsid.  Try to make its val[0] unique, since this value
366  * will be used to create fake device numbers for stat().  Also try (but
367  * not so hard) make its val[0] unique mod 2^16, since some emulators only
368  * support 16-bit device numbers.  We end up with unique val[0]'s for the
369  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
370  *
371  * Keep in mind that several mounts may be running in parallel.  Starting
372  * the search one past where the previous search terminated is both a
373  * micro-optimization and a defense against returning the same fsid to
374  * different mounts.
375  */
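/*
 * The loop below builds val[0] with makedev(): major number 255, and a
 * minor composed of the low byte of the filesystem type number in bits
 * 24-31, the high byte of mntid_base in bits 16-23, and its low byte in
 * bits 0-7.
 */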
376 void
377 vfs_getnewfsid(mp)
378 	struct mount *mp;
379 {
380 	static u_int16_t mntid_base;
381 	fsid_t tfsid;
382 	int mtype;
383 
384 	mtx_lock(&mntid_mtx);
385 	mtype = mp->mnt_vfc->vfc_typenum;
386 	tfsid.val[1] = mtype;
387 	mtype = (mtype & 0xFF) << 24;
388 	for (;;) {
389 		tfsid.val[0] = makedev(255,
390 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
391 		mntid_base++;
392 		if (vfs_getvfs(&tfsid) == NULL)
393 			break;
394 	}
395 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
396 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
397 	mtx_unlock(&mntid_mtx);
398 }
399 
400 /*
401  * Knob to control the precision of file timestamps:
402  *
403  *   0 = seconds only; nanoseconds zeroed.
404  *   1 = seconds and nanoseconds, accurate within 1/HZ.
405  *   2 = seconds and nanoseconds, truncated to microseconds.
406  * >=3 = seconds and nanoseconds, maximum precision.
407  */
408 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
409 
410 static int timestamp_precision = TSP_SEC;
411 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
412     &timestamp_precision, 0, "");
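
/*
 * The precision can be changed at run time through the corresponding
 * sysctl, e.g. "sysctl vfs.timestamp_precision=3" selects full
 * nanosecond resolution.
 */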
413 
414 /*
415  * Get a current timestamp.
416  */
417 void
418 vfs_timestamp(tsp)
419 	struct timespec *tsp;
420 {
421 	struct timeval tv;
422 
423 	switch (timestamp_precision) {
424 	case TSP_SEC:
425 		tsp->tv_sec = time_second;
426 		tsp->tv_nsec = 0;
427 		break;
428 	case TSP_HZ:
429 		getnanotime(tsp);
430 		break;
431 	case TSP_USEC:
432 		microtime(&tv);
433 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
434 		break;
435 	case TSP_NSEC:
436 	default:
437 		nanotime(tsp);
438 		break;
439 	}
440 }
441 
442 /*
443  * Set vnode attributes to VNOVAL
444  */
445 void
446 vattr_null(vap)
447 	register struct vattr *vap;
448 {
449 
450 	vap->va_type = VNON;
451 	vap->va_size = VNOVAL;
452 	vap->va_bytes = VNOVAL;
453 	vap->va_mode = VNOVAL;
454 	vap->va_nlink = VNOVAL;
455 	vap->va_uid = VNOVAL;
456 	vap->va_gid = VNOVAL;
457 	vap->va_fsid = VNOVAL;
458 	vap->va_fileid = VNOVAL;
459 	vap->va_blocksize = VNOVAL;
460 	vap->va_rdev = VNOVAL;
461 	vap->va_atime.tv_sec = VNOVAL;
462 	vap->va_atime.tv_nsec = VNOVAL;
463 	vap->va_mtime.tv_sec = VNOVAL;
464 	vap->va_mtime.tv_nsec = VNOVAL;
465 	vap->va_ctime.tv_sec = VNOVAL;
466 	vap->va_ctime.tv_nsec = VNOVAL;
467 	vap->va_birthtime.tv_sec = VNOVAL;
468 	vap->va_birthtime.tv_nsec = VNOVAL;
469 	vap->va_flags = VNOVAL;
470 	vap->va_gen = VNOVAL;
471 	vap->va_vaflags = 0;
472 }
473 
474 /*
475  * This routine is called when we have too many vnodes.  It attempts
476  * to free <count> vnodes and will potentially free vnodes that still
477  * have VM backing store (VM backing store is typically the cause
478  * of a vnode blowout so we want to do this).  Therefore, this operation
479  * is not considered cheap.
480  *
481  * A number of conditions may prevent a vnode from being reclaimed.
482  * The buffer cache may have references on the vnode, a directory
483  * vnode may still have references due to the namei cache representing
484  * underlying files, or the vnode may be in active use.   It is not
485  * desirable to reuse such vnodes.  These conditions may cause the
486  * number of vnodes to reach some minimum value regardless of what
487  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
488  */
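/*
 * Each call rotates the mount's vnode list (entries are moved from the
 * head to the tail as they are examined) and inspects roughly one tenth
 * of the list, so successive calls cycle through all of the mount's
 * vnodes over time.
 */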
489 static int
490 vlrureclaim(struct mount *mp)
491 {
492 	struct vnode *vp;
493 	int done;
494 	int trigger;
495 	int usevnodes;
496 	int count;
497 
498 	/*
499 	 * Calculate the trigger point, don't allow user
500 	 * screwups to blow us up.   This prevents us from
501 	 * recycling vnodes with lots of resident pages.  We
502 	 * aren't trying to free memory, we are trying to
503 	 * free vnodes.
504 	 */
505 	usevnodes = desiredvnodes;
506 	if (usevnodes <= 0)
507 		usevnodes = 1;
508 	trigger = cnt.v_page_count * 2 / usevnodes;
509 
510 	done = 0;
511 	MNT_ILOCK(mp);
512 	count = mp->mnt_nvnodelistsize / 10 + 1;
513 	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
514 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
515 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
516 
517 		if (vp->v_type != VNON &&
518 		    vp->v_type != VBAD &&
519 		    VI_TRYLOCK(vp)) {
520 			if (VMIGHTFREE(vp) &&           /* critical path opt */
521 			    (vp->v_object == NULL ||
522 			    vp->v_object->resident_page_count < trigger)) {
523 				MNT_IUNLOCK(mp);
524 				vgonel(vp, curthread);
525 				done++;
526 				MNT_ILOCK(mp);
527 			} else
528 				VI_UNLOCK(vp);
529 		}
530 		--count;
531 	}
532 	MNT_IUNLOCK(mp);
533 	return done;
534 }
535 
536 /*
537  * Attempt to recycle vnodes in a context that is always safe to block.
538  * Calling vlrureclaim() from the bowels of filesystem code has some
539  * interesting deadlock problems.
540  */
541 static struct proc *vnlruproc;
542 static int vnlruproc_sig;
543 
544 static void
545 vnlru_proc(void)
546 {
547 	struct mount *mp, *nmp;
548 	int done;
549 	struct proc *p = vnlruproc;
550 	struct thread *td = FIRST_THREAD_IN_PROC(p);
551 
552 	mtx_lock(&Giant);
553 
554 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
555 	    SHUTDOWN_PRI_FIRST);
556 
557 	for (;;) {
558 		kthread_suspend_check(p);
559 		mtx_lock(&vnode_free_list_mtx);
560 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
561 			mtx_unlock(&vnode_free_list_mtx);
562 			vnlruproc_sig = 0;
563 			wakeup(&vnlruproc_sig);
564 			tsleep(vnlruproc, PVFS, "vlruwt", hz);
565 			continue;
566 		}
567 		mtx_unlock(&vnode_free_list_mtx);
568 		done = 0;
569 		mtx_lock(&mountlist_mtx);
570 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
571 			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
572 				nmp = TAILQ_NEXT(mp, mnt_list);
573 				continue;
574 			}
575 			done += vlrureclaim(mp);
576 			mtx_lock(&mountlist_mtx);
577 			nmp = TAILQ_NEXT(mp, mnt_list);
578 			vfs_unbusy(mp, td);
579 		}
580 		mtx_unlock(&mountlist_mtx);
581 		if (done == 0) {
582 #if 0
583 			/* These messages are temporary debugging aids */
584 			if (vnlru_nowhere < 5)
585 				printf("vnlru process getting nowhere..\n");
586 			else if (vnlru_nowhere == 5)
587 				printf("vnlru process messages stopped.\n");
588 #endif
589 			vnlru_nowhere++;
590 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
591 		}
592 	}
593 }
594 
595 static struct kproc_desc vnlru_kp = {
596 	"vnlru",
597 	vnlru_proc,
598 	&vnlruproc
599 };
600 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
601 
602 
603 /*
604  * Routines having to do with the management of the vnode table.
605  */
606 
607 /*
608  * Check to see if a free vnode can be recycled. If it can,
609  * recycle it and return it with the vnode interlock held.
610  */
611 static int
612 vtryrecycle(struct vnode *vp)
613 {
614 	struct thread *td = curthread;
615 	vm_object_t object;
616 	struct mount *vnmp;
617 	int error;
618 
619 	/* Don't recycle if we can't get the interlock */
620 	if (!VI_TRYLOCK(vp))
621 		return (EWOULDBLOCK);
622 	/*
623 	 * This vnode may be found and locked via some other list; if so, we
624 	 * can't recycle it yet.
625 	 */
626 	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
627 		return (EWOULDBLOCK);
628 	/*
629 	 * Don't recycle if its filesystem is being suspended.
630 	 */
631 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
632 		VOP_UNLOCK(vp, 0, td);
633 		return (EBUSY);
634 	}
635 
636 	/*
637 	 * Don't recycle if we still have cached pages.
638 	 */
639 	if (VOP_GETVOBJECT(vp, &object) == 0) {
640 		VM_OBJECT_LOCK(object);
641 		if (object->resident_page_count ||
642 		    object->ref_count) {
643 			VM_OBJECT_UNLOCK(object);
644 			error = EBUSY;
645 			goto done;
646 		}
647 		VM_OBJECT_UNLOCK(object);
648 	}
649 	if (LIST_FIRST(&vp->v_cache_src)) {
650 		/*
651 		 * note: nameileafonly sysctl is temporary,
652 		 * for debugging only, and will eventually be
653 		 * removed.
654 		 */
655 		if (nameileafonly > 0) {
656 			/*
657 			 * Do not reuse namei-cached directory
658 			 * vnodes that have cached
659 			 * subdirectories.
660 			 */
661 			if (cache_leaf_test(vp) < 0) {
662 				error = EISDIR;
663 				goto done;
664 			}
665 		} else if (nameileafonly < 0 ||
666 			    vmiodirenable == 0) {
667 			/*
668 			 * Do not reuse namei-cached directory
669 			 * vnodes if nameileafonly is -1 or
670 			 * if VMIO backing for directories is
671 			 * turned off (otherwise we reuse them
672 			 * too quickly).
673 			 */
674 			error = EBUSY;
675 			goto done;
676 		}
677 	}
678 	/*
679 	 * If we got this far, we need to acquire the interlock and see if
680 	 * anyone picked up this vnode from another list.  If not, we will
681 	 * mark it with XLOCK via vgonel() so that anyone who does find it
682 	 * will skip over it.
683 	 */
684 	VI_LOCK(vp);
685 	if (VSHOULDBUSY(vp) && (vp->v_iflag & VI_XLOCK) == 0) {
686 		VI_UNLOCK(vp);
687 		error = EBUSY;
688 		goto done;
689 	}
690 	mtx_lock(&vnode_free_list_mtx);
691 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
692 	vp->v_iflag &= ~VI_FREE;
693 	mtx_unlock(&vnode_free_list_mtx);
694 	vp->v_iflag |= VI_DOOMED;
695 	if (vp->v_type != VBAD) {
696 		VOP_UNLOCK(vp, 0, td);
697 		vgonel(vp, td);
698 		VI_LOCK(vp);
699 	} else
700 		VOP_UNLOCK(vp, 0, td);
701 	vn_finished_write(vnmp);
702 	return (0);
703 done:
704 	VOP_UNLOCK(vp, 0, td);
705 	vn_finished_write(vnmp);
706 	return (error);
707 }
708 
709 /*
710  * Return the next vnode from the free list.
711  */
712 int
713 getnewvnode(tag, mp, vops, vpp)
714 	const char *tag;
715 	struct mount *mp;
716 	vop_t **vops;
717 	struct vnode **vpp;
718 {
719 	struct vnode *vp = NULL;
720 	struct vpollinfo *pollinfo = NULL;
721 
722 	mtx_lock(&vnode_free_list_mtx);
723 
724 	/*
725 	 * Try to reuse vnodes if we hit the max.  This only occurs on
726 	 * certain large-memory (2G+) systems.  We cannot
727 	 * attempt to directly reclaim vnodes due to nasty recursion
728 	 * problems.
729 	 */
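	/*
	 * The loop below pairs with vnlru_proc(): we wake the vnlru
	 * process and sleep on &vnlruproc_sig; vnlru_proc() clears
	 * vnlruproc_sig and issues the matching wakeup once
	 * numvnodes - freevnodes drops back under 9/10 of desiredvnodes.
	 */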
730 	while (numvnodes - freevnodes > desiredvnodes) {
731 		if (vnlruproc_sig == 0) {
732 			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
733 			wakeup(vnlruproc);
734 		}
735 		mtx_unlock(&vnode_free_list_mtx);
736 		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
737 		mtx_lock(&vnode_free_list_mtx);
738 	}
739 
740 	/*
741 	 * Attempt to reuse a vnode already on the free list, allocating
742 	 * a new vnode if we can't find one or if we have not reached a
743 	 * sufficient minimum for good LRU performance.
744 	 */
745 
746 	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
747 		int error;
748 		int count;
749 
750 		for (count = 0; count < freevnodes; count++) {
751 			vp = TAILQ_FIRST(&vnode_free_list);
752 
753 			KASSERT(vp->v_usecount == 0 &&
754 			    (vp->v_iflag & VI_DOINGINACT) == 0,
755 			    ("getnewvnode: free vnode isn't"));
756 
757 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
758 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
759 			mtx_unlock(&vnode_free_list_mtx);
760 			error = vtryrecycle(vp);
761 			mtx_lock(&vnode_free_list_mtx);
762 			if (error == 0)
763 				break;
764 			vp = NULL;
765 		}
766 	}
767 	if (vp) {
768 		freevnodes--;
769 		mtx_unlock(&vnode_free_list_mtx);
770 
771 #ifdef INVARIANTS
772 		{
773 			if (vp->v_data)
774 				panic("cleaned vnode isn't");
775 			if (vp->v_numoutput)
776 				panic("Clean vnode has pending I/O's");
777 			if (vp->v_writecount != 0)
778 				panic("Non-zero write count");
779 		}
780 #endif
781 		if ((pollinfo = vp->v_pollinfo) != NULL) {
782 			/*
783 			 * To avoid lock order reversals, the call to
784 			 * uma_zfree() must be delayed until the vnode
785 			 * interlock is released.
786 			 */
787 			vp->v_pollinfo = NULL;
788 		}
789 #ifdef MAC
790 		mac_destroy_vnode(vp);
791 #endif
792 		vp->v_iflag = 0;
793 		vp->v_vflag = 0;
794 		vp->v_lastw = 0;
795 		vp->v_lasta = 0;
796 		vp->v_cstart = 0;
797 		vp->v_clen = 0;
798 		vp->v_socket = 0;
799 		lockdestroy(vp->v_vnlock);
800 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
801 		KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
802 		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
803 		KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
804 		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
805 	} else {
806 		numvnodes++;
807 		mtx_unlock(&vnode_free_list_mtx);
808 
809 		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
810 		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
811 		VI_LOCK(vp);
812 		vp->v_dd = vp;
813 		vp->v_vnlock = &vp->v_lock;
814 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
815 		cache_purge(vp);		/* Sets up v_id. */
816 		LIST_INIT(&vp->v_cache_src);
817 		TAILQ_INIT(&vp->v_cache_dst);
818 	}
819 
820 	TAILQ_INIT(&vp->v_cleanblkhd);
821 	TAILQ_INIT(&vp->v_dirtyblkhd);
822 	vp->v_type = VNON;
823 	vp->v_tag = tag;
824 	vp->v_op = vops;
825 	*vpp = vp;
826 	vp->v_usecount = 1;
827 	vp->v_data = 0;
828 	vp->v_cachedid = -1;
829 	VI_UNLOCK(vp);
830 	if (pollinfo != NULL) {
831 		mtx_destroy(&pollinfo->vpi_lock);
832 		uma_zfree(vnodepoll_zone, pollinfo);
833 	}
834 #ifdef MAC
835 	mac_init_vnode(vp);
836 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
837 		mac_associate_vnode_singlelabel(mp, vp);
838 #endif
839 	insmntque(vp, mp);
840 
841 	return (0);
842 }
843 
844 /*
845  * Move a vnode from one mount queue to another.
846  */
847 static void
848 insmntque(vp, mp)
849 	register struct vnode *vp;
850 	register struct mount *mp;
851 {
852 
853 	/*
854 	 * Delete from old mount point vnode list, if on one.
855 	 */
856 	if (vp->v_mount != NULL) {
857 		MNT_ILOCK(vp->v_mount);
858 		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
859 			("bad mount point vnode list size"));
860 		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
861 		vp->v_mount->mnt_nvnodelistsize--;
862 		MNT_IUNLOCK(vp->v_mount);
863 	}
864 	/*
865 	 * Insert into list of vnodes for the new mount point, if available.
866 	 */
867 	if ((vp->v_mount = mp) != NULL) {
868 		MNT_ILOCK(vp->v_mount);
869 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
870 		mp->mnt_nvnodelistsize++;
871 		MNT_IUNLOCK(vp->v_mount);
872 	}
873 }
874 
875 /*
876  * Update outstanding I/O count and do wakeup if requested.
877  */
878 void
879 vwakeup(bp)
880 	register struct buf *bp;
881 {
882 	register struct vnode *vp;
883 
884 	bp->b_flags &= ~B_WRITEINPROG;
885 	if ((vp = bp->b_vp)) {
886 		VI_LOCK(vp);
887 		vp->v_numoutput--;
888 		if (vp->v_numoutput < 0)
889 			panic("vwakeup: neg numoutput");
890 		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
891 			vp->v_iflag &= ~VI_BWAIT;
892 			wakeup(&vp->v_numoutput);
893 		}
894 		VI_UNLOCK(vp);
895 	}
896 }
897 
898 /*
899  * Flush out and invalidate all buffers associated with a vnode.
900  * Called with the underlying object locked.
901  */
902 int
903 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
904 	struct vnode *vp;
905 	int flags;
906 	struct ucred *cred;
907 	struct thread *td;
908 	int slpflag, slptimeo;
909 {
910 	struct buf *blist;
911 	int error;
912 	vm_object_t object;
913 
914 	GIANT_REQUIRED;
915 
916 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
917 
918 	VI_LOCK(vp);
919 	if (flags & V_SAVE) {
920 		while (vp->v_numoutput) {
921 			vp->v_iflag |= VI_BWAIT;
922 			error = msleep(&vp->v_numoutput, VI_MTX(vp),
923 			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
924 			if (error) {
925 				VI_UNLOCK(vp);
926 				return (error);
927 			}
928 		}
929 		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
930 			VI_UNLOCK(vp);
931 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
932 				return (error);
933 			/*
934 			 * XXX We could save a lock/unlock if this was only
935 			 * enabled under INVARIANTS
936 			 */
937 			VI_LOCK(vp);
938 			if (vp->v_numoutput > 0 ||
939 			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
940 				panic("vinvalbuf: dirty bufs");
941 		}
942 	}
943 	/*
944 	 * If you alter this loop please notice that interlock is dropped and
945 	 * reacquired in flushbuflist.  Special care is needed to ensure that
946 	 * no race conditions occur from this.
947 	 */
948 	for (error = 0;;) {
949 		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
950 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
951 			if (error)
952 				break;
953 			continue;
954 		}
955 		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
956 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
957 			if (error)
958 				break;
959 			continue;
960 		}
961 		break;
962 	}
963 	if (error) {
964 		VI_UNLOCK(vp);
965 		return (error);
966 	}
967 
968 	/*
969 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
970 	 * have write I/O in-progress but if there is a VM object then the
971 	 * VM object can also have read-I/O in-progress.
972 	 */
973 	do {
974 		while (vp->v_numoutput > 0) {
975 			vp->v_iflag |= VI_BWAIT;
976 			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
977 		}
978 		VI_UNLOCK(vp);
979 		if (VOP_GETVOBJECT(vp, &object) == 0) {
980 			VM_OBJECT_LOCK(object);
981 			vm_object_pip_wait(object, "vnvlbx");
982 			VM_OBJECT_UNLOCK(object);
983 		}
984 		VI_LOCK(vp);
985 	} while (vp->v_numoutput > 0);
986 	VI_UNLOCK(vp);
987 
988 	/*
989 	 * Destroy the copy in the VM cache, too.
990 	 */
991 	if (VOP_GETVOBJECT(vp, &object) == 0) {
992 		VM_OBJECT_LOCK(object);
993 		vm_object_page_remove(object, 0, 0,
994 			(flags & V_SAVE) ? TRUE : FALSE);
995 		VM_OBJECT_UNLOCK(object);
996 	}
997 
998 #ifdef INVARIANTS
999 	VI_LOCK(vp);
1000 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1001 	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
1002 	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
1003 		panic("vinvalbuf: flush failed");
1004 	VI_UNLOCK(vp);
1005 #endif
1006 	return (0);
1007 }
1008 
1009 /*
1010  * Flush out buffers on the specified list.
1011  *
1012  */
1013 static int
1014 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
1015 	struct buf *blist;
1016 	int flags;
1017 	struct vnode *vp;
1018 	int slpflag, slptimeo;
1019 	int *errorp;
1020 {
1021 	struct buf *bp, *nbp;
1022 	int found, error;
1023 
1024 	ASSERT_VI_LOCKED(vp, "flushbuflist");
1025 
1026 	for (found = 0, bp = blist; bp; bp = nbp) {
1027 		nbp = TAILQ_NEXT(bp, b_vnbufs);
1028 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1029 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1030 			continue;
1031 		}
1032 		found += 1;
1033 		error = BUF_TIMELOCK(bp,
1034 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
1035 		    "flushbuf", slpflag, slptimeo);
1036 		if (error) {
1037 			if (error != ENOLCK)
1038 				*errorp = error;
1039 			goto done;
1040 		}
1041 		/*
1042 		 * XXX Since there are no node locks for NFS, I
1043 		 * believe there is a slight chance that a delayed
1044 		 * write will occur while sleeping just above, so
1045 		 * check for it.  Note that vfs_bio_awrite expects
1046 		 * buffers to reside on a queue, while bwrite and
1047 		 * brelse do not.
1048 		 */
1049 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1050 			(flags & V_SAVE)) {
1051 
1052 			if (bp->b_vp == vp) {
1053 				if (bp->b_flags & B_CLUSTEROK) {
1054 					vfs_bio_awrite(bp);
1055 				} else {
1056 					bremfree(bp);
1057 					bp->b_flags |= B_ASYNC;
1058 					bwrite(bp);
1059 				}
1060 			} else {
1061 				bremfree(bp);
1062 				(void) bwrite(bp);
1063 			}
1064 			goto done;
1065 		}
1066 		bremfree(bp);
1067 		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1068 		bp->b_flags &= ~B_ASYNC;
1069 		brelse(bp);
1070 		VI_LOCK(vp);
1071 	}
1072 	return (found);
1073 done:
1074 	VI_LOCK(vp);
1075 	return (found);
1076 }
1077 
1078 /*
1079  * Truncate a file's buffers and pages to a specified length.  This
1080  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1081  * sync activity.
1082  */
1083 int
1084 vtruncbuf(vp, cred, td, length, blksize)
1085 	register struct vnode *vp;
1086 	struct ucred *cred;
1087 	struct thread *td;
1088 	off_t length;
1089 	int blksize;
1090 {
1091 	register struct buf *bp;
1092 	struct buf *nbp;
1093 	int anyfreed;
1094 	int trunclbn;
1095 
1096 	/*
1097 	 * Round up to the *next* lbn.
1098 	 */
1099 	trunclbn = (length + blksize - 1) / blksize;
1100 
1101 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1102 restart:
1103 	VI_LOCK(vp);
1104 	anyfreed = 1;
1105 	for (;anyfreed;) {
1106 		anyfreed = 0;
1107 		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
1108 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1109 			if (bp->b_lblkno >= trunclbn) {
1110 				if (BUF_LOCK(bp,
1111 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1112 				    VI_MTX(vp)) == ENOLCK)
1113 					goto restart;
1114 
1115 				bremfree(bp);
1116 				bp->b_flags |= (B_INVAL | B_RELBUF);
1117 				bp->b_flags &= ~B_ASYNC;
1118 				brelse(bp);
1119 				anyfreed = 1;
1120 
1121 				if (nbp &&
1122 				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1123 				    (nbp->b_vp != vp) ||
1124 				    (nbp->b_flags & B_DELWRI))) {
1125 					goto restart;
1126 				}
1127 				VI_LOCK(vp);
1128 			}
1129 		}
1130 
1131 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1132 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1133 			if (bp->b_lblkno >= trunclbn) {
1134 				if (BUF_LOCK(bp,
1135 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1136 				    VI_MTX(vp)) == ENOLCK)
1137 					goto restart;
1138 				bremfree(bp);
1139 				bp->b_flags |= (B_INVAL | B_RELBUF);
1140 				bp->b_flags &= ~B_ASYNC;
1141 				brelse(bp);
1142 				anyfreed = 1;
1143 				if (nbp &&
1144 				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1145 				    (nbp->b_vp != vp) ||
1146 				    (nbp->b_flags & B_DELWRI) == 0)) {
1147 					goto restart;
1148 				}
1149 				VI_LOCK(vp);
1150 			}
1151 		}
1152 	}
1153 
1154 	if (length > 0) {
1155 restartsync:
1156 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1157 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1158 			if (bp->b_lblkno > 0)
1159 				continue;
1160 			/*
1161 			 * Since we hold the vnode lock this should only
1162 			 * fail if we're racing with the buf daemon.
1163 			 */
1164 			if (BUF_LOCK(bp,
1165 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1166 			    VI_MTX(vp)) == ENOLCK) {
1167 				goto restart;
1168 			}
1169 			KASSERT((bp->b_flags & B_DELWRI),
1170 			    ("buf(%p) on dirty queue without DELWRI", bp));
1171 
1172 			bremfree(bp);
1173 			bawrite(bp);
1174 			VI_LOCK(vp);
1175 			goto restartsync;
1176 		}
1177 	}
1178 
1179 	while (vp->v_numoutput > 0) {
1180 		vp->v_iflag |= VI_BWAIT;
1181 		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
1182 	}
1183 	VI_UNLOCK(vp);
1184 	vnode_pager_setsize(vp, length);
1185 
1186 	return (0);
1187 }
1188 
1189 /*
1190  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1191  * 		 a vnode.
1192  *
1193  *	NOTE: We have to deal with the special case of a background bitmap
1194  *	buffer, a situation where two buffers will have the same logical
1195  *	block offset.  We want (1) only the foreground buffer to be accessed
1196  *	in a lookup and (2) must differentiate between the foreground and
1197  *	background buffer in the splay tree algorithm because the splay
1198  *	tree cannot normally handle multiple entities with the same 'index'.
1199  *	We accomplish this by adding differentiating flags to the splay tree's
1200  *	numerical domain.
1201  */
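/*
 * This is a conventional top-down splay: "dummy" collects the left and
 * right fragments (via lefttreemax/righttreemin) as the search walks down
 * the tree, and the pieces are reassembled around the final root.  The
 * effective key is the pair (lblkno, BX_BKGRDMARKER), which keeps a
 * foreground buffer and its background shadow at the same logical block
 * distinct.
 */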
1202 static
1203 struct buf *
1204 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1205 {
1206 	struct buf dummy;
1207 	struct buf *lefttreemax, *righttreemin, *y;
1208 
1209 	if (root == NULL)
1210 		return (NULL);
1211 	lefttreemax = righttreemin = &dummy;
1212 	for (;;) {
1213 		if (lblkno < root->b_lblkno ||
1214 		    (lblkno == root->b_lblkno &&
1215 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1216 			if ((y = root->b_left) == NULL)
1217 				break;
1218 			if (lblkno < y->b_lblkno) {
1219 				/* Rotate right. */
1220 				root->b_left = y->b_right;
1221 				y->b_right = root;
1222 				root = y;
1223 				if ((y = root->b_left) == NULL)
1224 					break;
1225 			}
1226 			/* Link into the new root's right tree. */
1227 			righttreemin->b_left = root;
1228 			righttreemin = root;
1229 		} else if (lblkno > root->b_lblkno ||
1230 		    (lblkno == root->b_lblkno &&
1231 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1232 			if ((y = root->b_right) == NULL)
1233 				break;
1234 			if (lblkno > y->b_lblkno) {
1235 				/* Rotate left. */
1236 				root->b_right = y->b_left;
1237 				y->b_left = root;
1238 				root = y;
1239 				if ((y = root->b_right) == NULL)
1240 					break;
1241 			}
1242 			/* Link into the new root's left tree. */
1243 			lefttreemax->b_right = root;
1244 			lefttreemax = root;
1245 		} else {
1246 			break;
1247 		}
1248 		root = y;
1249 	}
1250 	/* Assemble the new root. */
1251 	lefttreemax->b_right = root->b_left;
1252 	righttreemin->b_left = root->b_right;
1253 	root->b_left = dummy.b_right;
1254 	root->b_right = dummy.b_left;
1255 	return (root);
1256 }
1257 
1258 static
1259 void
1260 buf_vlist_remove(struct buf *bp)
1261 {
1262 	struct vnode *vp = bp->b_vp;
1263 	struct buf *root;
1264 
1265 	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
1266 	if (bp->b_xflags & BX_VNDIRTY) {
1267 		if (bp != vp->v_dirtyblkroot) {
1268 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1269 			    vp->v_dirtyblkroot);
1270 			KASSERT(root == bp,
1271 			    ("splay lookup failed during dirty remove"));
1272 		}
1273 		if (bp->b_left == NULL) {
1274 			root = bp->b_right;
1275 		} else {
1276 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1277 			    bp->b_left);
1278 			root->b_right = bp->b_right;
1279 		}
1280 		vp->v_dirtyblkroot = root;
1281 		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
1282 		vp->v_dirtybufcnt--;
1283 	} else {
1284 		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
1285 		if (bp != vp->v_cleanblkroot) {
1286 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1287 			    vp->v_cleanblkroot);
1288 			KASSERT(root == bp,
1289 			    ("splay lookup failed during clean remove"));
1290 		}
1291 		if (bp->b_left == NULL) {
1292 			root = bp->b_right;
1293 		} else {
1294 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1295 			    bp->b_left);
1296 			root->b_right = bp->b_right;
1297 		}
1298 		vp->v_cleanblkroot = root;
1299 		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
1300 		vp->v_cleanbufcnt--;
1301 	}
1302 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1303 }
1304 
1305 /*
1306  * Add the buffer to the sorted clean or dirty block list using a
1307  * splay tree algorithm.
1308  *
1309  * NOTE: xflags is passed as a constant, optimizing this inline function!
1310  */
1311 static
1312 void
1313 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
1314 {
1315 	struct buf *root;
1316 
1317 	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
1318 	bp->b_xflags |= xflags;
1319 	if (xflags & BX_VNDIRTY) {
1320 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1321 		if (root == NULL) {
1322 			bp->b_left = NULL;
1323 			bp->b_right = NULL;
1324 			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
1325 		} else if (bp->b_lblkno < root->b_lblkno ||
1326 		    (bp->b_lblkno == root->b_lblkno &&
1327 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1328 			bp->b_left = root->b_left;
1329 			bp->b_right = root;
1330 			root->b_left = NULL;
1331 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1332 		} else {
1333 			bp->b_right = root->b_right;
1334 			bp->b_left = root;
1335 			root->b_right = NULL;
1336 			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
1337 			    root, bp, b_vnbufs);
1338 		}
1339 		vp->v_dirtybufcnt++;
1340 		vp->v_dirtyblkroot = bp;
1341 	} else {
1342 		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
1343 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1344 		if (root == NULL) {
1345 			bp->b_left = NULL;
1346 			bp->b_right = NULL;
1347 			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
1348 		} else if (bp->b_lblkno < root->b_lblkno ||
1349 		    (bp->b_lblkno == root->b_lblkno &&
1350 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1351 			bp->b_left = root->b_left;
1352 			bp->b_right = root;
1353 			root->b_left = NULL;
1354 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1355 		} else {
1356 			bp->b_right = root->b_right;
1357 			bp->b_left = root;
1358 			root->b_right = NULL;
1359 			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
1360 			    root, bp, b_vnbufs);
1361 		}
1362 		vp->v_cleanbufcnt++;
1363 		vp->v_cleanblkroot = bp;
1364 	}
1365 }
1366 
1367 /*
1368  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1369  * shadow buffers used in background bitmap writes.
1370  *
1371  * This code isn't quite as efficient as it could be because we are maintaining
1372  * two sorted lists and do not know which list the block resides in.
1373  *
1374  * During a "make buildworld" the desired buffer is found at one of
1375  * the roots more than 60% of the time.  Thus, checking both roots
1376  * before performing either splay eliminates unnecessary splays on the
1377  * first tree splayed.
1378  */
1379 struct buf *
1380 gbincore(struct vnode *vp, daddr_t lblkno)
1381 {
1382 	struct buf *bp;
1383 
1384 	GIANT_REQUIRED;
1385 
1386 	ASSERT_VI_LOCKED(vp, "gbincore");
1387 	if ((bp = vp->v_cleanblkroot) != NULL &&
1388 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1389 		return (bp);
1390 	if ((bp = vp->v_dirtyblkroot) != NULL &&
1391 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1392 		return (bp);
1393 	if ((bp = vp->v_cleanblkroot) != NULL) {
1394 		vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
1395 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1396 			return (bp);
1397 	}
1398 	if ((bp = vp->v_dirtyblkroot) != NULL) {
1399 		vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
1400 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1401 			return (bp);
1402 	}
1403 	return (NULL);
1404 }
1405 
1406 /*
1407  * Associate a buffer with a vnode.
1408  */
1409 void
1410 bgetvp(vp, bp)
1411 	register struct vnode *vp;
1412 	register struct buf *bp;
1413 {
1414 
1415 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1416 
1417 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1418 	    ("bgetvp: bp already attached! %p", bp));
1419 
1420 	ASSERT_VI_LOCKED(vp, "bgetvp");
1421 	vholdl(vp);
1422 	bp->b_vp = vp;
1423 	bp->b_dev = vn_todev(vp);
1424 	/*
1425 	 * Insert onto list for new vnode.
1426 	 */
1427 	buf_vlist_add(bp, vp, BX_VNCLEAN);
1428 }
1429 
1430 /*
1431  * Disassociate a buffer from a vnode.
1432  */
1433 void
1434 brelvp(bp)
1435 	register struct buf *bp;
1436 {
1437 	struct vnode *vp;
1438 
1439 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1440 
1441 	/*
1442 	 * Delete from old vnode list, if on one.
1443 	 */
1444 	vp = bp->b_vp;
1445 	VI_LOCK(vp);
1446 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1447 		buf_vlist_remove(bp);
1448 	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1449 		vp->v_iflag &= ~VI_ONWORKLST;
1450 		mtx_lock(&sync_mtx);
1451 		LIST_REMOVE(vp, v_synclist);
1452  		syncer_worklist_len--;
1453 		mtx_unlock(&sync_mtx);
1454 	}
1455 	vdropl(vp);
1456 	bp->b_vp = (struct vnode *) 0;
1457 	if (bp->b_object)
1458 		bp->b_object = NULL;
1459 	VI_UNLOCK(vp);
1460 }
1461 
1462 /*
1463  * Add an item to the syncer work queue.
1464  */
1465 static void
1466 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
1467 {
1468 	int slot;
1469 
1470 	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
1471 
1472 	mtx_lock(&sync_mtx);
1473 	if (vp->v_iflag & VI_ONWORKLST)
1474 		LIST_REMOVE(vp, v_synclist);
1475 	else {
1476 		vp->v_iflag |= VI_ONWORKLST;
1477  		syncer_worklist_len++;
1478 	}
1479 
1480 	if (delay > syncer_maxdelay - 2)
1481 		delay = syncer_maxdelay - 2;
1482 	slot = (syncer_delayno + delay) & syncer_mask;
1483 
1484 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
1485 	mtx_unlock(&sync_mtx);
1486 }
1487 
1488 struct  proc *updateproc;
1489 static void sched_sync(void);
1490 static struct kproc_desc up_kp = {
1491 	"syncer",
1492 	sched_sync,
1493 	&updateproc
1494 };
1495 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1496 
1497 /*
1498  * System filesystem synchronizer daemon.
1499  */
1500 static void
1501 sched_sync(void)
1502 {
1503 	struct synclist *next;
1504 	struct synclist *slp;
1505 	struct vnode *vp;
1506 	struct mount *mp;
1507 	long starttime;
1508 	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
1509 
1510 	mtx_lock(&Giant);
1511 
1512 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1513 	    SHUTDOWN_PRI_LAST);
1514 
1515 	for (;;) {
1516 		mtx_lock(&sync_mtx);
1517 		/*
1518 		 * Make one more full pass through the work list once
1519 		 * the only vnodes remaining on the work list are the
1520 		 * syncer vnodes.
1521 		 */
1522 		if (syncer_shutdown_iter > SYNCER_MAXDELAY &&
1523 		    syncer_worklist_len == sync_vnode_count)
1524 			syncer_shutdown_iter = SYNCER_MAXDELAY;
1525 		if (syncer_shutdown_iter == 0) {
1526 			mtx_unlock(&sync_mtx);
1527 			kthread_suspend_check(td->td_proc);
1528 			mtx_lock(&sync_mtx);
1529 		}
1530 		starttime = time_second;
1531 
1532 		/*
1533 		 * Push files whose dirty time has expired.  Be careful
1534 		 * of interrupt race on slp queue.
1535 		 */
1536 		slp = &syncer_workitem_pending[syncer_delayno];
1537 		syncer_delayno += 1;
1538 		if (syncer_delayno == syncer_maxdelay)
1539 			syncer_delayno = 0;
1540 		next = &syncer_workitem_pending[syncer_delayno];
1541 
1542 		while ((vp = LIST_FIRST(slp)) != NULL) {
1543 			if (VOP_ISLOCKED(vp, NULL) != 0 ||
1544 			    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1545 				LIST_REMOVE(vp, v_synclist);
1546 				LIST_INSERT_HEAD(next, vp, v_synclist);
1547 				continue;
1548 			}
1549 			if (VI_TRYLOCK(vp) == 0) {
1550 				LIST_REMOVE(vp, v_synclist);
1551 				LIST_INSERT_HEAD(next, vp, v_synclist);
1552 				vn_finished_write(mp);
1553 				continue;
1554 			}
1555 			/*
1556 			 * We use vhold in case the vnode does not
1557 			 * successfully sync.  vhold prevents the vnode from
1558 			 * going away when we unlock the sync_mtx so that
1559 			 * we can acquire the vnode interlock.
1560 			 */
1561 			vholdl(vp);
1562 			mtx_unlock(&sync_mtx);
1563 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
1564 			(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1565 			VOP_UNLOCK(vp, 0, td);
1566 			vn_finished_write(mp);
1567 			VI_LOCK(vp);
1568 			if ((vp->v_iflag & VI_ONWORKLST) != 0) {
1569 				/*
1570 				 * Put us back on the worklist.  The worklist
1571 				 * routine will remove us from our current
1572 				 * position and then add us back in at a later
1573 				 * position.
1574 				 */
1575 				vn_syncer_add_to_worklist(vp, syncdelay);
1576 			}
1577 			vdropl(vp);
1578 			VI_UNLOCK(vp);
1579 			mtx_lock(&sync_mtx);
1580 		}
1581 		if (syncer_shutdown_iter > 0)
1582 			syncer_shutdown_iter--;
1583 		mtx_unlock(&sync_mtx);
1584 
1585 		/*
1586 		 * Do soft update processing.
1587 		 */
1588 		if (softdep_process_worklist_hook != NULL)
1589 			(*softdep_process_worklist_hook)(NULL);
1590 
1591 		/*
1592 		 * The variable rushjob allows the kernel to speed up the
1593 		 * processing of the filesystem syncer process. A rushjob
1594 		 * value of N tells the filesystem syncer to process the next
1595 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1596 		 * is used by the soft update code to speed up the filesystem
1597 		 * syncer process when the incore state is getting so far
1598 		 * ahead of the disk that the kernel memory pool is being
1599 		 * threatened with exhaustion.
1600 		 */
1601 		mtx_lock(&sync_mtx);
1602 		if (rushjob > 0) {
1603 			rushjob -= 1;
1604 			mtx_unlock(&sync_mtx);
1605 			continue;
1606 		} else if (syncer_shutdown_iter > 0)
1607 			rushjob = SYNCER_SHUTDOWN_SPEEDUP;
1608 		mtx_unlock(&sync_mtx);
1609 		/*
1610 		 * If it has taken us less than a second to process the
1611 		 * current work, then wait. Otherwise start right over
1612 		 * again. We can still lose time if any single round
1613 		 * takes more than two seconds, but it does not really
1614 		 * matter as we are just trying to generally pace the
1615 		 * filesystem activity.
1616 		 */
1617 		if (time_second == starttime)
1618 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1619 	}
1620 }
1621 
1622 /*
1623  * Request the syncer daemon to speed up its work.
1624  * We never push it to speed up more than half of its
1625  * normal turn time, otherwise it could take over the cpu.
1626  */
1627 int
1628 speedup_syncer()
1629 {
1630 	struct thread *td;
1631 	int ret = 0;
1632 
1633 	td = FIRST_THREAD_IN_PROC(updateproc);
1634 	sleepq_remove(td, &lbolt);
1635 	mtx_lock(&sync_mtx);
1636 	if (rushjob < syncdelay / 2) {
1637 		rushjob += 1;
1638 		stat_rush_requests += 1;
1639 		ret = 1;
1640 	}
1641 	mtx_unlock(&sync_mtx);
1642 	return (ret);
1643 }
1644 
1645 /*
1646  * Tell the syncer to speed up its work and run through its work
1647  * list several times, then tell it to shut down.
1648  */
1649 static void
1650 syncer_shutdown(void *arg, int howto)
1651 {
1652 	struct thread *td;
1653 
1654 	td = FIRST_THREAD_IN_PROC(updateproc);
1655 	sleepq_remove(td, &lbolt);
1656 	mtx_lock(&sync_mtx);
1657 	if (rushjob < SYNCER_SHUTDOWN_SPEEDUP)
1658 		rushjob = SYNCER_SHUTDOWN_SPEEDUP;
1659 	syncer_shutdown_iter = SYNCER_SHUTDOWN_ITER_LIMIT;
1660 	mtx_unlock(&sync_mtx);
1661 	kproc_shutdown(arg, howto);
1662 }
1663 
1664 /*
1665  * Associate a p-buffer with a vnode.
1666  *
1667  * Also sets B_PAGING flag to indicate that vnode is not fully associated
1668  * with the buffer.  i.e. the bp has not been linked into the vnode or
1669  * ref-counted.
1670  */
1671 void
1672 pbgetvp(vp, bp)
1673 	register struct vnode *vp;
1674 	register struct buf *bp;
1675 {
1676 
1677 	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1678 
1679 	bp->b_vp = vp;
1680 	bp->b_object = vp->v_object;
1681 	bp->b_flags |= B_PAGING;
1682 	bp->b_dev = vn_todev(vp);
1683 }
1684 
1685 /*
1686  * Disassociate a p-buffer from a vnode.
1687  */
1688 void
1689 pbrelvp(bp)
1690 	register struct buf *bp;
1691 {
1692 
1693 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1694 
1695 	/* XXX REMOVE ME */
1696 	VI_LOCK(bp->b_vp);
1697 	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1698 		panic(
1699 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1700 		    bp,
1701 		    (int)bp->b_flags
1702 		);
1703 	}
1704 	VI_UNLOCK(bp->b_vp);
1705 	bp->b_vp = (struct vnode *) 0;
1706 	bp->b_object = NULL;
1707 	bp->b_flags &= ~B_PAGING;
1708 }
1709 
1710 /*
1711  * Reassign a buffer from one vnode to another.
1712  * Used to assign file specific control information
1713  * (indirect blocks) to the vnode to which they belong.
1714  */
1715 void
1716 reassignbuf(bp, newvp)
1717 	register struct buf *bp;
1718 	register struct vnode *newvp;
1719 {
1720 	struct vnode *vp;
1721 	int delay;
1722 
1723 	if (newvp == NULL) {
1724 		printf("reassignbuf: NULL");
1725 		return;
1726 	}
1727 	vp = bp->b_vp;
1728 	++reassignbufcalls;
1729 
1730 	/*
1731 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1732 	 * is not fully linked in.
1733 	 */
1734 	if (bp->b_flags & B_PAGING)
1735 		panic("cannot reassign paging buffer");
1736 
1737 	/*
1738 	 * Delete from old vnode list, if on one.
1739 	 */
1740 	VI_LOCK(vp);
1741 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1742 		buf_vlist_remove(bp);
1743 		if (vp != newvp) {
1744 			vdropl(bp->b_vp);
1745 			bp->b_vp = NULL;	/* for clarification */
1746 		}
1747 	}
1748 	if (vp != newvp) {
1749 		VI_UNLOCK(vp);
1750 		VI_LOCK(newvp);
1751 	}
1752 	/*
1753 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1754 	 * of clean buffers.
1755 	 */
1756 	if (bp->b_flags & B_DELWRI) {
1757 		if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
1758 			switch (newvp->v_type) {
1759 			case VDIR:
1760 				delay = dirdelay;
1761 				break;
1762 			case VCHR:
1763 				delay = metadelay;
1764 				break;
1765 			default:
1766 				delay = filedelay;
1767 			}
1768 			vn_syncer_add_to_worklist(newvp, delay);
1769 		}
1770 		buf_vlist_add(bp, newvp, BX_VNDIRTY);
1771 	} else {
1772 		buf_vlist_add(bp, newvp, BX_VNCLEAN);
1773 
1774 		if ((newvp->v_iflag & VI_ONWORKLST) &&
1775 		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1776 			mtx_lock(&sync_mtx);
1777 			LIST_REMOVE(newvp, v_synclist);
1778  			syncer_worklist_len--;
1779 			mtx_unlock(&sync_mtx);
1780 			newvp->v_iflag &= ~VI_ONWORKLST;
1781 		}
1782 	}
1783 	if (bp->b_vp != newvp) {
1784 		bp->b_vp = newvp;
1785 		vholdl(bp->b_vp);
1786 	}
1787 	VI_UNLOCK(newvp);
1788 }
1789 
1790 /*
1791  * Create a vnode for a device.
1792  * Used for mounting the root filesystem.
1793  */
1794 int
1795 bdevvp(dev, vpp)
1796 	struct cdev *dev;
1797 	struct vnode **vpp;
1798 {
1799 	register struct vnode *vp;
1800 	struct vnode *nvp;
1801 	int error;
1802 
1803 	if (dev == NULL) {
1804 		*vpp = NULLVP;
1805 		return (ENXIO);
1806 	}
1807 	if (vfinddev(dev, vpp))
1808 		return (0);
1809 
1810 	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1811 	if (error) {
1812 		*vpp = NULLVP;
1813 		return (error);
1814 	}
1815 	vp = nvp;
1816 	vp->v_type = VCHR;
1817 	addalias(vp, dev);
1818 	*vpp = vp;
1819 	return (0);
1820 }
1821 
1822 static void
1823 v_incr_usecount(struct vnode *vp, int delta)
1824 {
1825 
1826 	vp->v_usecount += delta;
1827 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1828 		mtx_lock(&spechash_mtx);
1829 		vp->v_rdev->si_usecount += delta;
1830 		mtx_unlock(&spechash_mtx);
1831 	}
1832 }
1833 
1834 /*
1835  * Add vnode to the alias list hung off the struct cdev *.
1836  *
1837  * The reason for this gunk is that multiple vnodes can reference
1838  * the same physical device, so checking vp->v_usecount to see
1839  * how many users there are is inadequate; the v_usecount values for
1840  * all the vnodes need to be accumulated.  vcount() does that.
1841  */
1842 struct vnode *
1843 addaliasu(nvp, nvp_rdev)
1844 	struct vnode *nvp;
1845 	dev_t nvp_rdev;
1846 {
1847 	struct vnode *ovp;
1848 	vop_t **ops;
1849 	struct cdev *dev;
1850 
1851 	if (nvp->v_type == VBLK)
1852 		return (nvp);
1853 	if (nvp->v_type != VCHR)
1854 		panic("addaliasu on non-special vnode");
1855 	dev = findcdev(nvp_rdev);
1856 	if (dev == NULL)
1857 		return (nvp);
1858 	/*
1859 	 * Check to see if we have a bdevvp vnode with no associated
1860 	 * filesystem. If so, we want to associate the filesystem of
1861 	 * the newly instigated vnode with the bdevvp vnode and
1862 	 * discard the newly created vnode rather than leaving the
1863 	 * bdevvp vnode lying around with no associated filesystem.
1864 	 */
1865 	if (vfinddev(dev, &ovp) == 0 || ovp->v_data != NULL) {
1866 		addalias(nvp, dev);
1867 		return (nvp);
1868 	}
1869 	/*
1870 	 * Discard unneeded vnode, but save its node specific data.
1871 	 * Note that if there is a lock, it is carried over in the
1872 	 * node specific data to the replacement vnode.
1873 	 */
1874 	vref(ovp);
1875 	ovp->v_data = nvp->v_data;
1876 	ovp->v_tag = nvp->v_tag;
1877 	nvp->v_data = NULL;
1878 	lockdestroy(ovp->v_vnlock);
1879 	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
1880 	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
1881 	ops = ovp->v_op;
1882 	ovp->v_op = nvp->v_op;
1883 	if (VOP_ISLOCKED(nvp, curthread)) {
1884 		VOP_UNLOCK(nvp, 0, curthread);
1885 		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
1886 	}
1887 	nvp->v_op = ops;
1888 	insmntque(ovp, nvp->v_mount);
1889 	vrele(nvp);
1890 	vgone(nvp);
1891 	return (ovp);
1892 }
1893 
1894 /* This is a local helper function that does the same as addaliasu, but
1895  * takes a struct cdev * instead of a dev_t. */
1896 static void
1897 addalias(nvp, dev)
1898 	struct vnode *nvp;
1899 	struct cdev *dev;
1900 {
1901 
1902 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
1903 	dev_ref(dev);
1904 	nvp->v_rdev = dev;
1905 	VI_LOCK(nvp);
1906 	mtx_lock(&spechash_mtx);
1907 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
1908 	dev->si_usecount += nvp->v_usecount;
1909 	mtx_unlock(&spechash_mtx);
1910 	VI_UNLOCK(nvp);
1911 }
1912 
1913 /*
1914  * Grab a particular vnode from the free list, increment its
1915  * reference count and lock it. The vnode lock bit is set if the
1916  * vnode is being eliminated in vgone. The process is awakened
1917  * when the transition is completed, and an error returned to
1918  * indicate that the vnode is no longer usable (possibly having
1919  * been changed to a new filesystem type).
1920  */
1921 int
1922 vget(vp, flags, td)
1923 	register struct vnode *vp;
1924 	int flags;
1925 	struct thread *td;
1926 {
1927 	int error;
1928 
1929 	/*
1930 	 * If the vnode is in the process of being cleaned out for
1931 	 * another use, we wait for the cleaning to finish and then
1932 	 * return failure. Cleaning is determined by checking that
1933 	 * the VI_XLOCK flag is set.
1934 	 */
1935 	if ((flags & LK_INTERLOCK) == 0)
1936 		VI_LOCK(vp);
1937 	if (vp->v_iflag & VI_XLOCK && vp->v_vxthread != curthread) {
1938 		if ((flags & LK_NOWAIT) == 0) {
1939 			vp->v_iflag |= VI_XWANT;
1940 			msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
1941 			return (ENOENT);
1942 		}
1943 		VI_UNLOCK(vp);
1944 		return (EBUSY);
1945 	}
1946 
1947 	v_incr_usecount(vp, 1);
1948 
1949 	if (VSHOULDBUSY(vp))
1950 		vbusy(vp);
1951 	if (flags & LK_TYPE_MASK) {
1952 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
1953 			/*
1954 			 * must expand vrele here because we do not want
1955 			 * to call VOP_INACTIVE if the reference count
1956 			 * drops back to zero since it was never really
1957 			 * active. We must remove it from the free list
1958 			 * before sleeping so that multiple processes do
1959 			 * not try to recycle it.
1960 			 */
1961 			VI_LOCK(vp);
1962 			v_incr_usecount(vp, -1);
1963 			if (VSHOULDFREE(vp))
1964 				vfree(vp);
1965 			else
1966 				vlruvp(vp);
1967 			VI_UNLOCK(vp);
1968 		}
1969 		return (error);
1970 	}
1971 	VI_UNLOCK(vp);
1972 	return (0);
1973 }
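
/*
 * A rough, illustrative sketch of the usual caller pattern: take a
 * reference and the vnode lock with vget(), release both with vput():
 *
 *	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0)
 *		return (error);		-- e.g. ENOENT if vp was vgone'd
 *	... operate on the locked, referenced vnode ...
 *	vput(vp);			-- drops both the lock and the ref
 */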
1974 
1975 /*
1976  * Increase the reference count of a vnode.
1977  */
1978 void
1979 vref(struct vnode *vp)
1980 {
1981 
1982 	VI_LOCK(vp);
1983 	v_incr_usecount(vp, 1);
1984 	VI_UNLOCK(vp);
1985 }
1986 
1987 /*
1988  * Return reference count of a vnode.
1989  *
1990  * The results of this call are only guaranteed when some mechanism other
1991  * than the VI lock is used to stop other processes from gaining references
1992  * to the vnode.  This may be the case if the caller holds the only reference.
1993  * This is also useful when stale data is acceptable as race conditions may
1994  * be accounted for by some other means.
1995  */
1996 int
1997 vrefcnt(struct vnode *vp)
1998 {
1999 	int usecnt;
2000 
2001 	VI_LOCK(vp);
2002 	usecnt = vp->v_usecount;
2003 	VI_UNLOCK(vp);
2004 
2005 	return (usecnt);
2006 }
2007 
2008 
2009 /*
2010  * Vnode put/release.
2011  * If count drops to zero, call inactive routine and return to freelist.
2012  */
2013 void
2014 vrele(vp)
2015 	struct vnode *vp;
2016 {
2017 	struct thread *td = curthread;	/* XXX */
2018 
2019 	GIANT_REQUIRED;
2020 
2021 	KASSERT(vp != NULL, ("vrele: null vp"));
2022 
2023 	VI_LOCK(vp);
2024 
2025 	/* Skip this v_writecount check if we're going to panic below. */
2026 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2027 	    ("vrele: missed vn_close"));
2028 
2029 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2030 	    vp->v_usecount == 1)) {
2031 		v_incr_usecount(vp, -1);
2032 		VI_UNLOCK(vp);
2033 
2034 		return;
2035 	}
2036 
2037 	if (vp->v_usecount == 1) {
2038 		v_incr_usecount(vp, -1);
2039 		/*
2040 		 * We must call VOP_INACTIVE with the node locked. Mark
2041 		 * as VI_DOINGINACT to avoid recursion.
2042 		 */
2043 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2044 			VI_LOCK(vp);
2045 			vp->v_iflag |= VI_DOINGINACT;
2046 			VI_UNLOCK(vp);
2047 			VOP_INACTIVE(vp, td);
2048 			VI_LOCK(vp);
2049 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2050 			    ("vrele: lost VI_DOINGINACT"));
2051 			vp->v_iflag &= ~VI_DOINGINACT;
2052 		} else
2053 			VI_LOCK(vp);
2054 		if (VSHOULDFREE(vp))
2055 			vfree(vp);
2056 		else
2057 			vlruvp(vp);
2058 		VI_UNLOCK(vp);
2059 
2060 	} else {
2061 #ifdef DIAGNOSTIC
2062 		vprint("vrele: negative ref count", vp);
2063 #endif
2064 		VI_UNLOCK(vp);
2065 		panic("vrele: negative ref cnt");
2066 	}
2067 }
2068 
2069 /*
2070  * Release an already locked vnode.  This gives the same effect as
2071  * unlock+vrele(), but takes less time and avoids releasing and
2072  * re-acquiring the lock (as vrele() acquires the lock internally).
2073  */
2074 void
2075 vput(vp)
2076 	struct vnode *vp;
2077 {
2078 	struct thread *td = curthread;	/* XXX */
2079 
2080 	GIANT_REQUIRED;
2081 
2082 	KASSERT(vp != NULL, ("vput: null vp"));
2083 	VI_LOCK(vp);
2084 	/* Skip this v_writecount check if we're going to panic below. */
2085 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2086 	    ("vput: missed vn_close"));
2087 
2088 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2089 	    vp->v_usecount == 1)) {
2090 		v_incr_usecount(vp, -1);
2091 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2092 		return;
2093 	}
2094 
2095 	if (vp->v_usecount == 1) {
2096 		v_incr_usecount(vp, -1);
2097 		/*
2098 		 * We must call VOP_INACTIVE with the node locked, so
2099 		 * we just need to release the vnode mutex. Mark as
2100 		 * VI_DOINGINACT to avoid recursion.
2101 		 */
2102 		vp->v_iflag |= VI_DOINGINACT;
2103 		VI_UNLOCK(vp);
2104 		VOP_INACTIVE(vp, td);
2105 		VI_LOCK(vp);
2106 		KASSERT(vp->v_iflag & VI_DOINGINACT,
2107 		    ("vput: lost VI_DOINGINACT"));
2108 		vp->v_iflag &= ~VI_DOINGINACT;
2109 		if (VSHOULDFREE(vp))
2110 			vfree(vp);
2111 		else
2112 			vlruvp(vp);
2113 		VI_UNLOCK(vp);
2114 
2115 	} else {
2116 #ifdef DIAGNOSTIC
2117 		vprint("vput: negative ref count", vp);
2118 #endif
2119 		panic("vput: negative ref cnt");
2120 	}
2121 }
2122 
2123 /*
2124  * Somebody doesn't want the vnode recycled.
2125  */
2126 void
2127 vhold(struct vnode *vp)
2128 {
2129 
2130 	VI_LOCK(vp);
2131 	vholdl(vp);
2132 	VI_UNLOCK(vp);
2133 }
2134 
2135 void
2136 vholdl(vp)
2137 	register struct vnode *vp;
2138 {
2139 
2140 	vp->v_holdcnt++;
2141 	if (VSHOULDBUSY(vp))
2142 		vbusy(vp);
2143 }
2144 
2145 /*
2146  * Note that one fewer caller now cares about this vnode.  vdrop() is the
2147  * opposite of vhold().
2148  */
2149 void
2150 vdrop(struct vnode *vp)
2151 {
2152 
2153 	VI_LOCK(vp);
2154 	vdropl(vp);
2155 	VI_UNLOCK(vp);
2156 }
2157 
2158 void
2159 vdropl(vp)
2160 	register struct vnode *vp;
2161 {
2162 
2163 	if (vp->v_holdcnt <= 0)
2164 		panic("vdrop: holdcnt");
2165 	vp->v_holdcnt--;
2166 	if (VSHOULDFREE(vp))
2167 		vfree(vp);
2168 	else
2169 		vlruvp(vp);
2170 }
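
/*
 * A rough, illustrative sketch: vhold()/vdrop() keep a vnode from being
 * recycled without taking a usecount reference, for instance while a
 * buffer continues to point at it:
 *
 *	vhold(vp);
 *	... vp cannot be reused while v_holdcnt is non-zero ...
 *	vdrop(vp);
 */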
2171 
2172 /*
2173  * Remove any vnodes in the vnode table belonging to mount point mp.
2174  *
2175  * If FORCECLOSE is not specified, there should not be any active ones,
2176  * return error if any are found (nb: this is a user error, not a
2177  * system error). If FORCECLOSE is specified, detach any active vnodes
2178  * that are found.
2179  *
2180  * If WRITECLOSE is set, only flush out regular file vnodes open for
2181  * writing.
2182  *
2183  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2184  *
2185  * `rootrefs' specifies the base reference count for the root vnode
2186  * of this filesystem. The root vnode is considered busy if its
2187  * v_usecount exceeds this value. On a successful return, vflush()
2188  * will call vrele() on the root vnode exactly rootrefs times.
2189  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2190  * be zero.
2191  */
2192 #ifdef DIAGNOSTIC
2193 static int busyprt = 0;		/* print out busy vnodes */
2194 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2195 #endif
2196 
2197 int
2198 vflush(mp, rootrefs, flags)
2199 	struct mount *mp;
2200 	int rootrefs;
2201 	int flags;
2202 {
2203 	struct thread *td = curthread;	/* XXX */
2204 	struct vnode *vp, *nvp, *rootvp = NULL;
2205 	struct vattr vattr;
2206 	int busy = 0, error;
2207 
2208 	if (rootrefs > 0) {
2209 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2210 		    ("vflush: bad args"));
2211 		/*
2212 		 * Get the filesystem root vnode. We can vput() it
2213 		 * immediately, since with rootrefs > 0, it won't go away.
2214 		 */
2215 		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
2216 			return (error);
2217 		vput(rootvp);
2218 
2219 	}
2220 	MNT_ILOCK(mp);
2221 loop:
2222 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
2223 		/*
2224 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
2225 		 * Start over if it has (it won't be on the list anymore).
2226 		 */
2227 		if (vp->v_mount != mp)
2228 			goto loop;
2229 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
2230 
2231 		VI_LOCK(vp);
2232 		MNT_IUNLOCK(mp);
2233 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
2234 		if (error) {
2235 			MNT_ILOCK(mp);
2236 			goto loop;
2237 		}
2238 		/*
2239 		 * Skip over any vnodes marked VV_SYSTEM.
2240 		 */
2241 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2242 			VOP_UNLOCK(vp, 0, td);
2243 			MNT_ILOCK(mp);
2244 			continue;
2245 		}
2246 		/*
2247 		 * If WRITECLOSE is set, flush out unlinked but still open
2248 		 * files (even if open only for reading) and regular file
2249 		 * vnodes open for writing.
2250 		 */
2251 		if (flags & WRITECLOSE) {
2252 			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2253 			VI_LOCK(vp);
2254 
2255 			if ((vp->v_type == VNON ||
2256 			    (error == 0 && vattr.va_nlink > 0)) &&
2257 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2258 				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2259 				MNT_ILOCK(mp);
2260 				continue;
2261 			}
2262 		} else
2263 			VI_LOCK(vp);
2264 
2265 		VOP_UNLOCK(vp, 0, td);
2266 
2267 		/*
2268 		 * With v_usecount == 0, all we need to do is clear out the
2269 		 * vnode data structures and we are done.
2270 		 */
2271 		if (vp->v_usecount == 0) {
2272 			vgonel(vp, td);
2273 			MNT_ILOCK(mp);
2274 			continue;
2275 		}
2276 
2277 		/*
2278 		 * If FORCECLOSE is set, forcibly close the vnode. For block
2279 		 * or character devices, revert to an anonymous device. For
2280 		 * all other files, just kill them.
2281 		 */
2282 		if (flags & FORCECLOSE) {
2283 			if (vp->v_type != VCHR)
2284 				vgonel(vp, td);
2285 			else
2286 				vgonechrl(vp, td);
2287 			MNT_ILOCK(mp);
2288 			continue;
2289 		}
2290 #ifdef DIAGNOSTIC
2291 		if (busyprt)
2292 			vprint("vflush: busy vnode", vp);
2293 #endif
2294 		VI_UNLOCK(vp);
2295 		MNT_ILOCK(mp);
2296 		busy++;
2297 	}
2298 	MNT_IUNLOCK(mp);
2299 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2300 		/*
2301 		 * If just the root vnode is busy, and if its refcount
2302 		 * is equal to `rootrefs', then go ahead and kill it.
2303 		 */
2304 		VI_LOCK(rootvp);
2305 		KASSERT(busy > 0, ("vflush: not busy"));
2306 		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
2307 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2308 			vgonel(rootvp, td);
2309 			busy = 0;
2310 		} else
2311 			VI_UNLOCK(rootvp);
2312 	}
2313 	if (busy)
2314 		return (EBUSY);
2315 	for (; rootrefs > 0; rootrefs--)
2316 		vrele(rootvp);
2317 	return (0);
2318 }
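
/*
 * A rough, illustrative sketch of how a filesystem's unmount path might
 * use vflush(); the flag handling shown is only an example:
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if ((error = vflush(mp, 0, flags)) != 0)
 *		return (error);		-- some vnodes are still busy
 *
 * A filesystem that keeps its own references on the root vnode would
 * pass that count as rootrefs instead of 0 (but rootrefs must be 0
 * whenever SKIPSYSTEM or WRITECLOSE is used).
 */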
2319 
2320 /*
2321  * This moves a now (likely recyclable) vnode to the end of the
2322  * mountlist.  XXX However, it is temporarily disabled until we
2323  * can clean up ffs_sync() and friends, which have loop restart
2324  * conditions which this code causes to operate O(N^2).
2325  */
2326 static void
2327 vlruvp(struct vnode *vp)
2328 {
2329 #if 0
2330 	struct mount *mp;
2331 
2332 	if ((mp = vp->v_mount) != NULL) {
2333 		MNT_ILOCK(mp);
2334 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2335 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2336 		MNT_IUNLOCK(mp);
2337 	}
2338 #endif
2339 }
2340 
2341 static void
2342 vx_lock(struct vnode *vp)
2343 {
2344 
2345 	ASSERT_VI_LOCKED(vp, "vx_lock");
2346 
2347 	/*
2348 	 * Prevent the vnode from being recycled or brought into use while we
2349 	 * clean it out.
2350 	 */
2351 	if (vp->v_iflag & VI_XLOCK)
2352 		panic("vclean: deadlock");
2353 	vp->v_iflag |= VI_XLOCK;
2354 	vp->v_vxthread = curthread;
2355 }
2356 
2357 static void
2358 vx_unlock(struct vnode *vp)
2359 {
2360 	ASSERT_VI_LOCKED(vp, "vx_unlock");
2361 	vp->v_iflag &= ~VI_XLOCK;
2362 	vp->v_vxthread = NULL;
2363 	if (vp->v_iflag & VI_XWANT) {
2364 		vp->v_iflag &= ~VI_XWANT;
2365 		wakeup(vp);
2366 	}
2367 }
2368 
2369 /*
2370  * Disassociate the underlying filesystem from a vnode.
2371  */
2372 static void
2373 vclean(vp, flags, td)
2374 	struct vnode *vp;
2375 	int flags;
2376 	struct thread *td;
2377 {
2378 	int active;
2379 
2380 	ASSERT_VI_LOCKED(vp, "vclean");
2381 	/*
2382 	 * Check to see if the vnode is in use. If so we have to reference it
2383 	 * before we clean it out so that its count cannot fall to zero and
2384 	 * generate a race against ourselves to recycle it.
2385 	 */
2386 	if ((active = vp->v_usecount))
2387 		v_incr_usecount(vp, 1);
2388 
2389 	/*
2390 	 * Even if the count is zero, the VOP_INACTIVE routine may still
2391 	 * have the object locked while it cleans it out. The VOP_LOCK
2392 	 * ensures that the VOP_INACTIVE routine is done with its work.
2393 	 * For active vnodes, it ensures that no other activity can
2394 	 * occur while the underlying object is being cleaned out.
2395 	 */
2396 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2397 
2398 	/*
2399 	 * Clean out any buffers associated with the vnode.
2400 	 * If the flush fails, just toss the buffers.
2401 	 */
2402 	if (flags & DOCLOSE) {
2403 		struct buf *bp;
2404 		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2405 		if (bp != NULL)
2406 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2407 		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2408 			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2409 	}
2410 
2411 	VOP_DESTROYVOBJECT(vp);
2412 
2413 	/*
2414 	 * Any other processes trying to obtain this lock must first
2415 	 * wait for VXLOCK to clear, then call the new lock operation.
2416 	 */
2417 	VOP_UNLOCK(vp, 0, td);
2418 
2419 	/*
2420 	 * If purging an active vnode, it must be closed and
2421 	 * deactivated before being reclaimed. Note that the
2422 	 * VOP_INACTIVE will unlock the vnode.
2423 	 */
2424 	if (active) {
2425 		if (flags & DOCLOSE)
2426 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2427 		VI_LOCK(vp);
2428 		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2429 			vp->v_iflag |= VI_DOINGINACT;
2430 			VI_UNLOCK(vp);
2431 			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2432 				panic("vclean: cannot relock.");
2433 			VOP_INACTIVE(vp, td);
2434 			VI_LOCK(vp);
2435 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2436 			    ("vclean: lost VI_DOINGINACT"));
2437 			vp->v_iflag &= ~VI_DOINGINACT;
2438 		}
2439 		VI_UNLOCK(vp);
2440 	}
2441 	/*
2442 	 * Reclaim the vnode.
2443 	 */
2444 	if (VOP_RECLAIM(vp, td))
2445 		panic("vclean: cannot reclaim");
2446 
2447 	if (active) {
2448 		/*
2449 		 * Inline copy of vrele() since VOP_INACTIVE
2450 		 * has already been called.
2451 		 */
2452 		VI_LOCK(vp);
2453 		v_incr_usecount(vp, -1);
2454 		if (vp->v_usecount <= 0) {
2455 #ifdef INVARIANTS
2456 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2457 				vprint("vclean: bad ref count", vp);
2458 				panic("vclean: ref cnt");
2459 			}
2460 #endif
2461 			if (VSHOULDFREE(vp))
2462 				vfree(vp);
2463 		}
2464 		VI_UNLOCK(vp);
2465 	}
2466 	/*
2467 	 * Delete from old mount point vnode list.
2468 	 */
2469 	if (vp->v_mount != NULL)
2470 		insmntque(vp, (struct mount *)0);
2471 	cache_purge(vp);
2472 	VI_LOCK(vp);
2473 	if (VSHOULDFREE(vp))
2474 		vfree(vp);
2475 
2476 	/*
2477 	 * Done with purge, reset to the standard lock and
2478 	 * notify sleepers of the grim news.
2479 	 */
2480 	vp->v_vnlock = &vp->v_lock;
2481 	vp->v_op = dead_vnodeop_p;
2482 	if (vp->v_pollinfo != NULL)
2483 		vn_pollgone(vp);
2484 	vp->v_tag = "none";
2485 }
2486 
2487 /*
2488  * Eliminate all activity associated with the requested vnode
2489  * and with all vnodes aliased to the requested vnode.
2490  */
2491 int
2492 vop_revoke(ap)
2493 	struct vop_revoke_args /* {
2494 		struct vnode *a_vp;
2495 		int a_flags;
2496 	} */ *ap;
2497 {
2498 	struct vnode *vp, *vq;
2499 	struct cdev *dev;
2500 
2501 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2502 	vp = ap->a_vp;
2503 	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2504 
2505 	VI_LOCK(vp);
2506 	/*
2507 	 * If a vgone (or vclean) is already in progress,
2508 	 * wait until it is done and return.
2509 	 */
2510 	if (vp->v_iflag & VI_XLOCK) {
2511 		vp->v_iflag |= VI_XWANT;
2512 		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2513 		    "vop_revokeall", 0);
2514 		return (0);
2515 	}
2516 	VI_UNLOCK(vp);
2517 	dev = vp->v_rdev;
2518 	for (;;) {
2519 		mtx_lock(&spechash_mtx);
2520 		vq = SLIST_FIRST(&dev->si_hlist);
2521 		mtx_unlock(&spechash_mtx);
2522 		if (vq == NULL)
2523 			break;
2524 		vgone(vq);
2525 	}
2526 	return (0);
2527 }
2528 
2529 /*
2530  * Recycle an unused vnode to the front of the free list.
2531  * Release the passed interlock if the vnode will be recycled.
2532  */
2533 int
2534 vrecycle(vp, inter_lkp, td)
2535 	struct vnode *vp;
2536 	struct mtx *inter_lkp;
2537 	struct thread *td;
2538 {
2539 
2540 	VI_LOCK(vp);
2541 	if (vp->v_usecount == 0) {
2542 		if (inter_lkp) {
2543 			mtx_unlock(inter_lkp);
2544 		}
2545 		vgonel(vp, td);
2546 		return (1);
2547 	}
2548 	VI_UNLOCK(vp);
2549 	return (0);
2550 }
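
/*
 * A rough, illustrative sketch: a filesystem's inactive routine might
 * call vrecycle() to reclaim a vnode whose backing object is gone (the
 * predicate name below is hypothetical):
 *
 *	if (xxx_node_is_dead(vp))
 *		vrecycle(vp, NULL, td);
 */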
2551 
2552 /*
2553  * Eliminate all activity associated with a vnode
2554  * in preparation for reuse.
2555  */
2556 void
2557 vgone(vp)
2558 	register struct vnode *vp;
2559 {
2560 	struct thread *td = curthread;	/* XXX */
2561 
2562 	VI_LOCK(vp);
2563 	vgonel(vp, td);
2564 }
2565 
2566 /*
2567  * Disassociate a character device from its underlying filesystem and
2568  * attach it to spec.  This is for use when the chr device is still active
2569  * and the filesystem is going away.
2570  */
2571 static void
2572 vgonechrl(struct vnode *vp, struct thread *td)
2573 {
2574 	ASSERT_VI_LOCKED(vp, "vgonechrl");
2575 	vx_lock(vp);
2576 	/*
2577 	 * This is a custom version of vclean() which does not tear down
2578 	 * the bufs or vm objects held by this vnode.  This allows filesystems
2579 	 * to continue using devices which were discovered via another
2580 	 * filesystem that has been unmounted.
2581 	 */
2582 	if (vp->v_usecount != 0) {
2583 		v_incr_usecount(vp, 1);
2584 		/*
2585 		 * Ensure that no other activity can occur while the
2586 		 * underlying object is being cleaned out.
2587 		 */
2588 		VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2589 		/*
2590 		 * Any other processes trying to obtain this lock must first
2591 		 * wait for VXLOCK to clear, then call the new lock operation.
2592 		 */
2593 		VOP_UNLOCK(vp, 0, td);
2594 		vp->v_vnlock = &vp->v_lock;
2595 		vp->v_tag = "orphanchr";
2596 		vp->v_op = spec_vnodeop_p;
2597 		if (vp->v_mount != NULL)
2598 			insmntque(vp, (struct mount *)0);
2599 		cache_purge(vp);
2600 		vrele(vp);
2601 		VI_LOCK(vp);
2602 	} else
2603 		vclean(vp, 0, td);
2604 	vp->v_op = spec_vnodeop_p;
2605 	vx_unlock(vp);
2606 	VI_UNLOCK(vp);
2607 }
2608 
2609 /*
2610  * vgone, with the vp interlock held.
2611  */
2612 void
2613 vgonel(vp, td)
2614 	struct vnode *vp;
2615 	struct thread *td;
2616 {
2617 	/*
2618 	 * If a vgone (or vclean) is already in progress,
2619 	 * wait until it is done and return.
2620 	 */
2621 	ASSERT_VI_LOCKED(vp, "vgonel");
2622 	if (vp->v_iflag & VI_XLOCK) {
2623 		vp->v_iflag |= VI_XWANT;
2624 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2625 		return;
2626 	}
2627 	vx_lock(vp);
2628 
2629 	/*
2630 	 * Clean out the filesystem specific data.
2631 	 */
2632 	vclean(vp, DOCLOSE, td);
2633 	VI_UNLOCK(vp);
2634 
2635 	/*
2636 	 * If special device, remove it from special device alias list
2637 	 * if it is on one.
2638 	 */
2639 	VI_LOCK(vp);
2640 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2641 		mtx_lock(&spechash_mtx);
2642 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2643 		vp->v_rdev->si_usecount -= vp->v_usecount;
2644 		mtx_unlock(&spechash_mtx);
2645 		dev_rel(vp->v_rdev);
2646 		vp->v_rdev = NULL;
2647 	}
2648 
2649 	/*
2650 	 * If it is on the freelist and not already at the head,
2651 	 * move it to the head of the list. The test of the
2652 	 * VI_DOOMED flag and the reference count of zero is because
2653 	 * it will be removed from the free list by getnewvnode,
2654 	 * but will not have its reference count incremented until
2655 	 * after calling vgone. If the reference count were
2656 	 * incremented first, vgone would (incorrectly) try to
2657 	 * close the previous instance of the underlying object.
2658 	 */
2659 	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2660 		mtx_lock(&vnode_free_list_mtx);
2661 		if (vp->v_iflag & VI_FREE) {
2662 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2663 		} else {
2664 			vp->v_iflag |= VI_FREE;
2665 			freevnodes++;
2666 		}
2667 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2668 		mtx_unlock(&vnode_free_list_mtx);
2669 	}
2670 
2671 	vp->v_type = VBAD;
2672 	vx_unlock(vp);
2673 	VI_UNLOCK(vp);
2674 }
2675 
2676 /*
2677  * Lookup a vnode by device number.
2678  */
2679 int
2680 vfinddev(dev, vpp)
2681 	struct cdev *dev;
2682 	struct vnode **vpp;
2683 {
2684 	struct vnode *vp;
2685 
2686 	mtx_lock(&spechash_mtx);
2687 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2688 		*vpp = vp;
2689 		mtx_unlock(&spechash_mtx);
2690 		return (1);
2691 	}
2692 	mtx_unlock(&spechash_mtx);
2693 	return (0);
2694 }
2695 
2696 /*
2697  * Calculate the total number of references to a special device.
2698  */
2699 int
2700 vcount(vp)
2701 	struct vnode *vp;
2702 {
2703 	int count;
2704 
2705 	mtx_lock(&spechash_mtx);
2706 	count = vp->v_rdev->si_usecount;
2707 	mtx_unlock(&spechash_mtx);
2708 	return (count);
2709 }
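
/*
 * A rough, illustrative sketch: since several aliased vnodes may refer
 * to the same device, a device close routine typically consults
 * vcount() rather than vp->v_usecount:
 *
 *	if (vcount(vp) > 1)
 *		return (0);	-- other aliases still have the device open
 *	... really close the device ...
 */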
2710 
2711 /*
2712  * Same as vcount(), but using the struct cdev * as the argument.
2713  */
2714 int
2715 count_dev(dev)
2716 	struct cdev *dev;
2717 {
2718 	int count;
2719 
2720 	mtx_lock(&spechash_mtx);
2721 	count = dev->si_usecount;
2722 	mtx_unlock(&spechash_mtx);
2723 	return(count);
2724 }
2725 
2726 /*
2727  * Print out a description of a vnode.
2728  */
2729 static char *typename[] =
2730 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2731 
2732 void
2733 vprint(label, vp)
2734 	char *label;
2735 	struct vnode *vp;
2736 {
2737 	char buf[96];
2738 
2739 	if (label != NULL)
2740 		printf("%s: %p: ", label, (void *)vp);
2741 	else
2742 		printf("%p: ", (void *)vp);
2743 	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2744 	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2745 	    vp->v_writecount, vp->v_holdcnt);
2746 	buf[0] = '\0';
2747 	if (vp->v_vflag & VV_ROOT)
2748 		strcat(buf, "|VV_ROOT");
2749 	if (vp->v_vflag & VV_TEXT)
2750 		strcat(buf, "|VV_TEXT");
2751 	if (vp->v_vflag & VV_SYSTEM)
2752 		strcat(buf, "|VV_SYSTEM");
2753 	if (vp->v_iflag & VI_XLOCK)
2754 		strcat(buf, "|VI_XLOCK");
2755 	if (vp->v_iflag & VI_XWANT)
2756 		strcat(buf, "|VI_XWANT");
2757 	if (vp->v_iflag & VI_BWAIT)
2758 		strcat(buf, "|VI_BWAIT");
2759 	if (vp->v_iflag & VI_DOOMED)
2760 		strcat(buf, "|VI_DOOMED");
2761 	if (vp->v_iflag & VI_FREE)
2762 		strcat(buf, "|VI_FREE");
2763 	if (vp->v_vflag & VV_OBJBUF)
2764 		strcat(buf, "|VV_OBJBUF");
2765 	if (buf[0] != '\0')
2766 		printf(" flags (%s),", &buf[1]);
2767 	lockmgr_printinfo(vp->v_vnlock);
2768 	printf("\n");
2769 	if (vp->v_data != NULL)
2770 		VOP_PRINT(vp);
2771 }
2772 
2773 #ifdef DDB
2774 #include <ddb/ddb.h>
2775 /*
2776  * List all of the locked vnodes in the system.
2777  * Called when debugging the kernel.
2778  */
2779 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2780 {
2781 	struct mount *mp, *nmp;
2782 	struct vnode *vp;
2783 
2784 	/*
2785 	 * Note: because this is DDB, we can't obey the locking semantics
2786 	 * for these structures, which means we could catch an inconsistent
2787 	 * state and dereference a nasty pointer.  Not much to be done
2788 	 * about that.
2789 	 */
2790 	printf("Locked vnodes\n");
2791 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2792 		nmp = TAILQ_NEXT(mp, mnt_list);
2793 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2794 			if (VOP_ISLOCKED(vp, NULL))
2795 				vprint(NULL, vp);
2796 		}
2798 	}
2799 }
2800 #endif
2801 
2802 /*
2803  * Fill in a struct xvfsconf based on a struct vfsconf.
2804  */
2805 static void
2806 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2807 {
2808 
2809 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2810 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2811 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2812 	xvfsp->vfc_flags = vfsp->vfc_flags;
2813 	/*
2814 	 * These are unused in userland, we keep them
2815 	 * to not break binary compatibility.
2816 	 */
2817 	xvfsp->vfc_vfsops = NULL;
2818 	xvfsp->vfc_next = NULL;
2819 }
2820 
2821 /*
2822  * Top level filesystem related information gathering.
2823  */
2824 static int
2825 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2826 {
2827 	struct vfsconf *vfsp;
2828 	struct xvfsconf *xvfsp;
2829 	int cnt, error, i;
2830 
2831 	cnt = 0;
2832 	for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
2833 		cnt++;
2834 	xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
2835 	/*
2836 	 * Handle the race that will exist here once struct vfsconf is
2837 	 * locked down, by using both cnt and a check of vfc_next against
2838 	 * NULL to determine the end of the loop.  The race will
2839 	 * happen because we will have to unlock before calling malloc().
2840 	 * We are protected by Giant for now.
2841 	 */
2842 	i = 0;
2843 	for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
2844 		vfsconf2x(vfsp, xvfsp + i);
2845 		i++;
2846 	}
2847 	error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
2848 	free(xvfsp, M_TEMP);
2849 	return (error);
2850 }
2851 
2852 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2853     "S,xvfsconf", "List of all configured filesystems");
2854 
2855 #ifndef BURN_BRIDGES
2856 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2857 
2858 static int
2859 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2860 {
2861 	int *name = (int *)arg1 - 1;	/* XXX */
2862 	u_int namelen = arg2 + 1;	/* XXX */
2863 	struct vfsconf *vfsp;
2864 	struct xvfsconf xvfsp;
2865 
2866 	printf("WARNING: userland calling deprecated sysctl, "
2867 	    "please rebuild world\n");
2868 
2869 #if 1 || defined(COMPAT_PRELITE2)
2870 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2871 	if (namelen == 1)
2872 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2873 #endif
2874 
2875 	switch (name[1]) {
2876 	case VFS_MAXTYPENUM:
2877 		if (namelen != 2)
2878 			return (ENOTDIR);
2879 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2880 	case VFS_CONF:
2881 		if (namelen != 3)
2882 			return (ENOTDIR);	/* overloaded */
2883 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2884 			if (vfsp->vfc_typenum == name[2])
2885 				break;
2886 		if (vfsp == NULL)
2887 			return (EOPNOTSUPP);
2888 		vfsconf2x(vfsp, &xvfsp);
2889 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2890 	}
2891 	return (EOPNOTSUPP);
2892 }
2893 
2894 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
2895 	"Generic filesystem");
2896 
2897 #if 1 || defined(COMPAT_PRELITE2)
2898 
2899 static int
2900 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2901 {
2902 	int error;
2903 	struct vfsconf *vfsp;
2904 	struct ovfsconf ovfs;
2905 
2906 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
2907 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2908 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2909 		ovfs.vfc_index = vfsp->vfc_typenum;
2910 		ovfs.vfc_refcount = vfsp->vfc_refcount;
2911 		ovfs.vfc_flags = vfsp->vfc_flags;
2912 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2913 		if (error)
2914 			return error;
2915 	}
2916 	return 0;
2917 }
2918 
2919 #endif /* 1 || COMPAT_PRELITE2 */
2920 #endif /* !BURN_BRIDGES */
2921 
2922 #define KINFO_VNODESLOP		10
2923 #ifdef notyet
2924 /*
2925  * Dump vnode list (via sysctl).
2926  */
2927 /* ARGSUSED */
2928 static int
2929 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2930 {
2931 	struct xvnode *xvn;
2932 	struct thread *td = req->td;
2933 	struct mount *mp;
2934 	struct vnode *vp;
2935 	int error, len, n;
2936 
2937 	/*
2938 	 * Stale numvnodes access is not fatal here.
2939 	 */
2940 	req->lock = 0;
2941 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
2942 	if (!req->oldptr)
2943 		/* Make an estimate */
2944 		return (SYSCTL_OUT(req, 0, len));
2945 
2946 	error = sysctl_wire_old_buffer(req, 0);
2947 	if (error != 0)
2948 		return (error);
2949 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
2950 	n = 0;
2951 	mtx_lock(&mountlist_mtx);
2952 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2953 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
2954 			continue;
2955 		MNT_ILOCK(mp);
2956 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2957 			if (n == len)
2958 				break;
2959 			vref(vp);
2960 			xvn[n].xv_size = sizeof *xvn;
2961 			xvn[n].xv_vnode = vp;
2962 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
2963 			XV_COPY(usecount);
2964 			XV_COPY(writecount);
2965 			XV_COPY(holdcnt);
2966 			XV_COPY(id);
2967 			XV_COPY(mount);
2968 			XV_COPY(numoutput);
2969 			XV_COPY(type);
2970 #undef XV_COPY
2971 			xvn[n].xv_flag = vp->v_vflag;
2972 
2973 			switch (vp->v_type) {
2974 			case VREG:
2975 			case VDIR:
2976 			case VLNK:
2977 				xvn[n].xv_dev = vp->v_cachedfs;
2978 				xvn[n].xv_ino = vp->v_cachedid;
2979 				break;
2980 			case VBLK:
2981 			case VCHR:
2982 				if (vp->v_rdev == NULL) {
2983 					vrele(vp);
2984 					continue;
2985 				}
2986 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
2987 				break;
2988 			case VSOCK:
2989 				xvn[n].xv_socket = vp->v_socket;
2990 				break;
2991 			case VFIFO:
2992 				xvn[n].xv_fifo = vp->v_fifoinfo;
2993 				break;
2994 			case VNON:
2995 			case VBAD:
2996 			default:
2997 				/* shouldn't happen? */
2998 				vrele(vp);
2999 				continue;
3000 			}
3001 			vrele(vp);
3002 			++n;
3003 		}
3004 		MNT_IUNLOCK(mp);
3005 		mtx_lock(&mountlist_mtx);
3006 		vfs_unbusy(mp, td);
3007 		if (n == len)
3008 			break;
3009 	}
3010 	mtx_unlock(&mountlist_mtx);
3011 
3012 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3013 	free(xvn, M_TEMP);
3014 	return (error);
3015 }
3016 
3017 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3018 	0, 0, sysctl_vnode, "S,xvnode", "");
3019 #endif
3020 
3021 /*
3022  * Check to see if a filesystem is mounted on a block device.
3023  */
3024 int
3025 vfs_mountedon(vp)
3026 	struct vnode *vp;
3027 {
3028 
3029 	if (vp->v_rdev->si_mountpoint != NULL)
3030 		return (EBUSY);
3031 	return (0);
3032 }
3033 
3034 /*
3035  * Unmount all filesystems. The list is traversed in reverse order
3036  * of mounting to avoid dependencies.
3037  */
3038 void
3039 vfs_unmountall()
3040 {
3041 	struct mount *mp;
3042 	struct thread *td;
3043 	int error;
3044 
3045 	if (curthread != NULL)
3046 		td = curthread;
3047 	else
3048 		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3049 	/*
3050 	 * Since this only runs when rebooting, it is not interlocked.
3051 	 */
3052 	while(!TAILQ_EMPTY(&mountlist)) {
3053 		mp = TAILQ_LAST(&mountlist, mntlist);
3054 		error = dounmount(mp, MNT_FORCE, td);
3055 		if (error) {
3056 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3057 			printf("unmount of %s failed (",
3058 			    mp->mnt_stat.f_mntonname);
3059 			if (error == EBUSY)
3060 				printf("BUSY)\n");
3061 			else
3062 				printf("%d)\n", error);
3063 		} else {
3064 			/* The unmount has removed mp from the mountlist */
3065 		}
3066 	}
3067 }
3068 
3069 /*
3070  * Perform msync on all vnodes under a mount point.
3071  * The mount point must be locked.
3072  */
3073 void
3074 vfs_msync(struct mount *mp, int flags)
3075 {
3076 	struct vnode *vp, *nvp;
3077 	struct vm_object *obj;
3078 	int tries;
3079 
3080 	GIANT_REQUIRED;
3081 
3082 	tries = 5;
3083 	MNT_ILOCK(mp);
3084 loop:
3085 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
3086 		if (vp->v_mount != mp) {
3087 			if (--tries > 0)
3088 				goto loop;
3089 			break;
3090 		}
3091 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
3092 
3093 		VI_LOCK(vp);
3094 		if (vp->v_iflag & VI_XLOCK) {
3095 			VI_UNLOCK(vp);
3096 			continue;
3097 		}
3098 
3099 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3100 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3101 			MNT_IUNLOCK(mp);
3102 			if (!vget(vp,
3103 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3104 			    curthread)) {
3105 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3106 					vput(vp);
3107 					MNT_ILOCK(mp);
3108 					continue;
3109 				}
3110 
3111 				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3112 					VM_OBJECT_LOCK(obj);
3113 					vm_object_page_clean(obj, 0, 0,
3114 					    flags == MNT_WAIT ?
3115 					    OBJPC_SYNC : OBJPC_NOSYNC);
3116 					VM_OBJECT_UNLOCK(obj);
3117 				}
3118 				vput(vp);
3119 			}
3120 			MNT_ILOCK(mp);
3121 			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3122 				if (--tries > 0)
3123 					goto loop;
3124 				break;
3125 			}
3126 		} else
3127 			VI_UNLOCK(vp);
3128 	}
3129 	MNT_IUNLOCK(mp);
3130 }
3131 
3132 /*
3133  * Create the VM object needed for VMIO and mmap support.  This
3134  * is done for all VREG files in the system.  Some filesystems can also
3135  * gain the additional metadata buffering capability of the VMIO
3136  * code by putting the device node into VMIO mode as well.
3137  *
3138  * vp must be locked when vfs_object_create is called.
3139  */
3140 int
3141 vfs_object_create(vp, td, cred)
3142 	struct vnode *vp;
3143 	struct thread *td;
3144 	struct ucred *cred;
3145 {
3146 
3147 	GIANT_REQUIRED;
3148 	return (VOP_CREATEVOBJECT(vp, cred, td));
3149 }
3150 
3151 /*
3152  * Mark a vnode as free, putting it up for recycling.
3153  */
3154 void
3155 vfree(vp)
3156 	struct vnode *vp;
3157 {
3158 
3159 	ASSERT_VI_LOCKED(vp, "vfree");
3160 	mtx_lock(&vnode_free_list_mtx);
3161 	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3162 	if (vp->v_iflag & VI_AGE) {
3163 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3164 	} else {
3165 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3166 	}
3167 	freevnodes++;
3168 	mtx_unlock(&vnode_free_list_mtx);
3169 	vp->v_iflag &= ~VI_AGE;
3170 	vp->v_iflag |= VI_FREE;
3171 }
3172 
3173 /*
3174  * Opposite of vfree() - mark a vnode as in use.
3175  */
3176 void
3177 vbusy(vp)
3178 	struct vnode *vp;
3179 {
3180 
3181 	ASSERT_VI_LOCKED(vp, "vbusy");
3182 	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3183 
3184 	mtx_lock(&vnode_free_list_mtx);
3185 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3186 	freevnodes--;
3187 	mtx_unlock(&vnode_free_list_mtx);
3188 
3189 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3190 }
3191 
3192 /*
3193  * Initialize the per-vnode helper structure used to hold poll-related state.
3194  */
3195 void
3196 v_addpollinfo(struct vnode *vp)
3197 {
3198 
3199 	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
3200 	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3201 }
3202 
3203 /*
3204  * Record a process's interest in events which might happen to
3205  * a vnode.  Because poll uses the historic select-style interface
3206  * internally, this routine serves as both the ``check for any
3207  * pending events'' and the ``record my interest in future events''
3208  * functions.  (These are done together, while the lock is held,
3209  * to avoid race conditions.)
3210  */
3211 int
3212 vn_pollrecord(vp, td, events)
3213 	struct vnode *vp;
3214 	struct thread *td;
3215 	short events;
3216 {
3217 
3218 	if (vp->v_pollinfo == NULL)
3219 		v_addpollinfo(vp);
3220 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3221 	if (vp->v_pollinfo->vpi_revents & events) {
3222 		/*
3223 		 * This leaves events we are not interested
3224 		 * in available for the other process which
3225 		 * presumably had requested them
3226 		 * (otherwise they would never have been
3227 		 * recorded).
3228 		 */
3229 		events &= vp->v_pollinfo->vpi_revents;
3230 		vp->v_pollinfo->vpi_revents &= ~events;
3231 
3232 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3233 		return events;
3234 	}
3235 	vp->v_pollinfo->vpi_events |= events;
3236 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3237 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3238 	return 0;
3239 }
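
/*
 * A rough, illustrative sketch of how a filesystem might use the poll
 * support: its poll routine records interest, and the paths that later
 * modify the file post the corresponding events:
 *
 *	-- in a VOP_POLL implementation:
 *	return (vn_pollrecord(vp, ap->a_td, ap->a_events));
 *
 *	-- in the write path, once data has been appended:
 *	VN_POLLEVENT(vp, POLLIN | POLLRDNORM);
 */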
3240 
3241 /*
3242  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3243  * it is possible for us to miss an event due to race conditions, but
3244  * that condition is expected to be rare, so for the moment it is the
3245  * preferred interface.
3246  */
3247 void
3248 vn_pollevent(vp, events)
3249 	struct vnode *vp;
3250 	short events;
3251 {
3252 
3253 	if (vp->v_pollinfo == NULL)
3254 		v_addpollinfo(vp);
3255 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3256 	if (vp->v_pollinfo->vpi_events & events) {
3257 		/*
3258 		 * We clear vpi_events so that we don't
3259 		 * call selwakeup() twice if two events are
3260 		 * posted before the polling process(es) is
3261 		 * awakened.  This also ensures that we take at
3262 		 * most one selwakeup() if the polling process
3263 		 * is no longer interested.  However, it does
3264 		 * mean that only one event can be noticed at
3265 		 * a time.  (Perhaps we should only clear those
3266 		 * event bits which we note?) XXX
3267 		 */
3268 		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3269 		vp->v_pollinfo->vpi_revents |= events;
3270 		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3271 	}
3272 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3273 }
3274 
3275 /*
3276  * Wake up anyone polling on vp because it is being revoked.
3277  * This depends on dead_poll() returning POLLHUP for correct
3278  * behavior.
3279  */
3280 void
3281 vn_pollgone(vp)
3282 	struct vnode *vp;
3283 {
3284 
3285 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3286 	VN_KNOTE(vp, NOTE_REVOKE);
3287 	if (vp->v_pollinfo->vpi_events) {
3288 		vp->v_pollinfo->vpi_events = 0;
3289 		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3290 	}
3291 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3292 }
3293 
3294 
3295 
3296 /*
3297  * Routine to create and manage a filesystem syncer vnode.
3298  */
3299 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3300 static int	sync_fsync(struct  vop_fsync_args *);
3301 static int	sync_inactive(struct  vop_inactive_args *);
3302 static int	sync_reclaim(struct  vop_reclaim_args *);
3303 
3304 static vop_t **sync_vnodeop_p;
3305 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3306 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3307 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3308 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3309 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3310 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3311 	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3312 	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3313 	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3314 	{ NULL, NULL }
3315 };
3316 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3317 	{ &sync_vnodeop_p, sync_vnodeop_entries };
3318 
3319 VNODEOP_SET(sync_vnodeop_opv_desc);
3320 
3321 /*
3322  * Create a new filesystem syncer vnode for the specified mount point.
3323  */
3324 int
3325 vfs_allocate_syncvnode(mp)
3326 	struct mount *mp;
3327 {
3328 	struct vnode *vp;
3329 	static long start, incr, next;
3330 	int error;
3331 
3332 	/* Allocate a new vnode */
3333 	if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
3334 		mp->mnt_syncer = NULL;
3335 		return (error);
3336 	}
3337 	vp->v_type = VNON;
3338 	/*
3339 	 * Place the vnode onto the syncer worklist. We attempt to
3340 	 * scatter them about on the list so that they will go off
3341 	 * at evenly distributed times even if all the filesystems
3342 	 * are mounted at once.
3343 	 */
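	/*
	 * As a rough worked example: with a syncer_maxdelay of 32 the
	 * successive mounts are assigned offsets of roughly 16, 8, 24,
	 * 4, 12, 20, 28, 2, ... (before the modulo by syncdelay below),
	 * so their sync times spread across the delay range instead of
	 * clustering together.
	 */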
3344 	next += incr;
3345 	if (next == 0 || next > syncer_maxdelay) {
3346 		start /= 2;
3347 		incr /= 2;
3348 		if (start == 0) {
3349 			start = syncer_maxdelay / 2;
3350 			incr = syncer_maxdelay;
3351 		}
3352 		next = start;
3353 	}
3354 	VI_LOCK(vp);
3355 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3356 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3357 	mtx_lock(&sync_mtx);
3358 	sync_vnode_count++;
3359 	mtx_unlock(&sync_mtx);
3360 	VI_UNLOCK(vp);
3361 	mp->mnt_syncer = vp;
3362 	return (0);
3363 }
3364 
3365 /*
3366  * Do a lazy sync of the filesystem.
3367  */
3368 static int
3369 sync_fsync(ap)
3370 	struct vop_fsync_args /* {
3371 		struct vnode *a_vp;
3372 		struct ucred *a_cred;
3373 		int a_waitfor;
3374 		struct thread *a_td;
3375 	} */ *ap;
3376 {
3377 	struct vnode *syncvp = ap->a_vp;
3378 	struct mount *mp = syncvp->v_mount;
3379 	struct thread *td = ap->a_td;
3380 	int error, asyncflag;
3381 
3382 	/*
3383 	 * We only need to do something if this is a lazy evaluation.
3384 	 */
3385 	if (ap->a_waitfor != MNT_LAZY)
3386 		return (0);
3387 
3388 	/*
3389 	 * Move ourselves to the back of the sync list.
3390 	 */
3391 	VI_LOCK(syncvp);
3392 	vn_syncer_add_to_worklist(syncvp, syncdelay);
3393 	VI_UNLOCK(syncvp);
3394 
3395 	/*
3396 	 * Walk the list of vnodes pushing all that are dirty and
3397 	 * not already on the sync list.
3398 	 */
3399 	mtx_lock(&mountlist_mtx);
3400 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3401 		mtx_unlock(&mountlist_mtx);
3402 		return (0);
3403 	}
3404 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3405 		vfs_unbusy(mp, td);
3406 		return (0);
3407 	}
3408 	asyncflag = mp->mnt_flag & MNT_ASYNC;
3409 	mp->mnt_flag &= ~MNT_ASYNC;
3410 	vfs_msync(mp, MNT_NOWAIT);
3411 	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3412 	if (asyncflag)
3413 		mp->mnt_flag |= MNT_ASYNC;
3414 	vn_finished_write(mp);
3415 	vfs_unbusy(mp, td);
3416 	return (error);
3417 }
3418 
3419 /*
3420  * The syncer vnode is no longer referenced.
3421  */
3422 static int
3423 sync_inactive(ap)
3424 	struct vop_inactive_args /* {
3425 		struct vnode *a_vp;
3426 		struct thread *a_td;
3427 	} */ *ap;
3428 {
3429 
3430 	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3431 	vgone(ap->a_vp);
3432 	return (0);
3433 }
3434 
3435 /*
3436  * The syncer vnode is no longer needed and is being decommissioned.
3437  *
3438  * Modifications to the worklist must be protected by sync_mtx.
3439  */
3440 static int
3441 sync_reclaim(ap)
3442 	struct vop_reclaim_args /* {
3443 		struct vnode *a_vp;
3444 	} */ *ap;
3445 {
3446 	struct vnode *vp = ap->a_vp;
3447 
3448 	VI_LOCK(vp);
3449 	vp->v_mount->mnt_syncer = NULL;
3450 	if (vp->v_iflag & VI_ONWORKLST) {
3451 		mtx_lock(&sync_mtx);
3452 		LIST_REMOVE(vp, v_synclist);
3453 		syncer_worklist_len--;
3454 		sync_vnode_count--;
3455 		mtx_unlock(&sync_mtx);
3456 		vp->v_iflag &= ~VI_ONWORKLST;
3457 	}
3458 	VI_UNLOCK(vp);
3459 
3460 	return (0);
3461 }
3462 
3463 /*
3464  * Extract the struct cdev * from a VCHR vnode.
3465  */
3466 struct cdev *
3467 vn_todev(vp)
3468 	struct vnode *vp;
3469 {
3470 
3471 	if (vp->v_type != VCHR)
3472 		return (NULL);
3473 	return (vp->v_rdev);
3474 }
3475 
3476 /*
3477  * Check if vnode represents a disk device
3478  */
3479 int
3480 vn_isdisk(vp, errp)
3481 	struct vnode *vp;
3482 	int *errp;
3483 {
3484 	int error;
3485 
3486 	error = 0;
3487 	if (vp->v_type != VCHR)
3488 		error = ENOTBLK;
3489 	else if (vp->v_rdev == NULL)
3490 		error = ENXIO;
3491 	else if (!(devsw(vp->v_rdev)->d_flags & D_DISK))
3492 		error = ENOTBLK;
3493 	if (errp != NULL)
3494 		*errp = error;
3495 	return (error == 0);
3496 }
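
/*
 * A rough, illustrative sketch: mount code commonly validates the device
 * vnode it was handed with vn_isdisk():
 *
 *	if (!vn_isdisk(devvp, &error))
 *		return (error);		-- ENOTBLK or ENXIO
 */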
3497 
3498 /*
3499  * Free data allocated by namei(); see namei(9) for details.
3500  */
3501 void
3502 NDFREE(ndp, flags)
3503      struct nameidata *ndp;
3504      const u_int flags;
3505 {
3506 
3507 	if (!(flags & NDF_NO_FREE_PNBUF) &&
3508 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3509 		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3510 		ndp->ni_cnd.cn_flags &= ~HASBUF;
3511 	}
3512 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3513 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3514 	    ndp->ni_dvp != ndp->ni_vp)
3515 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3516 	if (!(flags & NDF_NO_DVP_RELE) &&
3517 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3518 		vrele(ndp->ni_dvp);
3519 		ndp->ni_dvp = NULL;
3520 	}
3521 	if (!(flags & NDF_NO_VP_UNLOCK) &&
3522 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3523 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3524 	if (!(flags & NDF_NO_VP_RELE) &&
3525 	    ndp->ni_vp) {
3526 		vrele(ndp->ni_vp);
3527 		ndp->ni_vp = NULL;
3528 	}
3529 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3530 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3531 		vrele(ndp->ni_startdir);
3532 		ndp->ni_startdir = NULL;
3533 	}
3534 }
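
/*
 * A rough, illustrative sketch of a typical namei()/NDFREE() pairing:
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, td);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);	-- free only the pathname buffer
 *	... use the locked vnode nd.ni_vp ...
 *	vput(nd.ni_vp);
 */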
3535 
3536 /*
3537  * Common filesystem object access control check routine.  Accepts a
3538  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3539  * and optional call-by-reference privused argument allowing vaccess()
3540  * to indicate to the caller whether privilege was used to satisfy the
3541  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3542  */
3543 int
3544 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3545 	enum vtype type;
3546 	mode_t file_mode;
3547 	uid_t file_uid;
3548 	gid_t file_gid;
3549 	mode_t acc_mode;
3550 	struct ucred *cred;
3551 	int *privused;
3552 {
3553 	mode_t dac_granted;
3554 #ifdef CAPABILITIES
3555 	mode_t cap_granted;
3556 #endif
3557 
3558 	/*
3559 	 * Look for a normal, non-privileged way to access the file/directory
3560 	 * as requested.  If it exists, go with that.
3561 	 */
3562 
3563 	if (privused != NULL)
3564 		*privused = 0;
3565 
3566 	dac_granted = 0;
3567 
3568 	/* Check the owner. */
3569 	if (cred->cr_uid == file_uid) {
3570 		dac_granted |= VADMIN;
3571 		if (file_mode & S_IXUSR)
3572 			dac_granted |= VEXEC;
3573 		if (file_mode & S_IRUSR)
3574 			dac_granted |= VREAD;
3575 		if (file_mode & S_IWUSR)
3576 			dac_granted |= (VWRITE | VAPPEND);
3577 
3578 		if ((acc_mode & dac_granted) == acc_mode)
3579 			return (0);
3580 
3581 		goto privcheck;
3582 	}
3583 
3584 	/* Otherwise, check the groups (first match) */
3585 	if (groupmember(file_gid, cred)) {
3586 		if (file_mode & S_IXGRP)
3587 			dac_granted |= VEXEC;
3588 		if (file_mode & S_IRGRP)
3589 			dac_granted |= VREAD;
3590 		if (file_mode & S_IWGRP)
3591 			dac_granted |= (VWRITE | VAPPEND);
3592 
3593 		if ((acc_mode & dac_granted) == acc_mode)
3594 			return (0);
3595 
3596 		goto privcheck;
3597 	}
3598 
3599 	/* Otherwise, check everyone else. */
3600 	if (file_mode & S_IXOTH)
3601 		dac_granted |= VEXEC;
3602 	if (file_mode & S_IROTH)
3603 		dac_granted |= VREAD;
3604 	if (file_mode & S_IWOTH)
3605 		dac_granted |= (VWRITE | VAPPEND);
3606 	if ((acc_mode & dac_granted) == acc_mode)
3607 		return (0);
3608 
3609 privcheck:
3610 	if (!suser_cred(cred, PRISON_ROOT)) {
3611 		/* XXX audit: privilege used */
3612 		if (privused != NULL)
3613 			*privused = 1;
3614 		return (0);
3615 	}
3616 
3617 #ifdef CAPABILITIES
3618 	/*
3619 	 * Build a capability mask to determine if the set of capabilities
3620 	 * satisfies the requirements when combined with the granted mask
3621 	 * from above.
3622 	 * For each capability, if the capability is required, bitwise
3623 	 * or the request type onto the cap_granted mask.
3624 	 */
3625 	cap_granted = 0;
3626 
3627 	if (type == VDIR) {
3628 		/*
3629 		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3630 		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3631 		 */
3632 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3633 		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3634 			cap_granted |= VEXEC;
3635 	} else {
3636 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3637 		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
3638 			cap_granted |= VEXEC;
3639 	}
3640 
3641 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3642 	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3643 		cap_granted |= VREAD;
3644 
3645 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3646 	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
3647 		cap_granted |= (VWRITE | VAPPEND);
3648 
3649 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3650 	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
3651 		cap_granted |= VADMIN;
3652 
3653 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3654 		/* XXX audit: privilege used */
3655 		if (privused != NULL)
3656 			*privused = 1;
3657 		return (0);
3658 	}
3659 #endif
3660 
3661 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3662 }
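
/*
 * As a rough worked example: for a regular file with file_mode 0640, a
 * request for VREAD | VWRITE by the owning uid yields dac_granted of
 * VADMIN | VREAD | VWRITE | VAPPEND and succeeds outright.  The same
 * request from a member of file_gid earns only VREAD from the group
 * bits, falls through to the privilege check, and fails with EACCES
 * for an unprivileged credential.
 */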
3663 
3664 /*
3665  * Credential check based on process requesting service, and per-attribute
3666  * permissions.
3667  */
3668 int
3669 extattr_check_cred(struct vnode *vp, int attrnamespace,
3670     struct ucred *cred, struct thread *td, int access)
3671 {
3672 
3673 	/*
3674 	 * Kernel-invoked checks always succeed.
3675 	 */
3676 	if (cred == NOCRED)
3677 		return (0);
3678 
3679 	/*
3680 	 * Do not allow privileged processes in jail to directly
3681 	 * manipulate system attributes.
3682 	 *
3683 	 * XXX What capability should apply here?
3684 	 * Probably CAP_SYS_SETFFLAG.
3685 	 */
3686 	switch (attrnamespace) {
3687 	case EXTATTR_NAMESPACE_SYSTEM:
3688 		/* Potentially should be: return (EPERM); */
3689 		return (suser_cred(cred, 0));
3690 	case EXTATTR_NAMESPACE_USER:
3691 		return (VOP_ACCESS(vp, access, cred, td));
3692 	default:
3693 		return (EPERM);
3694 	}
3695 }
3696 
3697 #ifdef DEBUG_VFS_LOCKS
3698 /*
3699  * This only exists to suppress warnings from unlocked specfs accesses.  It is
3700  * no longer ok to have an unlocked VFS.
3701  */
3702 #define	IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
3703 
3704 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3705 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3706 int vfs_badlock_print = 1;	/* Print lock violations. */
3707 
3708 static void
3709 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3710 {
3711 
3712 	if (vfs_badlock_print)
3713 		printf("%s: %p %s\n", str, (void *)vp, msg);
3714 	if (vfs_badlock_ddb)
3715 		Debugger("lock violation");
3716 }
3717 
3718 void
3719 assert_vi_locked(struct vnode *vp, const char *str)
3720 {
3721 
3722 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3723 		vfs_badlock("interlock is not locked but should be", str, vp);
3724 }
3725 
3726 void
3727 assert_vi_unlocked(struct vnode *vp, const char *str)
3728 {
3729 
3730 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3731 		vfs_badlock("interlock is locked but should not be", str, vp);
3732 }
3733 
3734 void
3735 assert_vop_locked(struct vnode *vp, const char *str)
3736 {
3737 
3738 	if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
3739 		vfs_badlock("is not locked but should be", str, vp);
3740 }
3741 
3742 void
3743 assert_vop_unlocked(struct vnode *vp, const char *str)
3744 {
3745 
3746 	if (vp && !IGNORE_LOCK(vp) &&
3747 	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
3748 		vfs_badlock("is locked but should not be", str, vp);
3749 }
3750 
3751 #if 0
3752 void
3753 assert_vop_elocked(struct vnode *vp, const char *str)
3754 {
3755 
3756 	if (vp && !IGNORE_LOCK(vp) &&
3757 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
3758 		vfs_badlock("is not exclusive locked but should be", str, vp);
3759 }
3760 
3761 void
3762 assert_vop_elocked_other(struct vnode *vp, const char *str)
3763 {
3764 
3765 	if (vp && !IGNORE_LOCK(vp) &&
3766 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
3767 		vfs_badlock("is not exclusive locked by another thread",
3768 		    str, vp);
3769 }
3770 
3771 void
3772 assert_vop_slocked(struct vnode *vp, const char *str)
3773 {
3774 
3775 	if (vp && !IGNORE_LOCK(vp) &&
3776 	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
3777 		vfs_badlock("is not locked shared but should be", str, vp);
3778 }
3779 #endif /* 0 */
3780 
3781 void
3782 vop_rename_pre(void *ap)
3783 {
3784 	struct vop_rename_args *a = ap;
3785 
3786 	if (a->a_tvp)
3787 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3788 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3789 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3790 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3791 
3792 	/* Check the source (from). */
3793 	if (a->a_tdvp != a->a_fdvp)
3794 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3795 	if (a->a_tvp != a->a_fvp)
3796 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3797 
3798 	/* Check the target. */
3799 	if (a->a_tvp)
3800 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3801 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3802 }
3803 
3804 void
3805 vop_strategy_pre(void *ap)
3806 {
3807 	struct vop_strategy_args *a;
3808 	struct buf *bp;
3809 
3810 	a = ap;
3811 	bp = a->a_bp;
3812 
3813 	/*
3814 	 * Cluster ops lock their component buffers but not the IO container.
3815 	 */
3816 	if ((bp->b_flags & B_CLUSTER) != 0)
3817 		return;
3818 
3819 	if (BUF_REFCNT(bp) < 1) {
3820 		if (vfs_badlock_print)
3821 			printf(
3822 			    "VOP_STRATEGY: bp is not locked but should be\n");
3823 		if (vfs_badlock_ddb)
3824 			Debugger("lock violation");
3825 	}
3826 }
3827 
3828 void
3829 vop_lookup_pre(void *ap)
3830 {
3831 	struct vop_lookup_args *a;
3832 	struct vnode *dvp;
3833 
3834 	a = ap;
3835 	dvp = a->a_dvp;
3836 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3837 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3838 }
3839 
3840 void
3841 vop_lookup_post(void *ap, int rc)
3842 {
3843 	struct vop_lookup_args *a;
3844 	struct componentname *cnp;
3845 	struct vnode *dvp;
3846 	struct vnode *vp;
3847 	int flags;
3848 
3849 	a = ap;
3850 	dvp = a->a_dvp;
3851 	cnp = a->a_cnp;
3852 	vp = *(a->a_vpp);
3853 	flags = cnp->cn_flags;
3854 
3855 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3856 
3857 	/*
3858 	 * If this is the last path component for this lookup and LOCKPARENT
3859 	 * is set, or if there is an error, the directory has to be locked.
3860 	 */
3861 	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
3862 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
3863 	else if (rc != 0)
3864 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
3865 	else if (dvp != vp)
3866 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
3867 	if (flags & PDIRUNLOCK)
3868 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
3869 }
3870 
3871 void
3872 vop_lock_pre(void *ap)
3873 {
3874 	struct vop_lock_args *a = ap;
3875 
3876 	if ((a->a_flags & LK_INTERLOCK) == 0)
3877 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3878 	else
3879 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3880 }
3881 
3882 void
3883 vop_lock_post(void *ap, int rc)
3884 {
3885 	struct vop_lock_args *a = ap;
3886 
3887 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3888 	if (rc == 0)
3889 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3890 }
3891 
3892 void
3893 vop_unlock_pre(void *ap)
3894 {
3895 	struct vop_unlock_args *a = ap;
3896 
3897 	if (a->a_flags & LK_INTERLOCK)
3898 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3899 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3900 }
3901 
3902 void
3903 vop_unlock_post(void *ap, int rc)
3904 {
3905 	struct vop_unlock_args *a = ap;
3906 
3907 	if (a->a_flags & LK_INTERLOCK)
3908 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3909 }
3910 #endif /* DEBUG_VFS_LOCKS */
3911