xref: /freebsd/sys/kern/vfs_subr.c (revision 2357939bc239bd5334a169b62313806178dd8f30)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  */
36 
37 /*
38  * External virtual filesystem routines
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/eventhandler.h>
53 #include <sys/extattr.h>
54 #include <sys/fcntl.h>
55 #include <sys/kernel.h>
56 #include <sys/kthread.h>
57 #include <sys/mac.h>
58 #include <sys/malloc.h>
59 #include <sys/mount.h>
60 #include <sys/namei.h>
61 #include <sys/sleepqueue.h>
62 #include <sys/stat.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
65 #include <sys/vmmeter.h>
66 #include <sys/vnode.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_extern.h>
71 #include <vm/pmap.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_page.h>
74 #include <vm/vm_kern.h>
75 #include <vm/uma.h>
76 
77 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
78 
79 static void	addalias(struct vnode *vp, dev_t nvp_rdev);
80 static void	insmntque(struct vnode *vp, struct mount *mp);
81 static void	vclean(struct vnode *vp, int flags, struct thread *td);
82 static void	vlruvp(struct vnode *vp);
83 static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
84 		    int slpflag, int slptimeo, int *errorp);
85 static int	vtryrecycle(struct vnode *vp);
86 static void	vx_lock(struct vnode *vp);
87 static void	vx_unlock(struct vnode *vp);
88 static void	vgonechrl(struct vnode *vp, struct thread *td);
89 
90 
91 /*
92  * Number of vnodes in existence.  Increased whenever getnewvnode()
93  * allocates a new vnode, never decreased.
94  */
95 static unsigned long	numvnodes;
96 
97 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
98 
99 /*
100  * Conversion tables for conversion from vnode types to inode formats
101  * and back.
102  */
103 enum vtype iftovt_tab[16] = {
104 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
105 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
106 };
107 int vttoif_tab[9] = {
108 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
109 	S_IFSOCK, S_IFIFO, S_IFMT,
110 };
111 
112 /*
113  * List of vnodes that are ready for recycling.
114  */
115 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
116 
117 /*
118  * Minimum number of free vnodes.  If there are fewer than this free vnodes,
119  * getnewvnode() will return a newly allocated vnode.
120  */
121 static u_long wantfreevnodes = 25;
122 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
123 /* Number of vnodes in the free list. */
124 static u_long freevnodes;
125 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
126 
127 /*
128  * Various variables used for debugging the new implementation of
129  * reassignbuf().
130  * XXX these are probably of (very) limited utility now.
131  */
132 static int reassignbufcalls;
133 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
134 static int nameileafonly;
135 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
136 
137 /*
138  * Cache for the mount type id assigned to NFS.  This is used for
139  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
140  */
141 int	nfs_mount_type = -1;
142 
143 /* To keep more than one thread at a time from running vfs_getnewfsid */
144 static struct mtx mntid_mtx;
145 
146 /*
147  * Lock for any access to the following:
148  *	vnode_free_list
149  *	numvnodes
150  *	freevnodes
151  */
152 static struct mtx vnode_free_list_mtx;
153 
154 /*
155  * For any iteration/modification of dev->si_hlist (linked through
156  * v_specnext)
157  */
158 static struct mtx spechash_mtx;
159 
160 /* Publicly exported FS */
161 struct nfs_public nfs_pub;
162 
163 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
164 static uma_zone_t vnode_zone;
165 static uma_zone_t vnodepoll_zone;
166 
167 /* Set to 1 to print out reclaim of active vnodes */
168 int	prtactive;
169 
170 /*
171  * The workitem queue.
172  *
173  * It is useful to delay writes of file data and filesystem metadata
174  * for tens of seconds so that quickly created and deleted files need
175  * not waste disk bandwidth being created and removed. To realize this,
176  * we append vnodes to a "workitem" queue. When running with a soft
177  * updates implementation, most pending metadata dependencies should
178  * not wait for more than a few seconds. Thus, mounted on block devices
179  * are delayed only about a half the time that file data is delayed.
180  * Similarly, directory updates are more critical, so are only delayed
181  * about a third the time that file data is delayed. Thus, there are
182  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
183  * one each second (driven off the filesystem syncer process). The
184  * syncer_delayno variable indicates the next queue that is to be processed.
185  * Items that need to be processed soon are placed in this queue:
186  *
187  *	syncer_workitem_pending[syncer_delayno]
188  *
189  * A delay of fifteen seconds is done by placing the request fifteen
190  * entries later in the queue:
191  *
192  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
193  *
194  */
195 static int syncer_delayno;
196 static long syncer_mask;
197 LIST_HEAD(synclist, vnode);
198 static struct synclist *syncer_workitem_pending;
199 /*
200  * The sync_mtx protects:
201  *	vp->v_synclist
202  *	syncer_delayno
203  *	syncer_workitem_pending
204  *	rushjob
205  */
206 static struct mtx sync_mtx;
207 
208 #define SYNCER_MAXDELAY		32
209 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
210 static int syncdelay = 30;		/* max time to delay syncing data */
211 static int filedelay = 30;		/* time to delay syncing files */
212 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
213 static int dirdelay = 29;		/* time to delay syncing directories */
214 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
215 static int metadelay = 28;		/* time to delay syncing metadata */
216 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
217 static int rushjob;		/* number of slots to run ASAP */
218 static int stat_rush_requests;	/* number of times I/O speeded up */
219 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
220 
221 /*
222  * Number of vnodes we want to exist at any one time.  This is mostly used
223  * to size hash tables in vnode-related code.  It is normally not used in
224  * getnewvnode(), as wantfreevnodes is normally nonzero.)
225  *
226  * XXX desiredvnodes is historical cruft and should not exist.
227  */
228 int desiredvnodes;
229 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
230     &desiredvnodes, 0, "Maximum number of vnodes");
231 static int minvnodes;
232 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
233     &minvnodes, 0, "Minimum number of vnodes");
234 static int vnlru_nowhere;
235 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
236     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
237 
238 /* Hook for calling soft updates. */
239 int (*softdep_process_worklist_hook)(struct mount *);
240 
241 /*
242  * Initialize the vnode management data structures.
243  */
244 static void
245 vntblinit(void *dummy __unused)
246 {
247 
248 	/*
249 	 * Desiredvnodes is a function of the physical memory size and
250 	 * the kernel's heap size.  Specifically, desiredvnodes scales
251 	 * in proportion to the physical memory size until two fifths
252 	 * of the kernel's heap size is consumed by vnodes and vm
253 	 * objects.
254 	 */
255 	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
256 	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
257 	minvnodes = desiredvnodes / 4;
258 	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
259 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
260 	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
261 	TAILQ_INIT(&vnode_free_list);
262 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
263 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
264 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
265 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
266 	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
267 	/*
268 	 * Initialize the filesystem syncer.
269 	 */
270 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
271 		&syncer_mask);
272 	syncer_maxdelay = syncer_mask + 1;
273 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
274 }
275 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
276 
277 
278 /*
279  * Mark a mount point as busy. Used to synchronize access and to delay
280  * unmounting. Interlock is not released on failure.
281  */
282 int
283 vfs_busy(mp, flags, interlkp, td)
284 	struct mount *mp;
285 	int flags;
286 	struct mtx *interlkp;
287 	struct thread *td;
288 {
289 	int lkflags;
290 
291 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
292 		if (flags & LK_NOWAIT)
293 			return (ENOENT);
294 		mp->mnt_kern_flag |= MNTK_MWAIT;
295 		/*
296 		 * Since all busy locks are shared except the exclusive
297 		 * lock granted when unmounting, the only place that a
298 		 * wakeup needs to be done is at the release of the
299 		 * exclusive lock at the end of dounmount.
300 		 */
301 		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
302 		return (ENOENT);
303 	}
304 	lkflags = LK_SHARED | LK_NOPAUSE;
305 	if (interlkp)
306 		lkflags |= LK_INTERLOCK;
307 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
308 		panic("vfs_busy: unexpected lock failure");
309 	return (0);
310 }
311 
312 /*
313  * Free a busy filesystem.
314  */
315 void
316 vfs_unbusy(mp, td)
317 	struct mount *mp;
318 	struct thread *td;
319 {
320 
321 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
322 }
323 
324 /*
325  * Lookup a mount point by filesystem identifier.
326  */
327 struct mount *
328 vfs_getvfs(fsid)
329 	fsid_t *fsid;
330 {
331 	register struct mount *mp;
332 
333 	mtx_lock(&mountlist_mtx);
334 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
335 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
336 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
337 			mtx_unlock(&mountlist_mtx);
338 			return (mp);
339 		}
340 	}
341 	mtx_unlock(&mountlist_mtx);
342 	return ((struct mount *) 0);
343 }
344 
345 /*
346  * Get a new unique fsid.  Try to make its val[0] unique, since this value
347  * will be used to create fake device numbers for stat().  Also try (but
348  * not so hard) make its val[0] unique mod 2^16, since some emulators only
349  * support 16-bit device numbers.  We end up with unique val[0]'s for the
350  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
351  *
352  * Keep in mind that several mounts may be running in parallel.  Starting
353  * the search one past where the previous search terminated is both a
354  * micro-optimization and a defense against returning the same fsid to
355  * different mounts.
356  */
357 void
358 vfs_getnewfsid(mp)
359 	struct mount *mp;
360 {
361 	static u_int16_t mntid_base;
362 	fsid_t tfsid;
363 	int mtype;
364 
365 	mtx_lock(&mntid_mtx);
366 	mtype = mp->mnt_vfc->vfc_typenum;
367 	tfsid.val[1] = mtype;
368 	mtype = (mtype & 0xFF) << 24;
369 	for (;;) {
370 		tfsid.val[0] = makeudev(255,
371 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
372 		mntid_base++;
373 		if (vfs_getvfs(&tfsid) == NULL)
374 			break;
375 	}
376 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
377 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
378 	mtx_unlock(&mntid_mtx);
379 }
380 
381 /*
382  * Knob to control the precision of file timestamps:
383  *
384  *   0 = seconds only; nanoseconds zeroed.
385  *   1 = seconds and nanoseconds, accurate within 1/HZ.
386  *   2 = seconds and nanoseconds, truncated to microseconds.
387  * >=3 = seconds and nanoseconds, maximum precision.
388  */
389 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
390 
391 static int timestamp_precision = TSP_SEC;
392 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
393     &timestamp_precision, 0, "");
394 
395 /*
396  * Get a current timestamp.
397  */
398 void
399 vfs_timestamp(tsp)
400 	struct timespec *tsp;
401 {
402 	struct timeval tv;
403 
404 	switch (timestamp_precision) {
405 	case TSP_SEC:
406 		tsp->tv_sec = time_second;
407 		tsp->tv_nsec = 0;
408 		break;
409 	case TSP_HZ:
410 		getnanotime(tsp);
411 		break;
412 	case TSP_USEC:
413 		microtime(&tv);
414 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
415 		break;
416 	case TSP_NSEC:
417 	default:
418 		nanotime(tsp);
419 		break;
420 	}
421 }
422 
423 /*
424  * Set vnode attributes to VNOVAL
425  */
426 void
427 vattr_null(vap)
428 	register struct vattr *vap;
429 {
430 
431 	vap->va_type = VNON;
432 	vap->va_size = VNOVAL;
433 	vap->va_bytes = VNOVAL;
434 	vap->va_mode = VNOVAL;
435 	vap->va_nlink = VNOVAL;
436 	vap->va_uid = VNOVAL;
437 	vap->va_gid = VNOVAL;
438 	vap->va_fsid = VNOVAL;
439 	vap->va_fileid = VNOVAL;
440 	vap->va_blocksize = VNOVAL;
441 	vap->va_rdev = VNOVAL;
442 	vap->va_atime.tv_sec = VNOVAL;
443 	vap->va_atime.tv_nsec = VNOVAL;
444 	vap->va_mtime.tv_sec = VNOVAL;
445 	vap->va_mtime.tv_nsec = VNOVAL;
446 	vap->va_ctime.tv_sec = VNOVAL;
447 	vap->va_ctime.tv_nsec = VNOVAL;
448 	vap->va_birthtime.tv_sec = VNOVAL;
449 	vap->va_birthtime.tv_nsec = VNOVAL;
450 	vap->va_flags = VNOVAL;
451 	vap->va_gen = VNOVAL;
452 	vap->va_vaflags = 0;
453 }
454 
455 /*
456  * This routine is called when we have too many vnodes.  It attempts
457  * to free <count> vnodes and will potentially free vnodes that still
458  * have VM backing store (VM backing store is typically the cause
459  * of a vnode blowout so we want to do this).  Therefore, this operation
460  * is not considered cheap.
461  *
462  * A number of conditions may prevent a vnode from being reclaimed.
463  * the buffer cache may have references on the vnode, a directory
464  * vnode may still have references due to the namei cache representing
465  * underlying files, or the vnode may be in active use.   It is not
466  * desireable to reuse such vnodes.  These conditions may cause the
467  * number of vnodes to reach some minimum value regardless of what
468  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
469  */
470 static int
471 vlrureclaim(struct mount *mp)
472 {
473 	struct vnode *vp;
474 	int done;
475 	int trigger;
476 	int usevnodes;
477 	int count;
478 
479 	/*
480 	 * Calculate the trigger point, don't allow user
481 	 * screwups to blow us up.   This prevents us from
482 	 * recycling vnodes with lots of resident pages.  We
483 	 * aren't trying to free memory, we are trying to
484 	 * free vnodes.
485 	 */
486 	usevnodes = desiredvnodes;
487 	if (usevnodes <= 0)
488 		usevnodes = 1;
489 	trigger = cnt.v_page_count * 2 / usevnodes;
490 
491 	done = 0;
492 	MNT_ILOCK(mp);
493 	count = mp->mnt_nvnodelistsize / 10 + 1;
494 	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
495 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
496 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
497 
498 		if (vp->v_type != VNON &&
499 		    vp->v_type != VBAD &&
500 		    VI_TRYLOCK(vp)) {
501 			if (VMIGHTFREE(vp) &&           /* critical path opt */
502 			    (vp->v_object == NULL ||
503 			    vp->v_object->resident_page_count < trigger)) {
504 				MNT_IUNLOCK(mp);
505 				vgonel(vp, curthread);
506 				done++;
507 				MNT_ILOCK(mp);
508 			} else
509 				VI_UNLOCK(vp);
510 		}
511 		--count;
512 	}
513 	MNT_IUNLOCK(mp);
514 	return done;
515 }
516 
517 /*
518  * Attempt to recycle vnodes in a context that is always safe to block.
519  * Calling vlrurecycle() from the bowels of filesystem code has some
520  * interesting deadlock problems.
521  */
522 static struct proc *vnlruproc;
523 static int vnlruproc_sig;
524 
525 static void
526 vnlru_proc(void)
527 {
528 	struct mount *mp, *nmp;
529 	int done;
530 	struct proc *p = vnlruproc;
531 	struct thread *td = FIRST_THREAD_IN_PROC(p);
532 
533 	mtx_lock(&Giant);
534 
535 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
536 	    SHUTDOWN_PRI_FIRST);
537 
538 	for (;;) {
539 		kthread_suspend_check(p);
540 		mtx_lock(&vnode_free_list_mtx);
541 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
542 			mtx_unlock(&vnode_free_list_mtx);
543 			vnlruproc_sig = 0;
544 			wakeup(&vnlruproc_sig);
545 			tsleep(vnlruproc, PVFS, "vlruwt", hz);
546 			continue;
547 		}
548 		mtx_unlock(&vnode_free_list_mtx);
549 		done = 0;
550 		mtx_lock(&mountlist_mtx);
551 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
552 			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
553 				nmp = TAILQ_NEXT(mp, mnt_list);
554 				continue;
555 			}
556 			done += vlrureclaim(mp);
557 			mtx_lock(&mountlist_mtx);
558 			nmp = TAILQ_NEXT(mp, mnt_list);
559 			vfs_unbusy(mp, td);
560 		}
561 		mtx_unlock(&mountlist_mtx);
562 		if (done == 0) {
563 #if 0
564 			/* These messages are temporary debugging aids */
565 			if (vnlru_nowhere < 5)
566 				printf("vnlru process getting nowhere..\n");
567 			else if (vnlru_nowhere == 5)
568 				printf("vnlru process messages stopped.\n");
569 #endif
570 			vnlru_nowhere++;
571 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
572 		}
573 	}
574 }
575 
576 static struct kproc_desc vnlru_kp = {
577 	"vnlru",
578 	vnlru_proc,
579 	&vnlruproc
580 };
581 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
582 
583 
584 /*
585  * Routines having to do with the management of the vnode table.
586  */
587 
588 /*
589  * Check to see if a free vnode can be recycled. If it can,
590  * recycle it and return it with the vnode interlock held.
591  */
592 static int
593 vtryrecycle(struct vnode *vp)
594 {
595 	struct thread *td = curthread;
596 	vm_object_t object;
597 	struct mount *vnmp;
598 	int error;
599 
600 	/* Don't recycle if we can't get the interlock */
601 	if (!VI_TRYLOCK(vp))
602 		return (EWOULDBLOCK);
603 	/*
604 	 * This vnode may found and locked via some other list, if so we
605 	 * can't recycle it yet.
606 	 */
607 	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
608 		return (EWOULDBLOCK);
609 	/*
610 	 * Don't recycle if its filesystem is being suspended.
611 	 */
612 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
613 		VOP_UNLOCK(vp, 0, td);
614 		return (EBUSY);
615 	}
616 
617 	/*
618 	 * Don't recycle if we still have cached pages.
619 	 */
620 	if (VOP_GETVOBJECT(vp, &object) == 0) {
621 		VM_OBJECT_LOCK(object);
622 		if (object->resident_page_count ||
623 		    object->ref_count) {
624 			VM_OBJECT_UNLOCK(object);
625 			error = EBUSY;
626 			goto done;
627 		}
628 		VM_OBJECT_UNLOCK(object);
629 	}
630 	if (LIST_FIRST(&vp->v_cache_src)) {
631 		/*
632 		 * note: nameileafonly sysctl is temporary,
633 		 * for debugging only, and will eventually be
634 		 * removed.
635 		 */
636 		if (nameileafonly > 0) {
637 			/*
638 			 * Do not reuse namei-cached directory
639 			 * vnodes that have cached
640 			 * subdirectories.
641 			 */
642 			if (cache_leaf_test(vp) < 0) {
643 				error = EISDIR;
644 				goto done;
645 			}
646 		} else if (nameileafonly < 0 ||
647 			    vmiodirenable == 0) {
648 			/*
649 			 * Do not reuse namei-cached directory
650 			 * vnodes if nameileafonly is -1 or
651 			 * if VMIO backing for directories is
652 			 * turned off (otherwise we reuse them
653 			 * too quickly).
654 			 */
655 			error = EBUSY;
656 			goto done;
657 		}
658 	}
659 	/*
660 	 * If we got this far, we need to acquire the interlock and see if
661 	 * anyone picked up this vnode from another list.  If not, we will
662 	 * mark it with XLOCK via vgonel() so that anyone who does find it
663 	 * will skip over it.
664 	 */
665 	VI_LOCK(vp);
666 	if (VSHOULDBUSY(vp) && (vp->v_iflag & VI_XLOCK) == 0) {
667 		VI_UNLOCK(vp);
668 		error = EBUSY;
669 		goto done;
670 	}
671 	mtx_lock(&vnode_free_list_mtx);
672 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
673 	vp->v_iflag &= ~VI_FREE;
674 	mtx_unlock(&vnode_free_list_mtx);
675 	vp->v_iflag |= VI_DOOMED;
676 	if (vp->v_type != VBAD) {
677 		VOP_UNLOCK(vp, 0, td);
678 		vgonel(vp, td);
679 		VI_LOCK(vp);
680 	} else
681 		VOP_UNLOCK(vp, 0, td);
682 	vn_finished_write(vnmp);
683 	return (0);
684 done:
685 	VOP_UNLOCK(vp, 0, td);
686 	vn_finished_write(vnmp);
687 	return (error);
688 }
689 
690 /*
691  * Return the next vnode from the free list.
692  */
693 int
694 getnewvnode(tag, mp, vops, vpp)
695 	const char *tag;
696 	struct mount *mp;
697 	vop_t **vops;
698 	struct vnode **vpp;
699 {
700 	struct vnode *vp = NULL;
701 	struct vpollinfo *pollinfo = NULL;
702 
703 	mtx_lock(&vnode_free_list_mtx);
704 
705 	/*
706 	 * Try to reuse vnodes if we hit the max.  This situation only
707 	 * occurs in certain large-memory (2G+) situations.  We cannot
708 	 * attempt to directly reclaim vnodes due to nasty recursion
709 	 * problems.
710 	 */
711 	while (numvnodes - freevnodes > desiredvnodes) {
712 		if (vnlruproc_sig == 0) {
713 			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
714 			wakeup(vnlruproc);
715 		}
716 		mtx_unlock(&vnode_free_list_mtx);
717 		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
718 		mtx_lock(&vnode_free_list_mtx);
719 	}
720 
721 	/*
722 	 * Attempt to reuse a vnode already on the free list, allocating
723 	 * a new vnode if we can't find one or if we have not reached a
724 	 * good minimum for good LRU performance.
725 	 */
726 
727 	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
728 		int error;
729 		int count;
730 
731 		for (count = 0; count < freevnodes; count++) {
732 			vp = TAILQ_FIRST(&vnode_free_list);
733 
734 			KASSERT(vp->v_usecount == 0 &&
735 			    (vp->v_iflag & VI_DOINGINACT) == 0,
736 			    ("getnewvnode: free vnode isn't"));
737 
738 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
739 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
740 			mtx_unlock(&vnode_free_list_mtx);
741 			error = vtryrecycle(vp);
742 			mtx_lock(&vnode_free_list_mtx);
743 			if (error == 0)
744 				break;
745 			vp = NULL;
746 		}
747 	}
748 	if (vp) {
749 		freevnodes--;
750 		mtx_unlock(&vnode_free_list_mtx);
751 
752 #ifdef INVARIANTS
753 		{
754 			if (vp->v_data)
755 				panic("cleaned vnode isn't");
756 			if (vp->v_numoutput)
757 				panic("Clean vnode has pending I/O's");
758 			if (vp->v_writecount != 0)
759 				panic("Non-zero write count");
760 		}
761 #endif
762 		if ((pollinfo = vp->v_pollinfo) != NULL) {
763 			/*
764 			 * To avoid lock order reversals, the call to
765 			 * uma_zfree() must be delayed until the vnode
766 			 * interlock is released.
767 			 */
768 			vp->v_pollinfo = NULL;
769 		}
770 #ifdef MAC
771 		mac_destroy_vnode(vp);
772 #endif
773 		vp->v_iflag = 0;
774 		vp->v_vflag = 0;
775 		vp->v_lastw = 0;
776 		vp->v_lasta = 0;
777 		vp->v_cstart = 0;
778 		vp->v_clen = 0;
779 		vp->v_socket = 0;
780 		lockdestroy(vp->v_vnlock);
781 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
782 		KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
783 		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
784 		KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
785 		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
786 	} else {
787 		numvnodes++;
788 		mtx_unlock(&vnode_free_list_mtx);
789 
790 		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
791 		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
792 		VI_LOCK(vp);
793 		vp->v_dd = vp;
794 		vp->v_vnlock = &vp->v_lock;
795 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
796 		cache_purge(vp);		/* Sets up v_id. */
797 		LIST_INIT(&vp->v_cache_src);
798 		TAILQ_INIT(&vp->v_cache_dst);
799 	}
800 
801 	TAILQ_INIT(&vp->v_cleanblkhd);
802 	TAILQ_INIT(&vp->v_dirtyblkhd);
803 	vp->v_type = VNON;
804 	vp->v_tag = tag;
805 	vp->v_op = vops;
806 	*vpp = vp;
807 	vp->v_usecount = 1;
808 	vp->v_data = 0;
809 	vp->v_cachedid = -1;
810 	VI_UNLOCK(vp);
811 	if (pollinfo != NULL) {
812 		mtx_destroy(&pollinfo->vpi_lock);
813 		uma_zfree(vnodepoll_zone, pollinfo);
814 	}
815 #ifdef MAC
816 	mac_init_vnode(vp);
817 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
818 		mac_associate_vnode_singlelabel(mp, vp);
819 #endif
820 	insmntque(vp, mp);
821 
822 	return (0);
823 }
824 
825 /*
826  * Move a vnode from one mount queue to another.
827  */
828 static void
829 insmntque(vp, mp)
830 	register struct vnode *vp;
831 	register struct mount *mp;
832 {
833 
834 	/*
835 	 * Delete from old mount point vnode list, if on one.
836 	 */
837 	if (vp->v_mount != NULL) {
838 		MNT_ILOCK(vp->v_mount);
839 		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
840 			("bad mount point vnode list size"));
841 		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
842 		vp->v_mount->mnt_nvnodelistsize--;
843 		MNT_IUNLOCK(vp->v_mount);
844 	}
845 	/*
846 	 * Insert into list of vnodes for the new mount point, if available.
847 	 */
848 	if ((vp->v_mount = mp) != NULL) {
849 		MNT_ILOCK(vp->v_mount);
850 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
851 		mp->mnt_nvnodelistsize++;
852 		MNT_IUNLOCK(vp->v_mount);
853 	}
854 }
855 
856 /*
857  * Update outstanding I/O count and do wakeup if requested.
858  */
859 void
860 vwakeup(bp)
861 	register struct buf *bp;
862 {
863 	register struct vnode *vp;
864 
865 	bp->b_flags &= ~B_WRITEINPROG;
866 	if ((vp = bp->b_vp)) {
867 		VI_LOCK(vp);
868 		vp->v_numoutput--;
869 		if (vp->v_numoutput < 0)
870 			panic("vwakeup: neg numoutput");
871 		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
872 			vp->v_iflag &= ~VI_BWAIT;
873 			wakeup(&vp->v_numoutput);
874 		}
875 		VI_UNLOCK(vp);
876 	}
877 }
878 
879 /*
880  * Flush out and invalidate all buffers associated with a vnode.
881  * Called with the underlying object locked.
882  */
883 int
884 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
885 	struct vnode *vp;
886 	int flags;
887 	struct ucred *cred;
888 	struct thread *td;
889 	int slpflag, slptimeo;
890 {
891 	struct buf *blist;
892 	int error;
893 	vm_object_t object;
894 
895 	GIANT_REQUIRED;
896 
897 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
898 
899 	VI_LOCK(vp);
900 	if (flags & V_SAVE) {
901 		while (vp->v_numoutput) {
902 			vp->v_iflag |= VI_BWAIT;
903 			error = msleep(&vp->v_numoutput, VI_MTX(vp),
904 			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
905 			if (error) {
906 				VI_UNLOCK(vp);
907 				return (error);
908 			}
909 		}
910 		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
911 			VI_UNLOCK(vp);
912 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
913 				return (error);
914 			/*
915 			 * XXX We could save a lock/unlock if this was only
916 			 * enabled under INVARIANTS
917 			 */
918 			VI_LOCK(vp);
919 			if (vp->v_numoutput > 0 ||
920 			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
921 				panic("vinvalbuf: dirty bufs");
922 		}
923 	}
924 	/*
925 	 * If you alter this loop please notice that interlock is dropped and
926 	 * reacquired in flushbuflist.  Special care is needed to ensure that
927 	 * no race conditions occur from this.
928 	 */
929 	for (error = 0;;) {
930 		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
931 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
932 			if (error)
933 				break;
934 			continue;
935 		}
936 		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
937 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
938 			if (error)
939 				break;
940 			continue;
941 		}
942 		break;
943 	}
944 	if (error) {
945 		VI_UNLOCK(vp);
946 		return (error);
947 	}
948 
949 	/*
950 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
951 	 * have write I/O in-progress but if there is a VM object then the
952 	 * VM object can also have read-I/O in-progress.
953 	 */
954 	do {
955 		while (vp->v_numoutput > 0) {
956 			vp->v_iflag |= VI_BWAIT;
957 			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
958 		}
959 		VI_UNLOCK(vp);
960 		if (VOP_GETVOBJECT(vp, &object) == 0) {
961 			VM_OBJECT_LOCK(object);
962 			vm_object_pip_wait(object, "vnvlbx");
963 			VM_OBJECT_UNLOCK(object);
964 		}
965 		VI_LOCK(vp);
966 	} while (vp->v_numoutput > 0);
967 	VI_UNLOCK(vp);
968 
969 	/*
970 	 * Destroy the copy in the VM cache, too.
971 	 */
972 	if (VOP_GETVOBJECT(vp, &object) == 0) {
973 		VM_OBJECT_LOCK(object);
974 		vm_object_page_remove(object, 0, 0,
975 			(flags & V_SAVE) ? TRUE : FALSE);
976 		VM_OBJECT_UNLOCK(object);
977 	}
978 
979 #ifdef INVARIANTS
980 	VI_LOCK(vp);
981 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
982 	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
983 	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
984 		panic("vinvalbuf: flush failed");
985 	VI_UNLOCK(vp);
986 #endif
987 	return (0);
988 }
989 
990 /*
991  * Flush out buffers on the specified list.
992  *
993  */
994 static int
995 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
996 	struct buf *blist;
997 	int flags;
998 	struct vnode *vp;
999 	int slpflag, slptimeo;
1000 	int *errorp;
1001 {
1002 	struct buf *bp, *nbp;
1003 	int found, error;
1004 
1005 	ASSERT_VI_LOCKED(vp, "flushbuflist");
1006 
1007 	for (found = 0, bp = blist; bp; bp = nbp) {
1008 		nbp = TAILQ_NEXT(bp, b_vnbufs);
1009 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1010 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1011 			continue;
1012 		}
1013 		found += 1;
1014 		error = BUF_TIMELOCK(bp,
1015 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
1016 		    "flushbuf", slpflag, slptimeo);
1017 		if (error) {
1018 			if (error != ENOLCK)
1019 				*errorp = error;
1020 			goto done;
1021 		}
1022 		/*
1023 		 * XXX Since there are no node locks for NFS, I
1024 		 * believe there is a slight chance that a delayed
1025 		 * write will occur while sleeping just above, so
1026 		 * check for it.  Note that vfs_bio_awrite expects
1027 		 * buffers to reside on a queue, while bwrite and
1028 		 * brelse do not.
1029 		 */
1030 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1031 			(flags & V_SAVE)) {
1032 
1033 			if (bp->b_vp == vp) {
1034 				if (bp->b_flags & B_CLUSTEROK) {
1035 					vfs_bio_awrite(bp);
1036 				} else {
1037 					bremfree(bp);
1038 					bp->b_flags |= B_ASYNC;
1039 					bwrite(bp);
1040 				}
1041 			} else {
1042 				bremfree(bp);
1043 				(void) bwrite(bp);
1044 			}
1045 			goto done;
1046 		}
1047 		bremfree(bp);
1048 		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1049 		bp->b_flags &= ~B_ASYNC;
1050 		brelse(bp);
1051 		VI_LOCK(vp);
1052 	}
1053 	return (found);
1054 done:
1055 	VI_LOCK(vp);
1056 	return (found);
1057 }
1058 
1059 /*
1060  * Truncate a file's buffer and pages to a specified length.  This
1061  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1062  * sync activity.
1063  */
1064 int
1065 vtruncbuf(vp, cred, td, length, blksize)
1066 	register struct vnode *vp;
1067 	struct ucred *cred;
1068 	struct thread *td;
1069 	off_t length;
1070 	int blksize;
1071 {
1072 	register struct buf *bp;
1073 	struct buf *nbp;
1074 	int anyfreed;
1075 	int trunclbn;
1076 
1077 	/*
1078 	 * Round up to the *next* lbn.
1079 	 */
1080 	trunclbn = (length + blksize - 1) / blksize;
1081 
1082 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1083 restart:
1084 	VI_LOCK(vp);
1085 	anyfreed = 1;
1086 	for (;anyfreed;) {
1087 		anyfreed = 0;
1088 		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
1089 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1090 			if (bp->b_lblkno >= trunclbn) {
1091 				if (BUF_LOCK(bp,
1092 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1093 				    VI_MTX(vp)) == ENOLCK)
1094 					goto restart;
1095 
1096 				bremfree(bp);
1097 				bp->b_flags |= (B_INVAL | B_RELBUF);
1098 				bp->b_flags &= ~B_ASYNC;
1099 				brelse(bp);
1100 				anyfreed = 1;
1101 
1102 				if (nbp &&
1103 				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1104 				    (nbp->b_vp != vp) ||
1105 				    (nbp->b_flags & B_DELWRI))) {
1106 					goto restart;
1107 				}
1108 				VI_LOCK(vp);
1109 			}
1110 		}
1111 
1112 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1113 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1114 			if (bp->b_lblkno >= trunclbn) {
1115 				if (BUF_LOCK(bp,
1116 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1117 				    VI_MTX(vp)) == ENOLCK)
1118 					goto restart;
1119 				bremfree(bp);
1120 				bp->b_flags |= (B_INVAL | B_RELBUF);
1121 				bp->b_flags &= ~B_ASYNC;
1122 				brelse(bp);
1123 				anyfreed = 1;
1124 				if (nbp &&
1125 				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1126 				    (nbp->b_vp != vp) ||
1127 				    (nbp->b_flags & B_DELWRI) == 0)) {
1128 					goto restart;
1129 				}
1130 				VI_LOCK(vp);
1131 			}
1132 		}
1133 	}
1134 
1135 	if (length > 0) {
1136 restartsync:
1137 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1138 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1139 			if (bp->b_lblkno > 0)
1140 				continue;
1141 			/*
1142 			 * Since we hold the vnode lock this should only
1143 			 * fail if we're racing with the buf daemon.
1144 			 */
1145 			if (BUF_LOCK(bp,
1146 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1147 			    VI_MTX(vp)) == ENOLCK) {
1148 				goto restart;
1149 			}
1150 			KASSERT((bp->b_flags & B_DELWRI),
1151 			    ("buf(%p) on dirty queue without DELWRI", bp));
1152 
1153 			bremfree(bp);
1154 			bawrite(bp);
1155 			VI_LOCK(vp);
1156 			goto restartsync;
1157 		}
1158 	}
1159 
1160 	while (vp->v_numoutput > 0) {
1161 		vp->v_iflag |= VI_BWAIT;
1162 		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
1163 	}
1164 	VI_UNLOCK(vp);
1165 	vnode_pager_setsize(vp, length);
1166 
1167 	return (0);
1168 }
1169 
1170 /*
1171  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1172  * 		 a vnode.
1173  *
1174  *	NOTE: We have to deal with the special case of a background bitmap
1175  *	buffer, a situation where two buffers will have the same logical
1176  *	block offset.  We want (1) only the foreground buffer to be accessed
1177  *	in a lookup and (2) must differentiate between the foreground and
1178  *	background buffer in the splay tree algorithm because the splay
1179  *	tree cannot normally handle multiple entities with the same 'index'.
1180  *	We accomplish this by adding differentiating flags to the splay tree's
1181  *	numerical domain.
1182  */
1183 static
1184 struct buf *
1185 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1186 {
1187 	struct buf dummy;
1188 	struct buf *lefttreemax, *righttreemin, *y;
1189 
1190 	if (root == NULL)
1191 		return (NULL);
1192 	lefttreemax = righttreemin = &dummy;
1193 	for (;;) {
1194 		if (lblkno < root->b_lblkno ||
1195 		    (lblkno == root->b_lblkno &&
1196 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1197 			if ((y = root->b_left) == NULL)
1198 				break;
1199 			if (lblkno < y->b_lblkno) {
1200 				/* Rotate right. */
1201 				root->b_left = y->b_right;
1202 				y->b_right = root;
1203 				root = y;
1204 				if ((y = root->b_left) == NULL)
1205 					break;
1206 			}
1207 			/* Link into the new root's right tree. */
1208 			righttreemin->b_left = root;
1209 			righttreemin = root;
1210 		} else if (lblkno > root->b_lblkno ||
1211 		    (lblkno == root->b_lblkno &&
1212 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1213 			if ((y = root->b_right) == NULL)
1214 				break;
1215 			if (lblkno > y->b_lblkno) {
1216 				/* Rotate left. */
1217 				root->b_right = y->b_left;
1218 				y->b_left = root;
1219 				root = y;
1220 				if ((y = root->b_right) == NULL)
1221 					break;
1222 			}
1223 			/* Link into the new root's left tree. */
1224 			lefttreemax->b_right = root;
1225 			lefttreemax = root;
1226 		} else {
1227 			break;
1228 		}
1229 		root = y;
1230 	}
1231 	/* Assemble the new root. */
1232 	lefttreemax->b_right = root->b_left;
1233 	righttreemin->b_left = root->b_right;
1234 	root->b_left = dummy.b_right;
1235 	root->b_right = dummy.b_left;
1236 	return (root);
1237 }
1238 
1239 static
1240 void
1241 buf_vlist_remove(struct buf *bp)
1242 {
1243 	struct vnode *vp = bp->b_vp;
1244 	struct buf *root;
1245 
1246 	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
1247 	if (bp->b_xflags & BX_VNDIRTY) {
1248 		if (bp != vp->v_dirtyblkroot) {
1249 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1250 			    vp->v_dirtyblkroot);
1251 			KASSERT(root == bp,
1252 			    ("splay lookup failed during dirty remove"));
1253 		}
1254 		if (bp->b_left == NULL) {
1255 			root = bp->b_right;
1256 		} else {
1257 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1258 			    bp->b_left);
1259 			root->b_right = bp->b_right;
1260 		}
1261 		vp->v_dirtyblkroot = root;
1262 		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
1263 		vp->v_dirtybufcnt--;
1264 	} else {
1265 		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
1266 		if (bp != vp->v_cleanblkroot) {
1267 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1268 			    vp->v_cleanblkroot);
1269 			KASSERT(root == bp,
1270 			    ("splay lookup failed during clean remove"));
1271 		}
1272 		if (bp->b_left == NULL) {
1273 			root = bp->b_right;
1274 		} else {
1275 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1276 			    bp->b_left);
1277 			root->b_right = bp->b_right;
1278 		}
1279 		vp->v_cleanblkroot = root;
1280 		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
1281 		vp->v_cleanbufcnt--;
1282 	}
1283 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1284 }
1285 
1286 /*
1287  * Add the buffer to the sorted clean or dirty block list using a
1288  * splay tree algorithm.
1289  *
1290  * NOTE: xflags is passed as a constant, optimizing this inline function!
1291  */
1292 static
1293 void
1294 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
1295 {
1296 	struct buf *root;
1297 
1298 	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
1299 	bp->b_xflags |= xflags;
1300 	if (xflags & BX_VNDIRTY) {
1301 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1302 		if (root == NULL) {
1303 			bp->b_left = NULL;
1304 			bp->b_right = NULL;
1305 			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
1306 		} else if (bp->b_lblkno < root->b_lblkno ||
1307 		    (bp->b_lblkno == root->b_lblkno &&
1308 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1309 			bp->b_left = root->b_left;
1310 			bp->b_right = root;
1311 			root->b_left = NULL;
1312 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1313 		} else {
1314 			bp->b_right = root->b_right;
1315 			bp->b_left = root;
1316 			root->b_right = NULL;
1317 			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
1318 			    root, bp, b_vnbufs);
1319 		}
1320 		vp->v_dirtybufcnt++;
1321 		vp->v_dirtyblkroot = bp;
1322 	} else {
1323 		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
1324 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1325 		if (root == NULL) {
1326 			bp->b_left = NULL;
1327 			bp->b_right = NULL;
1328 			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
1329 		} else if (bp->b_lblkno < root->b_lblkno ||
1330 		    (bp->b_lblkno == root->b_lblkno &&
1331 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1332 			bp->b_left = root->b_left;
1333 			bp->b_right = root;
1334 			root->b_left = NULL;
1335 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1336 		} else {
1337 			bp->b_right = root->b_right;
1338 			bp->b_left = root;
1339 			root->b_right = NULL;
1340 			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
1341 			    root, bp, b_vnbufs);
1342 		}
1343 		vp->v_cleanbufcnt++;
1344 		vp->v_cleanblkroot = bp;
1345 	}
1346 }
1347 
1348 /*
1349  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1350  * shadow buffers used in background bitmap writes.
1351  *
1352  * This code isn't quite efficient as it could be because we are maintaining
1353  * two sorted lists and do not know which list the block resides in.
1354  *
1355  * During a "make buildworld" the desired buffer is found at one of
1356  * the roots more than 60% of the time.  Thus, checking both roots
1357  * before performing either splay eliminates unnecessary splays on the
1358  * first tree splayed.
1359  */
1360 struct buf *
1361 gbincore(struct vnode *vp, daddr_t lblkno)
1362 {
1363 	struct buf *bp;
1364 
1365 	GIANT_REQUIRED;
1366 
1367 	ASSERT_VI_LOCKED(vp, "gbincore");
1368 	if ((bp = vp->v_cleanblkroot) != NULL &&
1369 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1370 		return (bp);
1371 	if ((bp = vp->v_dirtyblkroot) != NULL &&
1372 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1373 		return (bp);
1374 	if ((bp = vp->v_cleanblkroot) != NULL) {
1375 		vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
1376 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1377 			return (bp);
1378 	}
1379 	if ((bp = vp->v_dirtyblkroot) != NULL) {
1380 		vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
1381 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1382 			return (bp);
1383 	}
1384 	return (NULL);
1385 }
1386 
1387 /*
1388  * Associate a buffer with a vnode.
1389  */
1390 void
1391 bgetvp(vp, bp)
1392 	register struct vnode *vp;
1393 	register struct buf *bp;
1394 {
1395 
1396 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1397 
1398 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1399 	    ("bgetvp: bp already attached! %p", bp));
1400 
1401 	ASSERT_VI_LOCKED(vp, "bgetvp");
1402 	vholdl(vp);
1403 	bp->b_vp = vp;
1404 	bp->b_dev = vn_todev(vp);
1405 	/*
1406 	 * Insert onto list for new vnode.
1407 	 */
1408 	buf_vlist_add(bp, vp, BX_VNCLEAN);
1409 }
1410 
1411 /*
1412  * Disassociate a buffer from a vnode.
1413  */
1414 void
1415 brelvp(bp)
1416 	register struct buf *bp;
1417 {
1418 	struct vnode *vp;
1419 
1420 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1421 
1422 	/*
1423 	 * Delete from old vnode list, if on one.
1424 	 */
1425 	vp = bp->b_vp;
1426 	VI_LOCK(vp);
1427 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1428 		buf_vlist_remove(bp);
1429 	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1430 		vp->v_iflag &= ~VI_ONWORKLST;
1431 		mtx_lock(&sync_mtx);
1432 		LIST_REMOVE(vp, v_synclist);
1433 		mtx_unlock(&sync_mtx);
1434 	}
1435 	vdropl(vp);
1436 	bp->b_vp = (struct vnode *) 0;
1437 	if (bp->b_object)
1438 		bp->b_object = NULL;
1439 	VI_UNLOCK(vp);
1440 }
1441 
1442 /*
1443  * Add an item to the syncer work queue.
1444  */
1445 static void
1446 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
1447 {
1448 	int slot;
1449 
1450 	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
1451 
1452 	mtx_lock(&sync_mtx);
1453 	if (vp->v_iflag & VI_ONWORKLST)
1454 		LIST_REMOVE(vp, v_synclist);
1455 	else
1456 		vp->v_iflag |= VI_ONWORKLST;
1457 
1458 	if (delay > syncer_maxdelay - 2)
1459 		delay = syncer_maxdelay - 2;
1460 	slot = (syncer_delayno + delay) & syncer_mask;
1461 
1462 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
1463 	mtx_unlock(&sync_mtx);
1464 }
1465 
1466 struct  proc *updateproc;
1467 static void sched_sync(void);
1468 static struct kproc_desc up_kp = {
1469 	"syncer",
1470 	sched_sync,
1471 	&updateproc
1472 };
1473 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1474 
1475 /*
1476  * System filesystem synchronizer daemon.
1477  */
1478 static void
1479 sched_sync(void)
1480 {
1481 	struct synclist *next;
1482 	struct synclist *slp;
1483 	struct vnode *vp;
1484 	struct mount *mp;
1485 	long starttime;
1486 	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
1487 
1488 	mtx_lock(&Giant);
1489 
1490 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
1491 	    SHUTDOWN_PRI_LAST);
1492 
1493 	for (;;) {
1494 		kthread_suspend_check(td->td_proc);
1495 
1496 		starttime = time_second;
1497 
1498 		/*
1499 		 * Push files whose dirty time has expired.  Be careful
1500 		 * of interrupt race on slp queue.
1501 		 */
1502 		mtx_lock(&sync_mtx);
1503 		slp = &syncer_workitem_pending[syncer_delayno];
1504 		syncer_delayno += 1;
1505 		if (syncer_delayno == syncer_maxdelay)
1506 			syncer_delayno = 0;
1507 		next = &syncer_workitem_pending[syncer_delayno];
1508 
1509 		while ((vp = LIST_FIRST(slp)) != NULL) {
1510 			if (VOP_ISLOCKED(vp, NULL) != 0 ||
1511 			    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1512 				LIST_REMOVE(vp, v_synclist);
1513 				LIST_INSERT_HEAD(next, vp, v_synclist);
1514 				continue;
1515 			}
1516 			if (VI_TRYLOCK(vp) == 0) {
1517 				LIST_REMOVE(vp, v_synclist);
1518 				LIST_INSERT_HEAD(next, vp, v_synclist);
1519 				vn_finished_write(mp);
1520 				continue;
1521 			}
1522 			/*
1523 			 * We use vhold in case the vnode does not
1524 			 * successfully sync.  vhold prevents the vnode from
1525 			 * going away when we unlock the sync_mtx so that
1526 			 * we can acquire the vnode interlock.
1527 			 */
1528 			vholdl(vp);
1529 			mtx_unlock(&sync_mtx);
1530 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
1531 			(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1532 			VOP_UNLOCK(vp, 0, td);
1533 			vn_finished_write(mp);
1534 			VI_LOCK(vp);
1535 			if ((vp->v_iflag & VI_ONWORKLST) != 0) {
1536 				/*
1537 				 * Put us back on the worklist.  The worklist
1538 				 * routine will remove us from our current
1539 				 * position and then add us back in at a later
1540 				 * position.
1541 				 */
1542 				vn_syncer_add_to_worklist(vp, syncdelay);
1543 			}
1544 			vdropl(vp);
1545 			VI_UNLOCK(vp);
1546 			mtx_lock(&sync_mtx);
1547 		}
1548 		mtx_unlock(&sync_mtx);
1549 
1550 		/*
1551 		 * Do soft update processing.
1552 		 */
1553 		if (softdep_process_worklist_hook != NULL)
1554 			(*softdep_process_worklist_hook)(NULL);
1555 
1556 		/*
1557 		 * The variable rushjob allows the kernel to speed up the
1558 		 * processing of the filesystem syncer process. A rushjob
1559 		 * value of N tells the filesystem syncer to process the next
1560 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1561 		 * is used by the soft update code to speed up the filesystem
1562 		 * syncer process when the incore state is getting so far
1563 		 * ahead of the disk that the kernel memory pool is being
1564 		 * threatened with exhaustion.
1565 		 */
1566 		mtx_lock(&sync_mtx);
1567 		if (rushjob > 0) {
1568 			rushjob -= 1;
1569 			mtx_unlock(&sync_mtx);
1570 			continue;
1571 		}
1572 		mtx_unlock(&sync_mtx);
1573 		/*
1574 		 * If it has taken us less than a second to process the
1575 		 * current work, then wait. Otherwise start right over
1576 		 * again. We can still lose time if any single round
1577 		 * takes more than two seconds, but it does not really
1578 		 * matter as we are just trying to generally pace the
1579 		 * filesystem activity.
1580 		 */
1581 		if (time_second == starttime)
1582 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1583 	}
1584 }
1585 
1586 /*
1587  * Request the syncer daemon to speed up its work.
1588  * We never push it to speed up more than half of its
1589  * normal turn time, otherwise it could take over the cpu.
1590  */
1591 int
1592 speedup_syncer()
1593 {
1594 	struct thread *td;
1595 	int ret = 0;
1596 
1597 	td = FIRST_THREAD_IN_PROC(updateproc);
1598 	sleepq_remove(td, &lbolt);
1599 	mtx_lock(&sync_mtx);
1600 	if (rushjob < syncdelay / 2) {
1601 		rushjob += 1;
1602 		stat_rush_requests += 1;
1603 		ret = 1;
1604 	}
1605 	mtx_unlock(&sync_mtx);
1606 	return (ret);
1607 }
1608 
1609 /*
1610  * Associate a p-buffer with a vnode.
1611  *
1612  * Also sets B_PAGING flag to indicate that vnode is not fully associated
1613  * with the buffer.  i.e. the bp has not been linked into the vnode or
1614  * ref-counted.
1615  */
1616 void
1617 pbgetvp(vp, bp)
1618 	register struct vnode *vp;
1619 	register struct buf *bp;
1620 {
1621 
1622 	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1623 
1624 	bp->b_vp = vp;
1625 	bp->b_object = vp->v_object;
1626 	bp->b_flags |= B_PAGING;
1627 	bp->b_dev = vn_todev(vp);
1628 }
1629 
1630 /*
1631  * Disassociate a p-buffer from a vnode.
1632  */
1633 void
1634 pbrelvp(bp)
1635 	register struct buf *bp;
1636 {
1637 
1638 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1639 
1640 	/* XXX REMOVE ME */
1641 	VI_LOCK(bp->b_vp);
1642 	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1643 		panic(
1644 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1645 		    bp,
1646 		    (int)bp->b_flags
1647 		);
1648 	}
1649 	VI_UNLOCK(bp->b_vp);
1650 	bp->b_vp = (struct vnode *) 0;
1651 	bp->b_object = NULL;
1652 	bp->b_flags &= ~B_PAGING;
1653 }
1654 
1655 /*
1656  * Reassign a buffer from one vnode to another.
1657  * Used to assign file specific control information
1658  * (indirect blocks) to the vnode to which they belong.
1659  */
1660 void
1661 reassignbuf(bp, newvp)
1662 	register struct buf *bp;
1663 	register struct vnode *newvp;
1664 {
1665 	struct vnode *vp;
1666 	int delay;
1667 
1668 	if (newvp == NULL) {
1669 		printf("reassignbuf: NULL");
1670 		return;
1671 	}
1672 	vp = bp->b_vp;
1673 	++reassignbufcalls;
1674 
1675 	/*
1676 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1677 	 * is not fully linked in.
1678 	 */
1679 	if (bp->b_flags & B_PAGING)
1680 		panic("cannot reassign paging buffer");
1681 
1682 	/*
1683 	 * Delete from old vnode list, if on one.
1684 	 */
1685 	VI_LOCK(vp);
1686 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1687 		buf_vlist_remove(bp);
1688 		if (vp != newvp) {
1689 			vdropl(bp->b_vp);
1690 			bp->b_vp = NULL;	/* for clarification */
1691 		}
1692 	}
1693 	if (vp != newvp) {
1694 		VI_UNLOCK(vp);
1695 		VI_LOCK(newvp);
1696 	}
1697 	/*
1698 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1699 	 * of clean buffers.
1700 	 */
1701 	if (bp->b_flags & B_DELWRI) {
1702 		if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
1703 			switch (newvp->v_type) {
1704 			case VDIR:
1705 				delay = dirdelay;
1706 				break;
1707 			case VCHR:
1708 				if (newvp->v_rdev->si_mountpoint != NULL) {
1709 					delay = metadelay;
1710 					break;
1711 				}
1712 				/* FALLTHROUGH */
1713 			default:
1714 				delay = filedelay;
1715 			}
1716 			vn_syncer_add_to_worklist(newvp, delay);
1717 		}
1718 		buf_vlist_add(bp, newvp, BX_VNDIRTY);
1719 	} else {
1720 		buf_vlist_add(bp, newvp, BX_VNCLEAN);
1721 
1722 		if ((newvp->v_iflag & VI_ONWORKLST) &&
1723 		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1724 			mtx_lock(&sync_mtx);
1725 			LIST_REMOVE(newvp, v_synclist);
1726 			mtx_unlock(&sync_mtx);
1727 			newvp->v_iflag &= ~VI_ONWORKLST;
1728 		}
1729 	}
1730 	if (bp->b_vp != newvp) {
1731 		bp->b_vp = newvp;
1732 		vholdl(bp->b_vp);
1733 	}
1734 	VI_UNLOCK(newvp);
1735 }
1736 
1737 /*
1738  * Create a vnode for a device.
1739  * Used for mounting the root filesystem.
1740  */
1741 int
1742 bdevvp(dev, vpp)
1743 	dev_t dev;
1744 	struct vnode **vpp;
1745 {
1746 	register struct vnode *vp;
1747 	struct vnode *nvp;
1748 	int error;
1749 
1750 	if (dev == NODEV) {
1751 		*vpp = NULLVP;
1752 		return (ENXIO);
1753 	}
1754 	if (vfinddev(dev, vpp))
1755 		return (0);
1756 
1757 	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1758 	if (error) {
1759 		*vpp = NULLVP;
1760 		return (error);
1761 	}
1762 	vp = nvp;
1763 	vp->v_type = VCHR;
1764 	addalias(vp, dev);
1765 	*vpp = vp;
1766 	return (0);
1767 }
1768 
1769 static void
1770 v_incr_usecount(struct vnode *vp, int delta)
1771 {
1772 
1773 	vp->v_usecount += delta;
1774 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1775 		mtx_lock(&spechash_mtx);
1776 		vp->v_rdev->si_usecount += delta;
1777 		mtx_unlock(&spechash_mtx);
1778 	}
1779 }
1780 
1781 /*
1782  * Add vnode to the alias list hung off the dev_t.
1783  *
1784  * The reason for this gunk is that multiple vnodes can reference
1785  * the same physical device, so checking vp->v_usecount to see
1786  * how many users there are is inadequate; the v_usecount for
1787  * the vnodes need to be accumulated.  vcount() does that.
1788  */
1789 struct vnode *
1790 addaliasu(nvp, nvp_rdev)
1791 	struct vnode *nvp;
1792 	udev_t nvp_rdev;
1793 {
1794 	struct vnode *ovp;
1795 	vop_t **ops;
1796 	dev_t dev;
1797 
1798 	if (nvp->v_type == VBLK)
1799 		return (nvp);
1800 	if (nvp->v_type != VCHR)
1801 		panic("addaliasu on non-special vnode");
1802 	dev = udev2dev(nvp_rdev);
1803 	if (dev == NODEV)
1804 		return (nvp);
1805 	/*
1806 	 * Check to see if we have a bdevvp vnode with no associated
1807 	 * filesystem. If so, we want to associate the filesystem of
1808 	 * the new newly instigated vnode with the bdevvp vnode and
1809 	 * discard the newly created vnode rather than leaving the
1810 	 * bdevvp vnode lying around with no associated filesystem.
1811 	 */
1812 	if (vfinddev(dev, &ovp) == 0 || ovp->v_data != NULL) {
1813 		addalias(nvp, dev);
1814 		return (nvp);
1815 	}
1816 	/*
1817 	 * Discard unneeded vnode, but save its node specific data.
1818 	 * Note that if there is a lock, it is carried over in the
1819 	 * node specific data to the replacement vnode.
1820 	 */
1821 	vref(ovp);
1822 	ovp->v_data = nvp->v_data;
1823 	ovp->v_tag = nvp->v_tag;
1824 	nvp->v_data = NULL;
1825 	lockdestroy(ovp->v_vnlock);
1826 	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
1827 	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
1828 	ops = ovp->v_op;
1829 	ovp->v_op = nvp->v_op;
1830 	if (VOP_ISLOCKED(nvp, curthread)) {
1831 		VOP_UNLOCK(nvp, 0, curthread);
1832 		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
1833 	}
1834 	nvp->v_op = ops;
1835 	insmntque(ovp, nvp->v_mount);
1836 	vrele(nvp);
1837 	vgone(nvp);
1838 	return (ovp);
1839 }
1840 
1841 /* This is a local helper function that do the same as addaliasu, but for a
1842  * dev_t instead of an udev_t. */
1843 static void
1844 addalias(nvp, dev)
1845 	struct vnode *nvp;
1846 	dev_t dev;
1847 {
1848 
1849 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
1850 	dev_ref(dev);
1851 	nvp->v_rdev = dev;
1852 	VI_LOCK(nvp);
1853 	mtx_lock(&spechash_mtx);
1854 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
1855 	dev->si_usecount += nvp->v_usecount;
1856 	mtx_unlock(&spechash_mtx);
1857 	VI_UNLOCK(nvp);
1858 }
1859 
1860 /*
1861  * Grab a particular vnode from the free list, increment its
1862  * reference count and lock it. The vnode lock bit is set if the
1863  * vnode is being eliminated in vgone. The process is awakened
1864  * when the transition is completed, and an error returned to
1865  * indicate that the vnode is no longer usable (possibly having
1866  * been changed to a new filesystem type).
1867  */
1868 int
1869 vget(vp, flags, td)
1870 	register struct vnode *vp;
1871 	int flags;
1872 	struct thread *td;
1873 {
1874 	int error;
1875 
1876 	/*
1877 	 * If the vnode is in the process of being cleaned out for
1878 	 * another use, we wait for the cleaning to finish and then
1879 	 * return failure. Cleaning is determined by checking that
1880 	 * the VI_XLOCK flag is set.
1881 	 */
1882 	if ((flags & LK_INTERLOCK) == 0)
1883 		VI_LOCK(vp);
1884 	if (vp->v_iflag & VI_XLOCK && vp->v_vxthread != curthread) {
1885 		if ((flags & LK_NOWAIT) == 0) {
1886 			vp->v_iflag |= VI_XWANT;
1887 			msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
1888 			return (ENOENT);
1889 		}
1890 		VI_UNLOCK(vp);
1891 		return (EBUSY);
1892 	}
1893 
1894 	v_incr_usecount(vp, 1);
1895 
1896 	if (VSHOULDBUSY(vp))
1897 		vbusy(vp);
1898 	if (flags & LK_TYPE_MASK) {
1899 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
1900 			/*
1901 			 * must expand vrele here because we do not want
1902 			 * to call VOP_INACTIVE if the reference count
1903 			 * drops back to zero since it was never really
1904 			 * active. We must remove it from the free list
1905 			 * before sleeping so that multiple processes do
1906 			 * not try to recycle it.
1907 			 */
1908 			VI_LOCK(vp);
1909 			v_incr_usecount(vp, -1);
1910 			if (VSHOULDFREE(vp))
1911 				vfree(vp);
1912 			else
1913 				vlruvp(vp);
1914 			VI_UNLOCK(vp);
1915 		}
1916 		return (error);
1917 	}
1918 	VI_UNLOCK(vp);
1919 	return (0);
1920 }
1921 
1922 /*
1923  * Increase the reference count of a vnode.
1924  */
1925 void
1926 vref(struct vnode *vp)
1927 {
1928 
1929 	VI_LOCK(vp);
1930 	v_incr_usecount(vp, 1);
1931 	VI_UNLOCK(vp);
1932 }
1933 
1934 /*
1935  * Return reference count of a vnode.
1936  *
1937  * The results of this call are only guaranteed when some mechanism other
1938  * than the VI lock is used to stop other processes from gaining references
1939  * to the vnode.  This may be the case if the caller holds the only reference.
1940  * This is also useful when stale data is acceptable as race conditions may
1941  * be accounted for by some other means.
1942  */
1943 int
1944 vrefcnt(struct vnode *vp)
1945 {
1946 	int usecnt;
1947 
1948 	VI_LOCK(vp);
1949 	usecnt = vp->v_usecount;
1950 	VI_UNLOCK(vp);
1951 
1952 	return (usecnt);
1953 }
1954 
1955 
1956 /*
1957  * Vnode put/release.
1958  * If count drops to zero, call inactive routine and return to freelist.
1959  */
1960 void
1961 vrele(vp)
1962 	struct vnode *vp;
1963 {
1964 	struct thread *td = curthread;	/* XXX */
1965 
1966 	KASSERT(vp != NULL, ("vrele: null vp"));
1967 
1968 	VI_LOCK(vp);
1969 
1970 	/* Skip this v_writecount check if we're going to panic below. */
1971 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
1972 	    ("vrele: missed vn_close"));
1973 
1974 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
1975 	    vp->v_usecount == 1)) {
1976 		v_incr_usecount(vp, -1);
1977 		VI_UNLOCK(vp);
1978 
1979 		return;
1980 	}
1981 
1982 	if (vp->v_usecount == 1) {
1983 		v_incr_usecount(vp, -1);
1984 		/*
1985 		 * We must call VOP_INACTIVE with the node locked. Mark
1986 		 * as VI_DOINGINACT to avoid recursion.
1987 		 */
1988 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
1989 			VI_LOCK(vp);
1990 			vp->v_iflag |= VI_DOINGINACT;
1991 			VI_UNLOCK(vp);
1992 			VOP_INACTIVE(vp, td);
1993 			VI_LOCK(vp);
1994 			KASSERT(vp->v_iflag & VI_DOINGINACT,
1995 			    ("vrele: lost VI_DOINGINACT"));
1996 			vp->v_iflag &= ~VI_DOINGINACT;
1997 		} else
1998 			VI_LOCK(vp);
1999 		if (VSHOULDFREE(vp))
2000 			vfree(vp);
2001 		else
2002 			vlruvp(vp);
2003 		VI_UNLOCK(vp);
2004 
2005 	} else {
2006 #ifdef DIAGNOSTIC
2007 		vprint("vrele: negative ref count", vp);
2008 #endif
2009 		VI_UNLOCK(vp);
2010 		panic("vrele: negative ref cnt");
2011 	}
2012 }
2013 
2014 /*
2015  * Release an already locked vnode.  This give the same effects as
2016  * unlock+vrele(), but takes less time and avoids releasing and
2017  * re-aquiring the lock (as vrele() aquires the lock internally.)
2018  */
2019 void
2020 vput(vp)
2021 	struct vnode *vp;
2022 {
2023 	struct thread *td = curthread;	/* XXX */
2024 
2025 	GIANT_REQUIRED;
2026 
2027 	KASSERT(vp != NULL, ("vput: null vp"));
2028 	VI_LOCK(vp);
2029 	/* Skip this v_writecount check if we're going to panic below. */
2030 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2031 	    ("vput: missed vn_close"));
2032 
2033 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2034 	    vp->v_usecount == 1)) {
2035 		v_incr_usecount(vp, -1);
2036 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2037 		return;
2038 	}
2039 
2040 	if (vp->v_usecount == 1) {
2041 		v_incr_usecount(vp, -1);
2042 		/*
2043 		 * We must call VOP_INACTIVE with the node locked, so
2044 		 * we just need to release the vnode mutex. Mark as
2045 		 * as VI_DOINGINACT to avoid recursion.
2046 		 */
2047 		vp->v_iflag |= VI_DOINGINACT;
2048 		VI_UNLOCK(vp);
2049 		VOP_INACTIVE(vp, td);
2050 		VI_LOCK(vp);
2051 		KASSERT(vp->v_iflag & VI_DOINGINACT,
2052 		    ("vput: lost VI_DOINGINACT"));
2053 		vp->v_iflag &= ~VI_DOINGINACT;
2054 		if (VSHOULDFREE(vp))
2055 			vfree(vp);
2056 		else
2057 			vlruvp(vp);
2058 		VI_UNLOCK(vp);
2059 
2060 	} else {
2061 #ifdef DIAGNOSTIC
2062 		vprint("vput: negative ref count", vp);
2063 #endif
2064 		panic("vput: negative ref cnt");
2065 	}
2066 }
2067 
2068 /*
2069  * Somebody doesn't want the vnode recycled.
2070  */
2071 void
2072 vhold(struct vnode *vp)
2073 {
2074 
2075 	VI_LOCK(vp);
2076 	vholdl(vp);
2077 	VI_UNLOCK(vp);
2078 }
2079 
2080 void
2081 vholdl(vp)
2082 	register struct vnode *vp;
2083 {
2084 
2085 	vp->v_holdcnt++;
2086 	if (VSHOULDBUSY(vp))
2087 		vbusy(vp);
2088 }
2089 
2090 /*
2091  * Note that there is one less who cares about this vnode.  vdrop() is the
2092  * opposite of vhold().
2093  */
2094 void
2095 vdrop(struct vnode *vp)
2096 {
2097 
2098 	VI_LOCK(vp);
2099 	vdropl(vp);
2100 	VI_UNLOCK(vp);
2101 }
2102 
2103 void
2104 vdropl(vp)
2105 	register struct vnode *vp;
2106 {
2107 
2108 	if (vp->v_holdcnt <= 0)
2109 		panic("vdrop: holdcnt");
2110 	vp->v_holdcnt--;
2111 	if (VSHOULDFREE(vp))
2112 		vfree(vp);
2113 	else
2114 		vlruvp(vp);
2115 }
2116 
2117 /*
2118  * Remove any vnodes in the vnode table belonging to mount point mp.
2119  *
2120  * If FORCECLOSE is not specified, there should not be any active ones,
2121  * return error if any are found (nb: this is a user error, not a
2122  * system error). If FORCECLOSE is specified, detach any active vnodes
2123  * that are found.
2124  *
2125  * If WRITECLOSE is set, only flush out regular file vnodes open for
2126  * writing.
2127  *
2128  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2129  *
2130  * `rootrefs' specifies the base reference count for the root vnode
2131  * of this filesystem. The root vnode is considered busy if its
2132  * v_usecount exceeds this value. On a successful return, vflush()
2133  * will call vrele() on the root vnode exactly rootrefs times.
2134  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2135  * be zero.
2136  */
2137 #ifdef DIAGNOSTIC
2138 static int busyprt = 0;		/* print out busy vnodes */
2139 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2140 #endif
2141 
2142 int
2143 vflush(mp, rootrefs, flags)
2144 	struct mount *mp;
2145 	int rootrefs;
2146 	int flags;
2147 {
2148 	struct thread *td = curthread;	/* XXX */
2149 	struct vnode *vp, *nvp, *rootvp = NULL;
2150 	struct vattr vattr;
2151 	int busy = 0, error;
2152 
2153 	if (rootrefs > 0) {
2154 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2155 		    ("vflush: bad args"));
2156 		/*
2157 		 * Get the filesystem root vnode. We can vput() it
2158 		 * immediately, since with rootrefs > 0, it won't go away.
2159 		 */
2160 		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
2161 			return (error);
2162 		vput(rootvp);
2163 
2164 	}
2165 	MNT_ILOCK(mp);
2166 loop:
2167 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
2168 		/*
2169 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
2170 		 * Start over if it has (it won't be on the list anymore).
2171 		 */
2172 		if (vp->v_mount != mp)
2173 			goto loop;
2174 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
2175 
2176 		VI_LOCK(vp);
2177 		MNT_IUNLOCK(mp);
2178 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
2179 		if (error) {
2180 			MNT_ILOCK(mp);
2181 			goto loop;
2182 		}
2183 		/*
2184 		 * Skip over a vnodes marked VV_SYSTEM.
2185 		 */
2186 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2187 			VOP_UNLOCK(vp, 0, td);
2188 			MNT_ILOCK(mp);
2189 			continue;
2190 		}
2191 		/*
2192 		 * If WRITECLOSE is set, flush out unlinked but still open
2193 		 * files (even if open only for reading) and regular file
2194 		 * vnodes open for writing.
2195 		 */
2196 		if (flags & WRITECLOSE) {
2197 			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2198 			VI_LOCK(vp);
2199 
2200 			if ((vp->v_type == VNON ||
2201 			    (error == 0 && vattr.va_nlink > 0)) &&
2202 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2203 				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2204 				MNT_ILOCK(mp);
2205 				continue;
2206 			}
2207 		} else
2208 			VI_LOCK(vp);
2209 
2210 		VOP_UNLOCK(vp, 0, td);
2211 
2212 		/*
2213 		 * With v_usecount == 0, all we need to do is clear out the
2214 		 * vnode data structures and we are done.
2215 		 */
2216 		if (vp->v_usecount == 0) {
2217 			vgonel(vp, td);
2218 			MNT_ILOCK(mp);
2219 			continue;
2220 		}
2221 
2222 		/*
2223 		 * If FORCECLOSE is set, forcibly close the vnode. For block
2224 		 * or character devices, revert to an anonymous device. For
2225 		 * all other files, just kill them.
2226 		 */
2227 		if (flags & FORCECLOSE) {
2228 			if (vp->v_type != VCHR)
2229 				vgonel(vp, td);
2230 			else
2231 				vgonechrl(vp, td);
2232 			MNT_ILOCK(mp);
2233 			continue;
2234 		}
2235 #ifdef DIAGNOSTIC
2236 		if (busyprt)
2237 			vprint("vflush: busy vnode", vp);
2238 #endif
2239 		VI_UNLOCK(vp);
2240 		MNT_ILOCK(mp);
2241 		busy++;
2242 	}
2243 	MNT_IUNLOCK(mp);
2244 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2245 		/*
2246 		 * If just the root vnode is busy, and if its refcount
2247 		 * is equal to `rootrefs', then go ahead and kill it.
2248 		 */
2249 		VI_LOCK(rootvp);
2250 		KASSERT(busy > 0, ("vflush: not busy"));
2251 		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
2252 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2253 			vgonel(rootvp, td);
2254 			busy = 0;
2255 		} else
2256 			VI_UNLOCK(rootvp);
2257 	}
2258 	if (busy)
2259 		return (EBUSY);
2260 	for (; rootrefs > 0; rootrefs--)
2261 		vrele(rootvp);
2262 	return (0);
2263 }
2264 
2265 /*
2266  * This moves a now (likely recyclable) vnode to the end of the
2267  * mountlist.  XXX However, it is temporarily disabled until we
2268  * can clean up ffs_sync() and friends, which have loop restart
2269  * conditions which this code causes to operate O(N^2).
2270  */
2271 static void
2272 vlruvp(struct vnode *vp)
2273 {
2274 #if 0
2275 	struct mount *mp;
2276 
2277 	if ((mp = vp->v_mount) != NULL) {
2278 		MNT_ILOCK(mp);
2279 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2280 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2281 		MNT_IUNLOCK(mp);
2282 	}
2283 #endif
2284 }
2285 
2286 static void
2287 vx_lock(struct vnode *vp)
2288 {
2289 
2290 	ASSERT_VI_LOCKED(vp, "vx_lock");
2291 
2292 	/*
2293 	 * Prevent the vnode from being recycled or brought into use while we
2294 	 * clean it out.
2295 	 */
2296 	if (vp->v_iflag & VI_XLOCK)
2297 		panic("vclean: deadlock");
2298 	vp->v_iflag |= VI_XLOCK;
2299 	vp->v_vxthread = curthread;
2300 }
2301 
2302 static void
2303 vx_unlock(struct vnode *vp)
2304 {
2305 	ASSERT_VI_LOCKED(vp, "vx_unlock");
2306 	vp->v_iflag &= ~VI_XLOCK;
2307 	vp->v_vxthread = NULL;
2308 	if (vp->v_iflag & VI_XWANT) {
2309 		vp->v_iflag &= ~VI_XWANT;
2310 		wakeup(vp);
2311 	}
2312 }
2313 
2314 /*
2315  * Disassociate the underlying filesystem from a vnode.
2316  */
2317 static void
2318 vclean(vp, flags, td)
2319 	struct vnode *vp;
2320 	int flags;
2321 	struct thread *td;
2322 {
2323 	int active;
2324 
2325 	ASSERT_VI_LOCKED(vp, "vclean");
2326 	/*
2327 	 * Check to see if the vnode is in use. If so we have to reference it
2328 	 * before we clean it out so that its count cannot fall to zero and
2329 	 * generate a race against ourselves to recycle it.
2330 	 */
2331 	if ((active = vp->v_usecount))
2332 		v_incr_usecount(vp, 1);
2333 
2334 	/*
2335 	 * Even if the count is zero, the VOP_INACTIVE routine may still
2336 	 * have the object locked while it cleans it out. The VOP_LOCK
2337 	 * ensures that the VOP_INACTIVE routine is done with its work.
2338 	 * For active vnodes, it ensures that no other activity can
2339 	 * occur while the underlying object is being cleaned out.
2340 	 */
2341 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2342 
2343 	/*
2344 	 * Clean out any buffers associated with the vnode.
2345 	 * If the flush fails, just toss the buffers.
2346 	 */
2347 	if (flags & DOCLOSE) {
2348 		struct buf *bp;
2349 		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2350 		if (bp != NULL)
2351 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2352 		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2353 			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2354 	}
2355 
2356 	VOP_DESTROYVOBJECT(vp);
2357 
2358 	/*
2359 	 * Any other processes trying to obtain this lock must first
2360 	 * wait for VXLOCK to clear, then call the new lock operation.
2361 	 */
2362 	VOP_UNLOCK(vp, 0, td);
2363 
2364 	/*
2365 	 * If purging an active vnode, it must be closed and
2366 	 * deactivated before being reclaimed. Note that the
2367 	 * VOP_INACTIVE will unlock the vnode.
2368 	 */
2369 	if (active) {
2370 		if (flags & DOCLOSE)
2371 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2372 		VI_LOCK(vp);
2373 		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2374 			vp->v_iflag |= VI_DOINGINACT;
2375 			VI_UNLOCK(vp);
2376 			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2377 				panic("vclean: cannot relock.");
2378 			VOP_INACTIVE(vp, td);
2379 			VI_LOCK(vp);
2380 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2381 			    ("vclean: lost VI_DOINGINACT"));
2382 			vp->v_iflag &= ~VI_DOINGINACT;
2383 		}
2384 		VI_UNLOCK(vp);
2385 	}
2386 	/*
2387 	 * Reclaim the vnode.
2388 	 */
2389 	if (VOP_RECLAIM(vp, td))
2390 		panic("vclean: cannot reclaim");
2391 
2392 	if (active) {
2393 		/*
2394 		 * Inline copy of vrele() since VOP_INACTIVE
2395 		 * has already been called.
2396 		 */
2397 		VI_LOCK(vp);
2398 		v_incr_usecount(vp, -1);
2399 		if (vp->v_usecount <= 0) {
2400 #ifdef INVARIANTS
2401 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2402 				vprint("vclean: bad ref count", vp);
2403 				panic("vclean: ref cnt");
2404 			}
2405 #endif
2406 			if (VSHOULDFREE(vp))
2407 				vfree(vp);
2408 		}
2409 		VI_UNLOCK(vp);
2410 	}
2411 	/*
2412 	 * Delete from old mount point vnode list.
2413 	 */
2414 	if (vp->v_mount != NULL)
2415 		insmntque(vp, (struct mount *)0);
2416 	cache_purge(vp);
2417 	VI_LOCK(vp);
2418 	if (VSHOULDFREE(vp))
2419 		vfree(vp);
2420 
2421 	/*
2422 	 * Done with purge, reset to the standard lock and
2423 	 * notify sleepers of the grim news.
2424 	 */
2425 	vp->v_vnlock = &vp->v_lock;
2426 	vp->v_op = dead_vnodeop_p;
2427 	if (vp->v_pollinfo != NULL)
2428 		vn_pollgone(vp);
2429 	vp->v_tag = "none";
2430 }
2431 
2432 /*
2433  * Eliminate all activity associated with the requested vnode
2434  * and with all vnodes aliased to the requested vnode.
2435  */
2436 int
2437 vop_revoke(ap)
2438 	struct vop_revoke_args /* {
2439 		struct vnode *a_vp;
2440 		int a_flags;
2441 	} */ *ap;
2442 {
2443 	struct vnode *vp, *vq;
2444 	dev_t dev;
2445 
2446 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2447 	vp = ap->a_vp;
2448 	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2449 
2450 	VI_LOCK(vp);
2451 	/*
2452 	 * If a vgone (or vclean) is already in progress,
2453 	 * wait until it is done and return.
2454 	 */
2455 	if (vp->v_iflag & VI_XLOCK) {
2456 		vp->v_iflag |= VI_XWANT;
2457 		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2458 		    "vop_revokeall", 0);
2459 		return (0);
2460 	}
2461 	VI_UNLOCK(vp);
2462 	dev = vp->v_rdev;
2463 	for (;;) {
2464 		mtx_lock(&spechash_mtx);
2465 		vq = SLIST_FIRST(&dev->si_hlist);
2466 		mtx_unlock(&spechash_mtx);
2467 		if (vq == NULL)
2468 			break;
2469 		vgone(vq);
2470 	}
2471 	return (0);
2472 }
2473 
2474 /*
2475  * Recycle an unused vnode to the front of the free list.
2476  * Release the passed interlock if the vnode will be recycled.
2477  */
2478 int
2479 vrecycle(vp, inter_lkp, td)
2480 	struct vnode *vp;
2481 	struct mtx *inter_lkp;
2482 	struct thread *td;
2483 {
2484 
2485 	VI_LOCK(vp);
2486 	if (vp->v_usecount == 0) {
2487 		if (inter_lkp) {
2488 			mtx_unlock(inter_lkp);
2489 		}
2490 		vgonel(vp, td);
2491 		return (1);
2492 	}
2493 	VI_UNLOCK(vp);
2494 	return (0);
2495 }
2496 
2497 /*
2498  * Eliminate all activity associated with a vnode
2499  * in preparation for reuse.
2500  */
2501 void
2502 vgone(vp)
2503 	register struct vnode *vp;
2504 {
2505 	struct thread *td = curthread;	/* XXX */
2506 
2507 	VI_LOCK(vp);
2508 	vgonel(vp, td);
2509 }
2510 
2511 /*
2512  * Disassociate a character device from the its underlying filesystem and
2513  * attach it to spec.  This is for use when the chr device is still active
2514  * and the filesystem is going away.
2515  */
2516 static void
2517 vgonechrl(struct vnode *vp, struct thread *td)
2518 {
2519 	ASSERT_VI_LOCKED(vp, "vgonechrl");
2520 	vx_lock(vp);
2521 	/*
2522 	 * This is a custom version of vclean() which does not tearm down
2523 	 * the bufs or vm objects held by this vnode.  This allows filesystems
2524 	 * to continue using devices which were discovered via another
2525 	 * filesystem that has been unmounted.
2526 	 */
2527 	if (vp->v_usecount != 0) {
2528 		v_incr_usecount(vp, 1);
2529 		/*
2530 		 * Ensure that no other activity can occur while the
2531 		 * underlying object is being cleaned out.
2532 		 */
2533 		VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2534 		/*
2535 		 * Any other processes trying to obtain this lock must first
2536 		 * wait for VXLOCK to clear, then call the new lock operation.
2537 		 */
2538 		VOP_UNLOCK(vp, 0, td);
2539 		vp->v_vnlock = &vp->v_lock;
2540 		vp->v_tag = "orphanchr";
2541 		vp->v_op = spec_vnodeop_p;
2542 		if (vp->v_mount != NULL)
2543 			insmntque(vp, (struct mount *)0);
2544 		cache_purge(vp);
2545 		vrele(vp);
2546 		VI_LOCK(vp);
2547 	} else
2548 		vclean(vp, 0, td);
2549 	vp->v_op = spec_vnodeop_p;
2550 	vx_unlock(vp);
2551 	VI_UNLOCK(vp);
2552 }
2553 
2554 /*
2555  * vgone, with the vp interlock held.
2556  */
2557 void
2558 vgonel(vp, td)
2559 	struct vnode *vp;
2560 	struct thread *td;
2561 {
2562 	/*
2563 	 * If a vgone (or vclean) is already in progress,
2564 	 * wait until it is done and return.
2565 	 */
2566 	ASSERT_VI_LOCKED(vp, "vgonel");
2567 	if (vp->v_iflag & VI_XLOCK) {
2568 		vp->v_iflag |= VI_XWANT;
2569 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2570 		return;
2571 	}
2572 	vx_lock(vp);
2573 
2574 	/*
2575 	 * Clean out the filesystem specific data.
2576 	 */
2577 	vclean(vp, DOCLOSE, td);
2578 	VI_UNLOCK(vp);
2579 
2580 	/*
2581 	 * If special device, remove it from special device alias list
2582 	 * if it is on one.
2583 	 */
2584 	VI_LOCK(vp);
2585 	if (vp->v_type == VCHR && vp->v_rdev != NODEV) {
2586 		mtx_lock(&spechash_mtx);
2587 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2588 		vp->v_rdev->si_usecount -= vp->v_usecount;
2589 		mtx_unlock(&spechash_mtx);
2590 		dev_rel(vp->v_rdev);
2591 		vp->v_rdev = NULL;
2592 	}
2593 
2594 	/*
2595 	 * If it is on the freelist and not already at the head,
2596 	 * move it to the head of the list. The test of the
2597 	 * VDOOMED flag and the reference count of zero is because
2598 	 * it will be removed from the free list by getnewvnode,
2599 	 * but will not have its reference count incremented until
2600 	 * after calling vgone. If the reference count were
2601 	 * incremented first, vgone would (incorrectly) try to
2602 	 * close the previous instance of the underlying object.
2603 	 */
2604 	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2605 		mtx_lock(&vnode_free_list_mtx);
2606 		if (vp->v_iflag & VI_FREE) {
2607 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2608 		} else {
2609 			vp->v_iflag |= VI_FREE;
2610 			freevnodes++;
2611 		}
2612 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2613 		mtx_unlock(&vnode_free_list_mtx);
2614 	}
2615 
2616 	vp->v_type = VBAD;
2617 	vx_unlock(vp);
2618 	VI_UNLOCK(vp);
2619 }
2620 
2621 /*
2622  * Lookup a vnode by device number.
2623  */
2624 int
2625 vfinddev(dev, vpp)
2626 	dev_t dev;
2627 	struct vnode **vpp;
2628 {
2629 	struct vnode *vp;
2630 
2631 	mtx_lock(&spechash_mtx);
2632 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2633 		*vpp = vp;
2634 		mtx_unlock(&spechash_mtx);
2635 		return (1);
2636 	}
2637 	mtx_unlock(&spechash_mtx);
2638 	return (0);
2639 }
2640 
2641 /*
2642  * Calculate the total number of references to a special device.
2643  */
2644 int
2645 vcount(vp)
2646 	struct vnode *vp;
2647 {
2648 	int count;
2649 
2650 	mtx_lock(&spechash_mtx);
2651 	count = vp->v_rdev->si_usecount;
2652 	mtx_unlock(&spechash_mtx);
2653 	return (count);
2654 }
2655 
2656 /*
2657  * Same as above, but using the dev_t as argument
2658  */
2659 int
2660 count_dev(dev)
2661 	dev_t dev;
2662 {
2663 	int count;
2664 
2665 	mtx_lock(&spechash_mtx);
2666 	count = dev->si_usecount;
2667 	mtx_unlock(&spechash_mtx);
2668 	return(count);
2669 }
2670 
2671 /*
2672  * Print out a description of a vnode.
2673  */
2674 static char *typename[] =
2675 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2676 
2677 void
2678 vprint(label, vp)
2679 	char *label;
2680 	struct vnode *vp;
2681 {
2682 	char buf[96];
2683 
2684 	if (label != NULL)
2685 		printf("%s: %p: ", label, (void *)vp);
2686 	else
2687 		printf("%p: ", (void *)vp);
2688 	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2689 	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2690 	    vp->v_writecount, vp->v_holdcnt);
2691 	buf[0] = '\0';
2692 	if (vp->v_vflag & VV_ROOT)
2693 		strcat(buf, "|VV_ROOT");
2694 	if (vp->v_vflag & VV_TEXT)
2695 		strcat(buf, "|VV_TEXT");
2696 	if (vp->v_vflag & VV_SYSTEM)
2697 		strcat(buf, "|VV_SYSTEM");
2698 	if (vp->v_iflag & VI_XLOCK)
2699 		strcat(buf, "|VI_XLOCK");
2700 	if (vp->v_iflag & VI_XWANT)
2701 		strcat(buf, "|VI_XWANT");
2702 	if (vp->v_iflag & VI_BWAIT)
2703 		strcat(buf, "|VI_BWAIT");
2704 	if (vp->v_iflag & VI_DOOMED)
2705 		strcat(buf, "|VI_DOOMED");
2706 	if (vp->v_iflag & VI_FREE)
2707 		strcat(buf, "|VI_FREE");
2708 	if (vp->v_vflag & VV_OBJBUF)
2709 		strcat(buf, "|VV_OBJBUF");
2710 	if (buf[0] != '\0')
2711 		printf(" flags (%s),", &buf[1]);
2712 	lockmgr_printinfo(vp->v_vnlock);
2713 	printf("\n");
2714 	if (vp->v_data != NULL)
2715 		VOP_PRINT(vp);
2716 }
2717 
2718 #ifdef DDB
2719 #include <ddb/ddb.h>
2720 /*
2721  * List all of the locked vnodes in the system.
2722  * Called when debugging the kernel.
2723  */
2724 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2725 {
2726 	struct mount *mp, *nmp;
2727 	struct vnode *vp;
2728 
2729 	/*
2730 	 * Note: because this is DDB, we can't obey the locking semantics
2731 	 * for these structures, which means we could catch an inconsistent
2732 	 * state and dereference a nasty pointer.  Not much to be done
2733 	 * about that.
2734 	 */
2735 	printf("Locked vnodes\n");
2736 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2737 		nmp = TAILQ_NEXT(mp, mnt_list);
2738 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2739 			if (VOP_ISLOCKED(vp, NULL))
2740 				vprint(NULL, vp);
2741 		}
2742 		nmp = TAILQ_NEXT(mp, mnt_list);
2743 	}
2744 }
2745 #endif
2746 
2747 /*
2748  * Fill in a struct xvfsconf based on a struct vfsconf.
2749  */
2750 static void
2751 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2752 {
2753 
2754 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2755 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2756 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2757 	xvfsp->vfc_flags = vfsp->vfc_flags;
2758 	/*
2759 	 * These are unused in userland, we keep them
2760 	 * to not break binary compatibility.
2761 	 */
2762 	xvfsp->vfc_vfsops = NULL;
2763 	xvfsp->vfc_next = NULL;
2764 }
2765 
2766 /*
2767  * Top level filesystem related information gathering.
2768  */
2769 static int
2770 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2771 {
2772 	struct vfsconf *vfsp;
2773 	struct xvfsconf *xvfsp;
2774 	int cnt, error, i;
2775 
2776 	cnt = 0;
2777 	for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
2778 		cnt++;
2779 	xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
2780 	/*
2781 	 * Handle the race that we will have here when struct vfsconf
2782 	 * will be locked down by using both cnt and checking vfc_next
2783 	 * against NULL to determine the end of the loop.  The race will
2784 	 * happen because we will have to unlock before calling malloc().
2785 	 * We are protected by Giant for now.
2786 	 */
2787 	i = 0;
2788 	for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
2789 		vfsconf2x(vfsp, xvfsp + i);
2790 		i++;
2791 	}
2792 	error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
2793 	free(xvfsp, M_TEMP);
2794 	return (error);
2795 }
2796 
2797 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2798     "S,xvfsconf", "List of all configured filesystems");
2799 
2800 #ifndef BURN_BRIDGES
2801 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2802 
2803 static int
2804 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2805 {
2806 	int *name = (int *)arg1 - 1;	/* XXX */
2807 	u_int namelen = arg2 + 1;	/* XXX */
2808 	struct vfsconf *vfsp;
2809 	struct xvfsconf xvfsp;
2810 
2811 	printf("WARNING: userland calling deprecated sysctl, "
2812 	    "please rebuild world\n");
2813 
2814 #if 1 || defined(COMPAT_PRELITE2)
2815 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2816 	if (namelen == 1)
2817 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2818 #endif
2819 
2820 	switch (name[1]) {
2821 	case VFS_MAXTYPENUM:
2822 		if (namelen != 2)
2823 			return (ENOTDIR);
2824 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2825 	case VFS_CONF:
2826 		if (namelen != 3)
2827 			return (ENOTDIR);	/* overloaded */
2828 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2829 			if (vfsp->vfc_typenum == name[2])
2830 				break;
2831 		if (vfsp == NULL)
2832 			return (EOPNOTSUPP);
2833 		vfsconf2x(vfsp, &xvfsp);
2834 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2835 	}
2836 	return (EOPNOTSUPP);
2837 }
2838 
2839 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
2840 	"Generic filesystem");
2841 
2842 #if 1 || defined(COMPAT_PRELITE2)
2843 
2844 static int
2845 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2846 {
2847 	int error;
2848 	struct vfsconf *vfsp;
2849 	struct ovfsconf ovfs;
2850 
2851 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
2852 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2853 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2854 		ovfs.vfc_index = vfsp->vfc_typenum;
2855 		ovfs.vfc_refcount = vfsp->vfc_refcount;
2856 		ovfs.vfc_flags = vfsp->vfc_flags;
2857 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2858 		if (error)
2859 			return error;
2860 	}
2861 	return 0;
2862 }
2863 
2864 #endif /* 1 || COMPAT_PRELITE2 */
2865 #endif /* !BURN_BRIDGES */
2866 
2867 #define KINFO_VNODESLOP		10
2868 #ifdef notyet
2869 /*
2870  * Dump vnode list (via sysctl).
2871  */
2872 /* ARGSUSED */
2873 static int
2874 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2875 {
2876 	struct xvnode *xvn;
2877 	struct thread *td = req->td;
2878 	struct mount *mp;
2879 	struct vnode *vp;
2880 	int error, len, n;
2881 
2882 	/*
2883 	 * Stale numvnodes access is not fatal here.
2884 	 */
2885 	req->lock = 0;
2886 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
2887 	if (!req->oldptr)
2888 		/* Make an estimate */
2889 		return (SYSCTL_OUT(req, 0, len));
2890 
2891 	error = sysctl_wire_old_buffer(req, 0);
2892 	if (error != 0)
2893 		return (error);
2894 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
2895 	n = 0;
2896 	mtx_lock(&mountlist_mtx);
2897 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2898 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
2899 			continue;
2900 		MNT_ILOCK(mp);
2901 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2902 			if (n == len)
2903 				break;
2904 			vref(vp);
2905 			xvn[n].xv_size = sizeof *xvn;
2906 			xvn[n].xv_vnode = vp;
2907 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
2908 			XV_COPY(usecount);
2909 			XV_COPY(writecount);
2910 			XV_COPY(holdcnt);
2911 			XV_COPY(id);
2912 			XV_COPY(mount);
2913 			XV_COPY(numoutput);
2914 			XV_COPY(type);
2915 #undef XV_COPY
2916 			xvn[n].xv_flag = vp->v_vflag;
2917 
2918 			switch (vp->v_type) {
2919 			case VREG:
2920 			case VDIR:
2921 			case VLNK:
2922 				xvn[n].xv_dev = vp->v_cachedfs;
2923 				xvn[n].xv_ino = vp->v_cachedid;
2924 				break;
2925 			case VBLK:
2926 			case VCHR:
2927 				if (vp->v_rdev == NULL) {
2928 					vrele(vp);
2929 					continue;
2930 				}
2931 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
2932 				break;
2933 			case VSOCK:
2934 				xvn[n].xv_socket = vp->v_socket;
2935 				break;
2936 			case VFIFO:
2937 				xvn[n].xv_fifo = vp->v_fifoinfo;
2938 				break;
2939 			case VNON:
2940 			case VBAD:
2941 			default:
2942 				/* shouldn't happen? */
2943 				vrele(vp);
2944 				continue;
2945 			}
2946 			vrele(vp);
2947 			++n;
2948 		}
2949 		MNT_IUNLOCK(mp);
2950 		mtx_lock(&mountlist_mtx);
2951 		vfs_unbusy(mp, td);
2952 		if (n == len)
2953 			break;
2954 	}
2955 	mtx_unlock(&mountlist_mtx);
2956 
2957 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
2958 	free(xvn, M_TEMP);
2959 	return (error);
2960 }
2961 
2962 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
2963 	0, 0, sysctl_vnode, "S,xvnode", "");
2964 #endif
2965 
2966 /*
2967  * Check to see if a filesystem is mounted on a block device.
2968  */
2969 int
2970 vfs_mountedon(vp)
2971 	struct vnode *vp;
2972 {
2973 
2974 	if (vp->v_rdev->si_mountpoint != NULL)
2975 		return (EBUSY);
2976 	return (0);
2977 }
2978 
2979 /*
2980  * Unmount all filesystems. The list is traversed in reverse order
2981  * of mounting to avoid dependencies.
2982  */
2983 void
2984 vfs_unmountall()
2985 {
2986 	struct mount *mp;
2987 	struct thread *td;
2988 	int error;
2989 
2990 	if (curthread != NULL)
2991 		td = curthread;
2992 	else
2993 		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
2994 	/*
2995 	 * Since this only runs when rebooting, it is not interlocked.
2996 	 */
2997 	while(!TAILQ_EMPTY(&mountlist)) {
2998 		mp = TAILQ_LAST(&mountlist, mntlist);
2999 		error = dounmount(mp, MNT_FORCE, td);
3000 		if (error) {
3001 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3002 			printf("unmount of %s failed (",
3003 			    mp->mnt_stat.f_mntonname);
3004 			if (error == EBUSY)
3005 				printf("BUSY)\n");
3006 			else
3007 				printf("%d)\n", error);
3008 		} else {
3009 			/* The unmount has removed mp from the mountlist */
3010 		}
3011 	}
3012 }
3013 
3014 /*
3015  * perform msync on all vnodes under a mount point
3016  * the mount point must be locked.
3017  */
3018 void
3019 vfs_msync(struct mount *mp, int flags)
3020 {
3021 	struct vnode *vp, *nvp;
3022 	struct vm_object *obj;
3023 	int tries;
3024 
3025 	GIANT_REQUIRED;
3026 
3027 	tries = 5;
3028 	MNT_ILOCK(mp);
3029 loop:
3030 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
3031 		if (vp->v_mount != mp) {
3032 			if (--tries > 0)
3033 				goto loop;
3034 			break;
3035 		}
3036 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
3037 
3038 		VI_LOCK(vp);
3039 		if (vp->v_iflag & VI_XLOCK) {
3040 			VI_UNLOCK(vp);
3041 			continue;
3042 		}
3043 
3044 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3045 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3046 			MNT_IUNLOCK(mp);
3047 			if (!vget(vp,
3048 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3049 			    curthread)) {
3050 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3051 					vput(vp);
3052 					MNT_ILOCK(mp);
3053 					continue;
3054 				}
3055 
3056 				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3057 					VM_OBJECT_LOCK(obj);
3058 					vm_object_page_clean(obj, 0, 0,
3059 					    flags == MNT_WAIT ?
3060 					    OBJPC_SYNC : OBJPC_NOSYNC);
3061 					VM_OBJECT_UNLOCK(obj);
3062 				}
3063 				vput(vp);
3064 			}
3065 			MNT_ILOCK(mp);
3066 			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3067 				if (--tries > 0)
3068 					goto loop;
3069 				break;
3070 			}
3071 		} else
3072 			VI_UNLOCK(vp);
3073 	}
3074 	MNT_IUNLOCK(mp);
3075 }
3076 
3077 /*
3078  * Create the VM object needed for VMIO and mmap support.  This
3079  * is done for all VREG files in the system.  Some filesystems might
3080  * afford the additional metadata buffering capability of the
3081  * VMIO code by making the device node be VMIO mode also.
3082  *
3083  * vp must be locked when vfs_object_create is called.
3084  */
3085 int
3086 vfs_object_create(vp, td, cred)
3087 	struct vnode *vp;
3088 	struct thread *td;
3089 	struct ucred *cred;
3090 {
3091 
3092 	GIANT_REQUIRED;
3093 	return (VOP_CREATEVOBJECT(vp, cred, td));
3094 }
3095 
3096 /*
3097  * Mark a vnode as free, putting it up for recycling.
3098  */
3099 void
3100 vfree(vp)
3101 	struct vnode *vp;
3102 {
3103 
3104 	ASSERT_VI_LOCKED(vp, "vfree");
3105 	mtx_lock(&vnode_free_list_mtx);
3106 	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3107 	if (vp->v_iflag & VI_AGE) {
3108 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3109 	} else {
3110 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3111 	}
3112 	freevnodes++;
3113 	mtx_unlock(&vnode_free_list_mtx);
3114 	vp->v_iflag &= ~VI_AGE;
3115 	vp->v_iflag |= VI_FREE;
3116 }
3117 
3118 /*
3119  * Opposite of vfree() - mark a vnode as in use.
3120  */
3121 void
3122 vbusy(vp)
3123 	struct vnode *vp;
3124 {
3125 
3126 	ASSERT_VI_LOCKED(vp, "vbusy");
3127 	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3128 
3129 	mtx_lock(&vnode_free_list_mtx);
3130 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3131 	freevnodes--;
3132 	mtx_unlock(&vnode_free_list_mtx);
3133 
3134 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3135 }
3136 
3137 /*
3138  * Initalize per-vnode helper structure to hold poll-related state.
3139  */
3140 void
3141 v_addpollinfo(struct vnode *vp)
3142 {
3143 
3144 	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
3145 	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3146 }
3147 
3148 /*
3149  * Record a process's interest in events which might happen to
3150  * a vnode.  Because poll uses the historic select-style interface
3151  * internally, this routine serves as both the ``check for any
3152  * pending events'' and the ``record my interest in future events''
3153  * functions.  (These are done together, while the lock is held,
3154  * to avoid race conditions.)
3155  */
3156 int
3157 vn_pollrecord(vp, td, events)
3158 	struct vnode *vp;
3159 	struct thread *td;
3160 	short events;
3161 {
3162 
3163 	if (vp->v_pollinfo == NULL)
3164 		v_addpollinfo(vp);
3165 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3166 	if (vp->v_pollinfo->vpi_revents & events) {
3167 		/*
3168 		 * This leaves events we are not interested
3169 		 * in available for the other process which
3170 		 * which presumably had requested them
3171 		 * (otherwise they would never have been
3172 		 * recorded).
3173 		 */
3174 		events &= vp->v_pollinfo->vpi_revents;
3175 		vp->v_pollinfo->vpi_revents &= ~events;
3176 
3177 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3178 		return events;
3179 	}
3180 	vp->v_pollinfo->vpi_events |= events;
3181 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3182 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3183 	return 0;
3184 }
3185 
3186 /*
3187  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3188  * it is possible for us to miss an event due to race conditions, but
3189  * that condition is expected to be rare, so for the moment it is the
3190  * preferred interface.
3191  */
3192 void
3193 vn_pollevent(vp, events)
3194 	struct vnode *vp;
3195 	short events;
3196 {
3197 
3198 	if (vp->v_pollinfo == NULL)
3199 		v_addpollinfo(vp);
3200 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3201 	if (vp->v_pollinfo->vpi_events & events) {
3202 		/*
3203 		 * We clear vpi_events so that we don't
3204 		 * call selwakeup() twice if two events are
3205 		 * posted before the polling process(es) is
3206 		 * awakened.  This also ensures that we take at
3207 		 * most one selwakeup() if the polling process
3208 		 * is no longer interested.  However, it does
3209 		 * mean that only one event can be noticed at
3210 		 * a time.  (Perhaps we should only clear those
3211 		 * event bits which we note?) XXX
3212 		 */
3213 		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3214 		vp->v_pollinfo->vpi_revents |= events;
3215 		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3216 	}
3217 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3218 }
3219 
3220 /*
3221  * Wake up anyone polling on vp because it is being revoked.
3222  * This depends on dead_poll() returning POLLHUP for correct
3223  * behavior.
3224  */
3225 void
3226 vn_pollgone(vp)
3227 	struct vnode *vp;
3228 {
3229 
3230 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3231 	VN_KNOTE(vp, NOTE_REVOKE);
3232 	if (vp->v_pollinfo->vpi_events) {
3233 		vp->v_pollinfo->vpi_events = 0;
3234 		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3235 	}
3236 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3237 }
3238 
3239 
3240 
3241 /*
3242  * Routine to create and manage a filesystem syncer vnode.
3243  */
3244 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3245 static int	sync_fsync(struct  vop_fsync_args *);
3246 static int	sync_inactive(struct  vop_inactive_args *);
3247 static int	sync_reclaim(struct  vop_reclaim_args *);
3248 
3249 static vop_t **sync_vnodeop_p;
3250 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3251 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3252 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3253 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3254 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3255 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3256 	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3257 	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3258 	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3259 	{ NULL, NULL }
3260 };
3261 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3262 	{ &sync_vnodeop_p, sync_vnodeop_entries };
3263 
3264 VNODEOP_SET(sync_vnodeop_opv_desc);
3265 
3266 /*
3267  * Create a new filesystem syncer vnode for the specified mount point.
3268  */
3269 int
3270 vfs_allocate_syncvnode(mp)
3271 	struct mount *mp;
3272 {
3273 	struct vnode *vp;
3274 	static long start, incr, next;
3275 	int error;
3276 
3277 	/* Allocate a new vnode */
3278 	if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
3279 		mp->mnt_syncer = NULL;
3280 		return (error);
3281 	}
3282 	vp->v_type = VNON;
3283 	/*
3284 	 * Place the vnode onto the syncer worklist. We attempt to
3285 	 * scatter them about on the list so that they will go off
3286 	 * at evenly distributed times even if all the filesystems
3287 	 * are mounted at once.
3288 	 */
3289 	next += incr;
3290 	if (next == 0 || next > syncer_maxdelay) {
3291 		start /= 2;
3292 		incr /= 2;
3293 		if (start == 0) {
3294 			start = syncer_maxdelay / 2;
3295 			incr = syncer_maxdelay;
3296 		}
3297 		next = start;
3298 	}
3299 	VI_LOCK(vp);
3300 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3301 	VI_UNLOCK(vp);
3302 	mp->mnt_syncer = vp;
3303 	return (0);
3304 }
3305 
3306 /*
3307  * Do a lazy sync of the filesystem.
3308  */
3309 static int
3310 sync_fsync(ap)
3311 	struct vop_fsync_args /* {
3312 		struct vnode *a_vp;
3313 		struct ucred *a_cred;
3314 		int a_waitfor;
3315 		struct thread *a_td;
3316 	} */ *ap;
3317 {
3318 	struct vnode *syncvp = ap->a_vp;
3319 	struct mount *mp = syncvp->v_mount;
3320 	struct thread *td = ap->a_td;
3321 	int error, asyncflag;
3322 
3323 	/*
3324 	 * We only need to do something if this is a lazy evaluation.
3325 	 */
3326 	if (ap->a_waitfor != MNT_LAZY)
3327 		return (0);
3328 
3329 	/*
3330 	 * Move ourselves to the back of the sync list.
3331 	 */
3332 	VI_LOCK(syncvp);
3333 	vn_syncer_add_to_worklist(syncvp, syncdelay);
3334 	VI_UNLOCK(syncvp);
3335 
3336 	/*
3337 	 * Walk the list of vnodes pushing all that are dirty and
3338 	 * not already on the sync list.
3339 	 */
3340 	mtx_lock(&mountlist_mtx);
3341 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3342 		mtx_unlock(&mountlist_mtx);
3343 		return (0);
3344 	}
3345 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3346 		vfs_unbusy(mp, td);
3347 		return (0);
3348 	}
3349 	asyncflag = mp->mnt_flag & MNT_ASYNC;
3350 	mp->mnt_flag &= ~MNT_ASYNC;
3351 	vfs_msync(mp, MNT_NOWAIT);
3352 	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3353 	if (asyncflag)
3354 		mp->mnt_flag |= MNT_ASYNC;
3355 	vn_finished_write(mp);
3356 	vfs_unbusy(mp, td);
3357 	return (error);
3358 }
3359 
3360 /*
3361  * The syncer vnode is no referenced.
3362  */
3363 static int
3364 sync_inactive(ap)
3365 	struct vop_inactive_args /* {
3366 		struct vnode *a_vp;
3367 		struct thread *a_td;
3368 	} */ *ap;
3369 {
3370 
3371 	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3372 	vgone(ap->a_vp);
3373 	return (0);
3374 }
3375 
3376 /*
3377  * The syncer vnode is no longer needed and is being decommissioned.
3378  *
3379  * Modifications to the worklist must be protected by sync_mtx.
3380  */
3381 static int
3382 sync_reclaim(ap)
3383 	struct vop_reclaim_args /* {
3384 		struct vnode *a_vp;
3385 	} */ *ap;
3386 {
3387 	struct vnode *vp = ap->a_vp;
3388 
3389 	VI_LOCK(vp);
3390 	vp->v_mount->mnt_syncer = NULL;
3391 	if (vp->v_iflag & VI_ONWORKLST) {
3392 		mtx_lock(&sync_mtx);
3393 		LIST_REMOVE(vp, v_synclist);
3394 		mtx_unlock(&sync_mtx);
3395 		vp->v_iflag &= ~VI_ONWORKLST;
3396 	}
3397 	VI_UNLOCK(vp);
3398 
3399 	return (0);
3400 }
3401 
3402 /*
3403  * extract the dev_t from a VCHR
3404  */
3405 dev_t
3406 vn_todev(vp)
3407 	struct vnode *vp;
3408 {
3409 
3410 	if (vp->v_type != VCHR)
3411 		return (NODEV);
3412 	return (vp->v_rdev);
3413 }
3414 
3415 /*
3416  * Check if vnode represents a disk device
3417  */
3418 int
3419 vn_isdisk(vp, errp)
3420 	struct vnode *vp;
3421 	int *errp;
3422 {
3423 	int error;
3424 
3425 	error = 0;
3426 	if (vp->v_type != VCHR)
3427 		error = ENOTBLK;
3428 	else if (vp->v_rdev == NULL)
3429 		error = ENXIO;
3430 	else if (!(devsw(vp->v_rdev)->d_flags & D_DISK))
3431 		error = ENOTBLK;
3432 	if (errp != NULL)
3433 		*errp = error;
3434 	return (error == 0);
3435 }
3436 
3437 /*
3438  * Free data allocated by namei(); see namei(9) for details.
3439  */
3440 void
3441 NDFREE(ndp, flags)
3442      struct nameidata *ndp;
3443      const u_int flags;
3444 {
3445 
3446 	if (!(flags & NDF_NO_FREE_PNBUF) &&
3447 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3448 		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3449 		ndp->ni_cnd.cn_flags &= ~HASBUF;
3450 	}
3451 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3452 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3453 	    ndp->ni_dvp != ndp->ni_vp)
3454 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3455 	if (!(flags & NDF_NO_DVP_RELE) &&
3456 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3457 		vrele(ndp->ni_dvp);
3458 		ndp->ni_dvp = NULL;
3459 	}
3460 	if (!(flags & NDF_NO_VP_UNLOCK) &&
3461 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3462 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3463 	if (!(flags & NDF_NO_VP_RELE) &&
3464 	    ndp->ni_vp) {
3465 		vrele(ndp->ni_vp);
3466 		ndp->ni_vp = NULL;
3467 	}
3468 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3469 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3470 		vrele(ndp->ni_startdir);
3471 		ndp->ni_startdir = NULL;
3472 	}
3473 }
3474 
3475 /*
3476  * Common filesystem object access control check routine.  Accepts a
3477  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3478  * and optional call-by-reference privused argument allowing vaccess()
3479  * to indicate to the caller whether privilege was used to satisfy the
3480  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3481  */
3482 int
3483 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3484 	enum vtype type;
3485 	mode_t file_mode;
3486 	uid_t file_uid;
3487 	gid_t file_gid;
3488 	mode_t acc_mode;
3489 	struct ucred *cred;
3490 	int *privused;
3491 {
3492 	mode_t dac_granted;
3493 #ifdef CAPABILITIES
3494 	mode_t cap_granted;
3495 #endif
3496 
3497 	/*
3498 	 * Look for a normal, non-privileged way to access the file/directory
3499 	 * as requested.  If it exists, go with that.
3500 	 */
3501 
3502 	if (privused != NULL)
3503 		*privused = 0;
3504 
3505 	dac_granted = 0;
3506 
3507 	/* Check the owner. */
3508 	if (cred->cr_uid == file_uid) {
3509 		dac_granted |= VADMIN;
3510 		if (file_mode & S_IXUSR)
3511 			dac_granted |= VEXEC;
3512 		if (file_mode & S_IRUSR)
3513 			dac_granted |= VREAD;
3514 		if (file_mode & S_IWUSR)
3515 			dac_granted |= (VWRITE | VAPPEND);
3516 
3517 		if ((acc_mode & dac_granted) == acc_mode)
3518 			return (0);
3519 
3520 		goto privcheck;
3521 	}
3522 
3523 	/* Otherwise, check the groups (first match) */
3524 	if (groupmember(file_gid, cred)) {
3525 		if (file_mode & S_IXGRP)
3526 			dac_granted |= VEXEC;
3527 		if (file_mode & S_IRGRP)
3528 			dac_granted |= VREAD;
3529 		if (file_mode & S_IWGRP)
3530 			dac_granted |= (VWRITE | VAPPEND);
3531 
3532 		if ((acc_mode & dac_granted) == acc_mode)
3533 			return (0);
3534 
3535 		goto privcheck;
3536 	}
3537 
3538 	/* Otherwise, check everyone else. */
3539 	if (file_mode & S_IXOTH)
3540 		dac_granted |= VEXEC;
3541 	if (file_mode & S_IROTH)
3542 		dac_granted |= VREAD;
3543 	if (file_mode & S_IWOTH)
3544 		dac_granted |= (VWRITE | VAPPEND);
3545 	if ((acc_mode & dac_granted) == acc_mode)
3546 		return (0);
3547 
3548 privcheck:
3549 	if (!suser_cred(cred, PRISON_ROOT)) {
3550 		/* XXX audit: privilege used */
3551 		if (privused != NULL)
3552 			*privused = 1;
3553 		return (0);
3554 	}
3555 
3556 #ifdef CAPABILITIES
3557 	/*
3558 	 * Build a capability mask to determine if the set of capabilities
3559 	 * satisfies the requirements when combined with the granted mask
3560 	 * from above.
3561 	 * For each capability, if the capability is required, bitwise
3562 	 * or the request type onto the cap_granted mask.
3563 	 */
3564 	cap_granted = 0;
3565 
3566 	if (type == VDIR) {
3567 		/*
3568 		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3569 		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3570 		 */
3571 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3572 		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3573 			cap_granted |= VEXEC;
3574 	} else {
3575 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3576 		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
3577 			cap_granted |= VEXEC;
3578 	}
3579 
3580 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3581 	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3582 		cap_granted |= VREAD;
3583 
3584 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3585 	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
3586 		cap_granted |= (VWRITE | VAPPEND);
3587 
3588 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3589 	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
3590 		cap_granted |= VADMIN;
3591 
3592 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3593 		/* XXX audit: privilege used */
3594 		if (privused != NULL)
3595 			*privused = 1;
3596 		return (0);
3597 	}
3598 #endif
3599 
3600 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3601 }
3602 
3603 /*
3604  * Credential check based on process requesting service, and per-attribute
3605  * permissions.
3606  */
3607 int
3608 extattr_check_cred(struct vnode *vp, int attrnamespace,
3609     struct ucred *cred, struct thread *td, int access)
3610 {
3611 
3612 	/*
3613 	 * Kernel-invoked always succeeds.
3614 	 */
3615 	if (cred == NOCRED)
3616 		return (0);
3617 
3618 	/*
3619 	 * Do not allow privileged processes in jail to directly
3620 	 * manipulate system attributes.
3621 	 *
3622 	 * XXX What capability should apply here?
3623 	 * Probably CAP_SYS_SETFFLAG.
3624 	 */
3625 	switch (attrnamespace) {
3626 	case EXTATTR_NAMESPACE_SYSTEM:
3627 		/* Potentially should be: return (EPERM); */
3628 		return (suser_cred(cred, 0));
3629 	case EXTATTR_NAMESPACE_USER:
3630 		return (VOP_ACCESS(vp, access, cred, td));
3631 	default:
3632 		return (EPERM);
3633 	}
3634 }
3635 
3636 #ifdef DEBUG_VFS_LOCKS
3637 /*
3638  * This only exists to supress warnings from unlocked specfs accesses.  It is
3639  * no longer ok to have an unlocked VFS.
3640  */
3641 #define	IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
3642 
3643 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3644 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3645 int vfs_badlock_print = 1;	/* Print lock violations. */
3646 
3647 static void
3648 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3649 {
3650 
3651 	if (vfs_badlock_print)
3652 		printf("%s: %p %s\n", str, (void *)vp, msg);
3653 	if (vfs_badlock_ddb)
3654 		Debugger("lock violation");
3655 }
3656 
3657 void
3658 assert_vi_locked(struct vnode *vp, const char *str)
3659 {
3660 
3661 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3662 		vfs_badlock("interlock is not locked but should be", str, vp);
3663 }
3664 
3665 void
3666 assert_vi_unlocked(struct vnode *vp, const char *str)
3667 {
3668 
3669 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3670 		vfs_badlock("interlock is locked but should not be", str, vp);
3671 }
3672 
3673 void
3674 assert_vop_locked(struct vnode *vp, const char *str)
3675 {
3676 
3677 	if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
3678 		vfs_badlock("is not locked but should be", str, vp);
3679 }
3680 
3681 void
3682 assert_vop_unlocked(struct vnode *vp, const char *str)
3683 {
3684 
3685 	if (vp && !IGNORE_LOCK(vp) &&
3686 	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
3687 		vfs_badlock("is locked but should not be", str, vp);
3688 }
3689 
3690 #if 0
3691 void
3692 assert_vop_elocked(struct vnode *vp, const char *str)
3693 {
3694 
3695 	if (vp && !IGNORE_LOCK(vp) &&
3696 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
3697 		vfs_badlock("is not exclusive locked but should be", str, vp);
3698 }
3699 
3700 void
3701 assert_vop_elocked_other(struct vnode *vp, const char *str)
3702 {
3703 
3704 	if (vp && !IGNORE_LOCK(vp) &&
3705 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
3706 		vfs_badlock("is not exclusive locked by another thread",
3707 		    str, vp);
3708 }
3709 
3710 void
3711 assert_vop_slocked(struct vnode *vp, const char *str)
3712 {
3713 
3714 	if (vp && !IGNORE_LOCK(vp) &&
3715 	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
3716 		vfs_badlock("is not locked shared but should be", str, vp);
3717 }
3718 #endif /* 0 */
3719 
3720 void
3721 vop_rename_pre(void *ap)
3722 {
3723 	struct vop_rename_args *a = ap;
3724 
3725 	if (a->a_tvp)
3726 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3727 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3728 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3729 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3730 
3731 	/* Check the source (from). */
3732 	if (a->a_tdvp != a->a_fdvp)
3733 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3734 	if (a->a_tvp != a->a_fvp)
3735 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: tvp locked");
3736 
3737 	/* Check the target. */
3738 	if (a->a_tvp)
3739 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3740 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3741 }
3742 
3743 void
3744 vop_strategy_pre(void *ap)
3745 {
3746 	struct vop_strategy_args *a;
3747 	struct buf *bp;
3748 
3749 	a = ap;
3750 	bp = a->a_bp;
3751 
3752 	/*
3753 	 * Cluster ops lock their component buffers but not the IO container.
3754 	 */
3755 	if ((bp->b_flags & B_CLUSTER) != 0)
3756 		return;
3757 
3758 	if (BUF_REFCNT(bp) < 1) {
3759 		if (vfs_badlock_print)
3760 			printf(
3761 			    "VOP_STRATEGY: bp is not locked but should be\n");
3762 		if (vfs_badlock_ddb)
3763 			Debugger("lock violation");
3764 	}
3765 }
3766 
3767 void
3768 vop_lookup_pre(void *ap)
3769 {
3770 	struct vop_lookup_args *a;
3771 	struct vnode *dvp;
3772 
3773 	a = ap;
3774 	dvp = a->a_dvp;
3775 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3776 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3777 }
3778 
3779 void
3780 vop_lookup_post(void *ap, int rc)
3781 {
3782 	struct vop_lookup_args *a;
3783 	struct componentname *cnp;
3784 	struct vnode *dvp;
3785 	struct vnode *vp;
3786 	int flags;
3787 
3788 	a = ap;
3789 	dvp = a->a_dvp;
3790 	cnp = a->a_cnp;
3791 	vp = *(a->a_vpp);
3792 	flags = cnp->cn_flags;
3793 
3794 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3795 
3796 	/*
3797 	 * If this is the last path component for this lookup and LOCKPARENT
3798 	 * is set, OR if there is an error the directory has to be locked.
3799 	 */
3800 	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
3801 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
3802 	else if (rc != 0)
3803 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
3804 	else if (dvp != vp)
3805 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
3806 	if (flags & PDIRUNLOCK)
3807 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
3808 }
3809 
3810 void
3811 vop_lock_pre(void *ap)
3812 {
3813 	struct vop_lock_args *a = ap;
3814 
3815 	if ((a->a_flags & LK_INTERLOCK) == 0)
3816 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3817 	else
3818 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3819 }
3820 
3821 void
3822 vop_lock_post(void *ap, int rc)
3823 {
3824 	struct vop_lock_args *a = ap;
3825 
3826 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3827 	if (rc == 0)
3828 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3829 }
3830 
3831 void
3832 vop_unlock_pre(void *ap)
3833 {
3834 	struct vop_unlock_args *a = ap;
3835 
3836 	if (a->a_flags & LK_INTERLOCK)
3837 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3838 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3839 }
3840 
3841 void
3842 vop_unlock_post(void *ap, int rc)
3843 {
3844 	struct vop_unlock_args *a = ap;
3845 
3846 	if (a->a_flags & LK_INTERLOCK)
3847 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3848 }
3849 #endif /* DEBUG_VFS_LOCKS */
3850