xref: /freebsd/sys/kern/vfs_subr.c (revision 2546665afcaf0d53dc2c7058fee96354b3680f5a)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  */
36 
37 /*
38  * External virtual filesystem routines
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/event.h>
53 #include <sys/eventhandler.h>
54 #include <sys/extattr.h>
55 #include <sys/fcntl.h>
56 #include <sys/kdb.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/mac.h>
60 #include <sys/malloc.h>
61 #include <sys/mount.h>
62 #include <sys/namei.h>
63 #include <sys/sleepqueue.h>
64 #include <sys/stat.h>
65 #include <sys/sysctl.h>
66 #include <sys/syslog.h>
67 #include <sys/vmmeter.h>
68 #include <sys/vnode.h>
69 
70 #include <vm/vm.h>
71 #include <vm/vm_object.h>
72 #include <vm/vm_extern.h>
73 #include <vm/pmap.h>
74 #include <vm/vm_map.h>
75 #include <vm/vm_page.h>
76 #include <vm/vm_kern.h>
77 #include <vm/uma.h>
78 
79 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
80 
81 static void	addalias(struct vnode *vp, struct cdev *nvp_rdev);
82 static void	delmntque(struct vnode *vp);
83 static void	insmntque(struct vnode *vp, struct mount *mp);
84 static void	vclean(struct vnode *vp, int flags, struct thread *td);
85 static void	vlruvp(struct vnode *vp);
86 static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
87 		    int slpflag, int slptimeo, int *errorp);
88 static void	syncer_shutdown(void *arg, int howto);
89 static int	vtryrecycle(struct vnode *vp);
90 static void	vx_lock(struct vnode *vp);
91 static void	vx_unlock(struct vnode *vp);
92 static void	vgonechrl(struct vnode *vp, struct thread *td);
93 
94 
95 /*
96  * Number of vnodes in existence.  Increased whenever getnewvnode()
97  * allocates a new vnode, never decreased.
98  */
99 static unsigned long	numvnodes;
100 
101 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
102 
103 /*
104  * Conversion tables for translating vnode types to inode formats
105  * and back.
106  */
107 enum vtype iftovt_tab[16] = {
108 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
109 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
110 };
111 int vttoif_tab[9] = {
112 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
113 	S_IFSOCK, S_IFIFO, S_IFMT,
114 };
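
/*
 * For example, S_IFDIR is 0040000, so (S_IFDIR & S_IFMT) >> 12 indexes
 * slot 4 of iftovt_tab and yields VDIR, while vttoif_tab[VDIR] maps back
 * to S_IFDIR.  The IFTOVT() and VTTOIF() macros in sys/vnode.h wrap
 * these lookups.
 */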
115 
116 /*
117  * List of vnodes that are ready for recycling.
118  */
119 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
120 
121 /*
122  * Minimum number of free vnodes.  If there are fewer free vnodes than this,
123  * getnewvnode() will return a newly allocated vnode.
124  */
125 static u_long wantfreevnodes = 25;
126 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
127 /* Number of vnodes in the free list. */
128 static u_long freevnodes;
129 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
130 
131 /*
132  * Various variables used for debugging the new implementation of
133  * reassignbuf().
134  * XXX these are probably of (very) limited utility now.
135  */
136 static int reassignbufcalls;
137 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
138 static int nameileafonly;
139 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
140 
141 /*
142  * Cache for the mount type id assigned to NFS.  This is used for
143  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
144  */
145 int	nfs_mount_type = -1;
146 
147 /* To keep more than one thread at a time from running vfs_getnewfsid */
148 static struct mtx mntid_mtx;
149 
150 /*
151  * Lock for any access to the following:
152  *	vnode_free_list
153  *	numvnodes
154  *	freevnodes
155  */
156 static struct mtx vnode_free_list_mtx;
157 
158 /*
159  * For any iteration/modification of dev->si_hlist (linked through
160  * v_specnext)
161  */
162 static struct mtx spechash_mtx;
163 
164 /* Publicly exported FS */
165 struct nfs_public nfs_pub;
166 
167 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
168 static uma_zone_t vnode_zone;
169 static uma_zone_t vnodepoll_zone;
170 
171 /* Set to 1 to print out reclaim of active vnodes */
172 int	prtactive;
173 
174 /*
175  * The workitem queue.
176  *
177  * It is useful to delay writes of file data and filesystem metadata
178  * for tens of seconds so that quickly created and deleted files need
179  * not waste disk bandwidth being created and removed. To realize this,
180  * we append vnodes to a "workitem" queue. When running with a soft
181  * updates implementation, most pending metadata dependencies should
182  * not wait for more than a few seconds. Thus, writes to mounted block
183  * devices are delayed only about half the time that file data is delayed.
184  * Similarly, directory updates are more critical, so they are delayed
185  * only about a third of the time that file data is delayed. Thus, there are
186  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
187  * one each second (driven off the filesystem syncer process). The
188  * syncer_delayno variable indicates the next queue that is to be processed.
189  * Items that need to be processed soon are placed in this queue:
190  *
191  *	syncer_workitem_pending[syncer_delayno]
192  *
193  * A delay of fifteen seconds is done by placing the request fifteen
194  * entries later in the queue:
195  *
196  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
197  *
198  */
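/*
 * For example, if syncer_delayno is currently 20 and syncer_mask is 31,
 * a request delayed by fifteen seconds lands in slot (20 + 15) & 31 == 3
 * and is processed when the round-robin scan wraps around to that slot.
 */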
199 static int syncer_delayno;
200 static long syncer_mask;
201 LIST_HEAD(synclist, vnode);
202 static struct synclist *syncer_workitem_pending;
203 /*
204  * The sync_mtx protects:
205  *	vp->v_synclist
206  *	sync_vnode_count
207  *	syncer_delayno
208  *	syncer_state
209  *	syncer_workitem_pending
210  *	syncer_worklist_len
211  *	rushjob
212  */
213 static struct mtx sync_mtx;
214 
215 #define SYNCER_MAXDELAY		32
216 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
217 static int syncdelay = 30;		/* max time to delay syncing data */
218 static int filedelay = 30;		/* time to delay syncing files */
219 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
220 static int dirdelay = 29;		/* time to delay syncing directories */
221 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
222 static int metadelay = 28;		/* time to delay syncing metadata */
223 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
224 static int rushjob;		/* number of slots to run ASAP */
225 static int stat_rush_requests;	/* number of times I/O speeded up */
226 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
227 
228 /*
229  * When shutting down the syncer, run it at four times normal speed.
230  */
231 #define SYNCER_SHUTDOWN_SPEEDUP		4
232 static int sync_vnode_count;
233 static int syncer_worklist_len;
234 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
235     syncer_state;
236 
237 /*
238  * Number of vnodes we want to exist at any one time.  This is mostly used
239  * to size hash tables in vnode-related code.  It is normally not used in
240  * getnewvnode(), as wantfreevnodes is normally nonzero.
241  *
242  * XXX desiredvnodes is historical cruft and should not exist.
243  */
244 int desiredvnodes;
245 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
246     &desiredvnodes, 0, "Maximum number of vnodes");
247 static int minvnodes;
248 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
249     &minvnodes, 0, "Minimum number of vnodes");
250 static int vnlru_nowhere;
251 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
252     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
253 
254 /* Hook for calling soft updates. */
255 int (*softdep_process_worklist_hook)(struct mount *);
256 
257 /*
258  * Initialize the vnode management data structures.
259  */
260 #define	MAX_SAFE_MAXVNODES	100000
261 
262 static void
263 vntblinit(void *dummy __unused)
264 {
265 
266 	/*
267 	 * Desiredvnodes is a function of the physical memory size and
268 	 * the kernel's heap size.  Specifically, desiredvnodes scales
269 	 * in proportion to the physical memory size until two fifths
270 	 * of the kernel's heap size is consumed by vnodes and vm
271 	 * objects.
272 	 */
273 	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
274 	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
275 	if (desiredvnodes > MAX_SAFE_MAXVNODES) {
276 		if (bootverbose)
277 			printf("Reducing kern.maxvnodes %d -> %d\n",
278 			    desiredvnodes, MAX_SAFE_MAXVNODES);
279 		desiredvnodes = MAX_SAFE_MAXVNODES;
280 	}
281 	minvnodes = desiredvnodes / 4;
282 	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
283 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
284 	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
285 	TAILQ_INIT(&vnode_free_list);
286 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
287 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
288 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
289 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
290 	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
291 	/*
292 	 * Initialize the filesystem syncer.
293 	 */
294 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
295 		&syncer_mask);
296 	syncer_maxdelay = syncer_mask + 1;
297 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
298 }
299 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
300 
301 
302 /*
303  * Mark a mount point as busy. Used to synchronize access and to delay
304  * unmounting. Interlock is not released on failure.
305  */
306 int
307 vfs_busy(mp, flags, interlkp, td)
308 	struct mount *mp;
309 	int flags;
310 	struct mtx *interlkp;
311 	struct thread *td;
312 {
313 	int lkflags;
314 
315 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
316 		if (flags & LK_NOWAIT)
317 			return (ENOENT);
318 		mp->mnt_kern_flag |= MNTK_MWAIT;
319 		/*
320 		 * Since all busy locks are shared except the exclusive
321 		 * lock granted when unmounting, the only place that a
322 		 * wakeup needs to be done is at the release of the
323 		 * exclusive lock at the end of dounmount.
324 		 */
325 		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
326 		return (ENOENT);
327 	}
328 	lkflags = LK_SHARED | LK_NOPAUSE;
329 	if (interlkp)
330 		lkflags |= LK_INTERLOCK;
331 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
332 		panic("vfs_busy: unexpected lock failure");
333 	return (0);
334 }
335 
336 /*
337  * Free a busy filesystem.
338  */
339 void
340 vfs_unbusy(mp, td)
341 	struct mount *mp;
342 	struct thread *td;
343 {
344 
345 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
346 }
347 
348 /*
349  * Lookup a mount point by filesystem identifier.
350  */
351 struct mount *
352 vfs_getvfs(fsid)
353 	fsid_t *fsid;
354 {
355 	register struct mount *mp;
356 
357 	mtx_lock(&mountlist_mtx);
358 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
359 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
360 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
361 			mtx_unlock(&mountlist_mtx);
362 			return (mp);
363 		}
364 	}
365 	mtx_unlock(&mountlist_mtx);
366 	return ((struct mount *) 0);
367 }
368 
369 /*
370  * Check if a user can access privileged mount options.
371  */
372 int
373 vfs_suser(struct mount *mp, struct thread *td)
374 {
375 	int error;
376 
377 	if ((mp->mnt_flag & MNT_USER) == 0 ||
378 	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
379 		if ((error = suser(td)) != 0)
380 			return (error);
381 	}
382 	return (0);
383 }
384 
385 /*
386  * Get a new unique fsid.  Try to make its val[0] unique, since this value
387  * will be used to create fake device numbers for stat().  Also try (but
388  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
389  * support 16-bit device numbers.  We end up with unique val[0]'s for the
390  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
391  *
392  * Keep in mind that several mounts may be running in parallel.  Starting
393  * the search one past where the previous search terminated is both a
394  * micro-optimization and a defense against returning the same fsid to
395  * different mounts.
396  */
397 void
398 vfs_getnewfsid(mp)
399 	struct mount *mp;
400 {
401 	static u_int16_t mntid_base;
402 	fsid_t tfsid;
403 	int mtype;
404 
405 	mtx_lock(&mntid_mtx);
406 	mtype = mp->mnt_vfc->vfc_typenum;
407 	tfsid.val[1] = mtype;
408 	mtype = (mtype & 0xFF) << 24;
409 	for (;;) {
410 		tfsid.val[0] = makedev(255,
411 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
412 		mntid_base++;
413 		if (vfs_getvfs(&tfsid) == NULL)
414 			break;
415 	}
416 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
417 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
418 	mtx_unlock(&mntid_mtx);
419 }
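
/*
 * For example, a filesystem type with vfc_typenum 5 mounted while
 * mntid_base is 0x1234 is assigned val[0] = makedev(255, 0x05120034):
 * the type number occupies the top byte of the minor number and the two
 * halves of the 16-bit mount id fill the remaining bytes.
 */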
420 
421 /*
422  * Knob to control the precision of file timestamps:
423  *
424  *   0 = seconds only; nanoseconds zeroed.
425  *   1 = seconds and nanoseconds, accurate within 1/HZ.
426  *   2 = seconds and nanoseconds, truncated to microseconds.
427  * >=3 = seconds and nanoseconds, maximum precision.
428  */
429 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
430 
431 static int timestamp_precision = TSP_SEC;
432 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
433     &timestamp_precision, 0, "");
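
/*
 * The precision knob is writable at run time; for example
 *
 *	sysctl vfs.timestamp_precision=2
 *
 * makes vfs_timestamp() below truncate new timestamps to microseconds
 * via microtime().
 */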
434 
435 /*
436  * Get a current timestamp.
437  */
438 void
439 vfs_timestamp(tsp)
440 	struct timespec *tsp;
441 {
442 	struct timeval tv;
443 
444 	switch (timestamp_precision) {
445 	case TSP_SEC:
446 		tsp->tv_sec = time_second;
447 		tsp->tv_nsec = 0;
448 		break;
449 	case TSP_HZ:
450 		getnanotime(tsp);
451 		break;
452 	case TSP_USEC:
453 		microtime(&tv);
454 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
455 		break;
456 	case TSP_NSEC:
457 	default:
458 		nanotime(tsp);
459 		break;
460 	}
461 }
462 
463 /*
464  * Set vnode attributes to VNOVAL
465  */
466 void
467 vattr_null(vap)
468 	register struct vattr *vap;
469 {
470 
471 	vap->va_type = VNON;
472 	vap->va_size = VNOVAL;
473 	vap->va_bytes = VNOVAL;
474 	vap->va_mode = VNOVAL;
475 	vap->va_nlink = VNOVAL;
476 	vap->va_uid = VNOVAL;
477 	vap->va_gid = VNOVAL;
478 	vap->va_fsid = VNOVAL;
479 	vap->va_fileid = VNOVAL;
480 	vap->va_blocksize = VNOVAL;
481 	vap->va_rdev = VNOVAL;
482 	vap->va_atime.tv_sec = VNOVAL;
483 	vap->va_atime.tv_nsec = VNOVAL;
484 	vap->va_mtime.tv_sec = VNOVAL;
485 	vap->va_mtime.tv_nsec = VNOVAL;
486 	vap->va_ctime.tv_sec = VNOVAL;
487 	vap->va_ctime.tv_nsec = VNOVAL;
488 	vap->va_birthtime.tv_sec = VNOVAL;
489 	vap->va_birthtime.tv_nsec = VNOVAL;
490 	vap->va_flags = VNOVAL;
491 	vap->va_gen = VNOVAL;
492 	vap->va_vaflags = 0;
493 }
494 
495 /*
496  * This routine is called when we have too many vnodes.  It attempts
497  * to free <count> vnodes and will potentially free vnodes that still
498  * have VM backing store (VM backing store is typically the cause
499  * of a vnode blowout so we want to do this).  Therefore, this operation
500  * is not considered cheap.
501  *
502  * A number of conditions may prevent a vnode from being reclaimed:
503  * the buffer cache may have references on the vnode, a directory
504  * vnode may still have references due to the namei cache representing
505  * underlying files, or the vnode may be in active use.  It is not
506  * desirable to reuse such vnodes.  These conditions may cause the
507  * number of vnodes to reach some minimum value regardless of what
508  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
509  */
510 static int
511 vlrureclaim(struct mount *mp)
512 {
513 	struct vnode *vp;
514 	int done;
515 	int trigger;
516 	int usevnodes;
517 	int count;
518 
519 	/*
520 	 * Calculate the trigger point; don't allow user
521 	 * screwups to blow us up.  This prevents us from
522 	 * recycling vnodes with lots of resident pages.  We
523 	 * aren't trying to free memory, we are trying to
524 	 * free vnodes.
525 	 */
526 	usevnodes = desiredvnodes;
527 	if (usevnodes <= 0)
528 		usevnodes = 1;
529 	trigger = cnt.v_page_count * 2 / usevnodes;
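	/*
	 * For example, with 512K physical pages and desiredvnodes at its
	 * 100000 cap, trigger is 10: vnodes caching ten or more resident
	 * pages are passed over below.
	 */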
530 
531 	done = 0;
532 	MNT_ILOCK(mp);
533 	count = mp->mnt_nvnodelistsize / 10 + 1;
534 	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
535 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
536 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
537 
538 		if (vp->v_type != VNON &&
539 		    vp->v_type != VBAD &&
540 		    VI_TRYLOCK(vp)) {
541 			if (VMIGHTFREE(vp) &&           /* critical path opt */
542 			    (vp->v_object == NULL ||
543 			    vp->v_object->resident_page_count < trigger)) {
544 				MNT_IUNLOCK(mp);
545 				vgonel(vp, curthread);
546 				done++;
547 				MNT_ILOCK(mp);
548 			} else
549 				VI_UNLOCK(vp);
550 		}
551 		--count;
552 	}
553 	MNT_IUNLOCK(mp);
554 	return done;
555 }
556 
557 /*
558  * Attempt to recycle vnodes in a context that is always safe to block.
559  * Calling vlrureclaim() from the bowels of filesystem code has some
560  * interesting deadlock problems.
561  */
562 static struct proc *vnlruproc;
563 static int vnlruproc_sig;
564 
565 static void
566 vnlru_proc(void)
567 {
568 	struct mount *mp, *nmp;
569 	int done;
570 	struct proc *p = vnlruproc;
571 	struct thread *td = FIRST_THREAD_IN_PROC(p);
572 
573 	mtx_lock(&Giant);
574 
575 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
576 	    SHUTDOWN_PRI_FIRST);
577 
578 	for (;;) {
579 		kthread_suspend_check(p);
580 		mtx_lock(&vnode_free_list_mtx);
581 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
582 			mtx_unlock(&vnode_free_list_mtx);
583 			vnlruproc_sig = 0;
584 			wakeup(&vnlruproc_sig);
585 			tsleep(vnlruproc, PVFS, "vlruwt", hz);
586 			continue;
587 		}
588 		mtx_unlock(&vnode_free_list_mtx);
589 		done = 0;
590 		mtx_lock(&mountlist_mtx);
591 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
592 			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
593 				nmp = TAILQ_NEXT(mp, mnt_list);
594 				continue;
595 			}
596 			done += vlrureclaim(mp);
597 			mtx_lock(&mountlist_mtx);
598 			nmp = TAILQ_NEXT(mp, mnt_list);
599 			vfs_unbusy(mp, td);
600 		}
601 		mtx_unlock(&mountlist_mtx);
602 		if (done == 0) {
603 #if 0
604 			/* These messages are temporary debugging aids */
605 			if (vnlru_nowhere < 5)
606 				printf("vnlru process getting nowhere..\n");
607 			else if (vnlru_nowhere == 5)
608 				printf("vnlru process messages stopped.\n");
609 #endif
610 			vnlru_nowhere++;
611 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
612 		}
613 	}
614 }
615 
616 static struct kproc_desc vnlru_kp = {
617 	"vnlru",
618 	vnlru_proc,
619 	&vnlruproc
620 };
621 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
622 
623 
624 /*
625  * Routines having to do with the management of the vnode table.
626  */
627 
628 /*
629  * Check to see if a free vnode can be recycled. If it can,
630  * recycle it and return it with the vnode interlock held.
631  */
632 static int
633 vtryrecycle(struct vnode *vp)
634 {
635 	struct thread *td = curthread;
636 	vm_object_t object;
637 	struct mount *vnmp;
638 	int error;
639 
640 	/* Don't recycle if we can't get the interlock */
641 	if (!VI_TRYLOCK(vp))
642 		return (EWOULDBLOCK);
643 	/*
644 	 * This vnode may be found and locked via some other list; if so,
645 	 * we can't recycle it yet.
646 	 */
647 	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
648 		return (EWOULDBLOCK);
649 	/*
650 	 * Don't recycle if its filesystem is being suspended.
651 	 */
652 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
653 		VOP_UNLOCK(vp, 0, td);
654 		return (EBUSY);
655 	}
656 
657 	/*
658 	 * Don't recycle if we still have cached pages.
659 	 */
660 	if (VOP_GETVOBJECT(vp, &object) == 0) {
661 		VM_OBJECT_LOCK(object);
662 		if (object->resident_page_count ||
663 		    object->ref_count) {
664 			VM_OBJECT_UNLOCK(object);
665 			error = EBUSY;
666 			goto done;
667 		}
668 		VM_OBJECT_UNLOCK(object);
669 	}
670 	if (LIST_FIRST(&vp->v_cache_src)) {
671 		/*
672 		 * note: nameileafonly sysctl is temporary,
673 		 * for debugging only, and will eventually be
674 		 * removed.
675 		 */
676 		if (nameileafonly > 0) {
677 			/*
678 			 * Do not reuse namei-cached directory
679 			 * vnodes that have cached
680 			 * subdirectories.
681 			 */
682 			if (cache_leaf_test(vp) < 0) {
683 				error = EISDIR;
684 				goto done;
685 			}
686 		} else if (nameileafonly < 0 ||
687 			    vmiodirenable == 0) {
688 			/*
689 			 * Do not reuse namei-cached directory
690 			 * vnodes if nameileafonly is -1 or
691 			 * if VMIO backing for directories is
692 			 * turned off (otherwise we reuse them
693 			 * too quickly).
694 			 */
695 			error = EBUSY;
696 			goto done;
697 		}
698 	}
699 	/*
700 	 * If we got this far, we need to acquire the interlock and see if
701 	 * anyone picked up this vnode from another list.  If not, we will
702 	 * mark it with XLOCK via vgonel() so that anyone who does find it
703 	 * will skip over it.
704 	 */
705 	VI_LOCK(vp);
706 	if (VSHOULDBUSY(vp) && (vp->v_iflag & VI_XLOCK) == 0) {
707 		VI_UNLOCK(vp);
708 		error = EBUSY;
709 		goto done;
710 	}
711 	mtx_lock(&vnode_free_list_mtx);
712 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
713 	vp->v_iflag &= ~VI_FREE;
714 	mtx_unlock(&vnode_free_list_mtx);
715 	vp->v_iflag |= VI_DOOMED;
716 	if (vp->v_type != VBAD) {
717 		VOP_UNLOCK(vp, 0, td);
718 		vgonel(vp, td);
719 		VI_LOCK(vp);
720 	} else
721 		VOP_UNLOCK(vp, 0, td);
722 	vn_finished_write(vnmp);
723 	return (0);
724 done:
725 	VOP_UNLOCK(vp, 0, td);
726 	vn_finished_write(vnmp);
727 	return (error);
728 }
729 
730 /*
731  * Return the next vnode from the free list.
732  */
733 int
734 getnewvnode(tag, mp, vops, vpp)
735 	const char *tag;
736 	struct mount *mp;
737 	vop_t **vops;
738 	struct vnode **vpp;
739 {
740 	struct vnode *vp = NULL;
741 	struct vpollinfo *pollinfo = NULL;
742 
743 	mtx_lock(&vnode_free_list_mtx);
744 
745 	/*
746 	 * Try to reuse vnodes if we hit the max.  This normally happens
747 	 * only in certain large-memory (2G+) configurations.  We cannot
748 	 * attempt to directly reclaim vnodes due to nasty recursion
749 	 * problems.
750 	 */
751 	while (numvnodes - freevnodes > desiredvnodes) {
752 		if (vnlruproc_sig == 0) {
753 			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
754 			wakeup(vnlruproc);
755 		}
756 		mtx_unlock(&vnode_free_list_mtx);
757 		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
758 		mtx_lock(&vnode_free_list_mtx);
759 	}
760 
761 	/*
762 	 * Attempt to reuse a vnode already on the free list, allocating
763 	 * a new vnode if we can't find one or if we have not yet reached
764 	 * the minimum needed for good LRU performance.
765 	 */
766 
767 	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
768 		int error;
769 		int count;
770 
771 		for (count = 0; count < freevnodes; count++) {
772 			vp = TAILQ_FIRST(&vnode_free_list);
773 
774 			KASSERT(vp->v_usecount == 0 &&
775 			    (vp->v_iflag & VI_DOINGINACT) == 0,
776 			    ("getnewvnode: free vnode isn't"));
777 
778 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
779 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
780 			mtx_unlock(&vnode_free_list_mtx);
781 			error = vtryrecycle(vp);
782 			mtx_lock(&vnode_free_list_mtx);
783 			if (error == 0)
784 				break;
785 			vp = NULL;
786 		}
787 	}
788 	if (vp) {
789 		freevnodes--;
790 		mtx_unlock(&vnode_free_list_mtx);
791 
792 #ifdef INVARIANTS
793 		{
794 			if (vp->v_data)
795 				panic("cleaned vnode isn't");
796 			if (vp->v_numoutput)
797 				panic("Clean vnode has pending I/O's");
798 			if (vp->v_writecount != 0)
799 				panic("Non-zero write count");
800 		}
801 #endif
802 		if ((pollinfo = vp->v_pollinfo) != NULL) {
803 			/*
804 			 * To avoid lock order reversals, the call to
805 			 * uma_zfree() must be delayed until the vnode
806 			 * interlock is released.
807 			 */
808 			vp->v_pollinfo = NULL;
809 		}
810 #ifdef MAC
811 		mac_destroy_vnode(vp);
812 #endif
813 		vp->v_iflag = 0;
814 		vp->v_vflag = 0;
815 		vp->v_lastw = 0;
816 		vp->v_lasta = 0;
817 		vp->v_cstart = 0;
818 		vp->v_clen = 0;
819 		vp->v_socket = 0;
820 		lockdestroy(vp->v_vnlock);
821 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
822 		KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
823 		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
824 		KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
825 		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
826 	} else {
827 		numvnodes++;
828 		mtx_unlock(&vnode_free_list_mtx);
829 
830 		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
831 		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
832 		VI_LOCK(vp);
833 		vp->v_dd = vp;
834 		vp->v_vnlock = &vp->v_lock;
835 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
836 		cache_purge(vp);		/* Sets up v_id. */
837 		LIST_INIT(&vp->v_cache_src);
838 		TAILQ_INIT(&vp->v_cache_dst);
839 	}
840 
841 	TAILQ_INIT(&vp->v_cleanblkhd);
842 	TAILQ_INIT(&vp->v_dirtyblkhd);
843 	vp->v_type = VNON;
844 	vp->v_tag = tag;
845 	vp->v_op = vops;
846 	*vpp = vp;
847 	vp->v_usecount = 1;
848 	vp->v_data = 0;
849 	vp->v_cachedid = -1;
850 	VI_UNLOCK(vp);
851 	if (pollinfo != NULL) {
852 		mtx_destroy(&pollinfo->vpi_lock);
853 		uma_zfree(vnodepoll_zone, pollinfo);
854 	}
855 #ifdef MAC
856 	mac_init_vnode(vp);
857 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
858 		mac_associate_vnode_singlelabel(mp, vp);
859 #endif
860 	delmntque(vp);
861 	if (mp != NULL) {
862 		insmntque(vp, mp);
863 		vp->v_bsize = mp->mnt_stat.f_iosize;
864 	}
865 
866 	return (0);
867 }
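
/*
 * A filesystem's vget routine typically drives getnewvnode() as follows.
 * This is only a sketch: the "myfs" tag, vnode operations vector and
 * private inode pointer are illustrative, not any particular
 * filesystem's code.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = getnewvnode("myfs", mp, myfs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;
 *	vp->v_type = VREG;
 */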
868 
869 /*
870  * Delete from old mount point vnode list, if on one.
871  */
872 static void
873 delmntque(struct vnode *vp)
874 {
875 	struct mount *mp;
876 
877 	if (vp->v_mount == NULL)
878 		return;
879 	mp = vp->v_mount;
880 	MNT_ILOCK(mp);
881 	vp->v_mount = NULL;
882 	KASSERT(mp->mnt_nvnodelistsize > 0,
883 		("bad mount point vnode list size"));
884 	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
885 	mp->mnt_nvnodelistsize--;
886 	MNT_IUNLOCK(mp);
887 }
888 
889 /*
890  * Insert into list of vnodes for the new mount point, if available.
891  */
892 static void
893 insmntque(struct vnode *vp, struct mount *mp)
894 {
895 
896 	vp->v_mount = mp;
897 	KASSERT(mp != NULL, ("Don't call insmntque(foo, NULL)"));
898 	MNT_ILOCK(vp->v_mount);
899 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
900 	mp->mnt_nvnodelistsize++;
901 	MNT_IUNLOCK(vp->v_mount);
902 }
903 
904 /*
905  * Update outstanding I/O count and do wakeup if requested.
906  */
907 void
908 vwakeup(bp)
909 	register struct buf *bp;
910 {
911 	register struct vnode *vp;
912 
913 	bp->b_flags &= ~B_WRITEINPROG;
914 	if ((vp = bp->b_vp)) {
915 		VI_LOCK(vp);
916 		vp->v_numoutput--;
917 		if (vp->v_numoutput < 0)
918 			panic("vwakeup: neg numoutput");
919 		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
920 			vp->v_iflag &= ~VI_BWAIT;
921 			wakeup(&vp->v_numoutput);
922 		}
923 		VI_UNLOCK(vp);
924 	}
925 }
926 
927 /*
928  * Flush out and invalidate all buffers associated with a vnode.
929  * Called with the underlying object locked.
930  */
931 int
932 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
933 	struct vnode *vp;
934 	int flags;
935 	struct ucred *cred;
936 	struct thread *td;
937 	int slpflag, slptimeo;
938 {
939 	struct buf *blist;
940 	int error;
941 	vm_object_t object;
942 
943 	GIANT_REQUIRED;
944 
945 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
946 
947 	VI_LOCK(vp);
948 	if (flags & V_SAVE) {
949 		while (vp->v_numoutput) {
950 			vp->v_iflag |= VI_BWAIT;
951 			error = msleep(&vp->v_numoutput, VI_MTX(vp),
952 			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
953 			if (error) {
954 				VI_UNLOCK(vp);
955 				return (error);
956 			}
957 		}
958 		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
959 			VI_UNLOCK(vp);
960 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
961 				return (error);
962 			/*
963 			 * XXX We could save a lock/unlock if this was only
964 			 * enabled under INVARIANTS
965 			 */
966 			VI_LOCK(vp);
967 			if (vp->v_numoutput > 0 ||
968 			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
969 				panic("vinvalbuf: dirty bufs");
970 		}
971 	}
972 	/*
973 	 * If you alter this loop, please notice that the interlock is dropped and
974 	 * reacquired in flushbuflist.  Special care is needed to ensure that
975 	 * no race conditions occur from this.
976 	 */
977 	for (error = 0;;) {
978 		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
979 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
980 			if (error)
981 				break;
982 			continue;
983 		}
984 		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
985 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
986 			if (error)
987 				break;
988 			continue;
989 		}
990 		break;
991 	}
992 	if (error) {
993 		VI_UNLOCK(vp);
994 		return (error);
995 	}
996 
997 	/*
998 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
999 	 * have write I/O in progress, but if there is a VM object then the
1000 	 * VM object can also have read I/O in progress.
1001 	 */
1002 	do {
1003 		while (vp->v_numoutput > 0) {
1004 			vp->v_iflag |= VI_BWAIT;
1005 			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
1006 		}
1007 		VI_UNLOCK(vp);
1008 		if (VOP_GETVOBJECT(vp, &object) == 0) {
1009 			VM_OBJECT_LOCK(object);
1010 			vm_object_pip_wait(object, "vnvlbx");
1011 			VM_OBJECT_UNLOCK(object);
1012 		}
1013 		VI_LOCK(vp);
1014 	} while (vp->v_numoutput > 0);
1015 	VI_UNLOCK(vp);
1016 
1017 	/*
1018 	 * Destroy the copy in the VM cache, too.
1019 	 */
1020 	if (VOP_GETVOBJECT(vp, &object) == 0) {
1021 		VM_OBJECT_LOCK(object);
1022 		vm_object_page_remove(object, 0, 0,
1023 			(flags & V_SAVE) ? TRUE : FALSE);
1024 		VM_OBJECT_UNLOCK(object);
1025 	}
1026 
1027 #ifdef INVARIANTS
1028 	VI_LOCK(vp);
1029 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1030 	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
1031 	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
1032 		panic("vinvalbuf: flush failed");
1033 	VI_UNLOCK(vp);
1034 #endif
1035 	return (0);
1036 }
1037 
1038 /*
1039  * Flush out buffers on the specified list.
1040  *
1041  */
1042 static int
1043 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
1044 	struct buf *blist;
1045 	int flags;
1046 	struct vnode *vp;
1047 	int slpflag, slptimeo;
1048 	int *errorp;
1049 {
1050 	struct buf *bp, *nbp;
1051 	int found, error;
1052 
1053 	ASSERT_VI_LOCKED(vp, "flushbuflist");
1054 
1055 	for (found = 0, bp = blist; bp; bp = nbp) {
1056 		nbp = TAILQ_NEXT(bp, b_vnbufs);
1057 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1058 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1059 			continue;
1060 		}
1061 		found += 1;
1062 		error = BUF_TIMELOCK(bp,
1063 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
1064 		    "flushbuf", slpflag, slptimeo);
1065 		if (error) {
1066 			if (error != ENOLCK)
1067 				*errorp = error;
1068 			goto done;
1069 		}
1070 		/*
1071 		 * XXX Since there are no node locks for NFS, I
1072 		 * believe there is a slight chance that a delayed
1073 		 * write will occur while sleeping just above, so
1074 		 * check for it.  Note that vfs_bio_awrite expects
1075 		 * buffers to reside on a queue, while bwrite and
1076 		 * brelse do not.
1077 		 */
1078 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1079 			(flags & V_SAVE)) {
1080 
1081 			if (bp->b_vp == vp) {
1082 				if (bp->b_flags & B_CLUSTEROK) {
1083 					vfs_bio_awrite(bp);
1084 				} else {
1085 					bremfree(bp);
1086 					bp->b_flags |= B_ASYNC;
1087 					bwrite(bp);
1088 				}
1089 			} else {
1090 				bremfree(bp);
1091 				(void) bwrite(bp);
1092 			}
1093 			goto done;
1094 		}
1095 		bremfree(bp);
1096 		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1097 		bp->b_flags &= ~B_ASYNC;
1098 		brelse(bp);
1099 		VI_LOCK(vp);
1100 	}
1101 	return (found);
1102 done:
1103 	VI_LOCK(vp);
1104 	return (found);
1105 }
1106 
1107 /*
1108  * Truncate a file's buffers and pages to a specified length.  This
1109  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1110  * sync activity.
1111  */
1112 int
1113 vtruncbuf(vp, cred, td, length, blksize)
1114 	register struct vnode *vp;
1115 	struct ucred *cred;
1116 	struct thread *td;
1117 	off_t length;
1118 	int blksize;
1119 {
1120 	register struct buf *bp;
1121 	struct buf *nbp;
1122 	int anyfreed;
1123 	int trunclbn;
1124 
1125 	/*
1126 	 * Round up to the *next* lbn.
1127 	 */
1128 	trunclbn = (length + blksize - 1) / blksize;
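	/*
	 * For example, truncating to length 10000 with 4096-byte blocks
	 * gives trunclbn 3: buffers for lbn 3 and beyond are discarded
	 * below, while lbn 2 (bytes 8192-12287) survives.
	 */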
1129 
1130 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1131 restart:
1132 	VI_LOCK(vp);
1133 	anyfreed = 1;
1134 	for (;anyfreed;) {
1135 		anyfreed = 0;
1136 		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
1137 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1138 			if (bp->b_lblkno >= trunclbn) {
1139 				if (BUF_LOCK(bp,
1140 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1141 				    VI_MTX(vp)) == ENOLCK)
1142 					goto restart;
1143 
1144 				bremfree(bp);
1145 				bp->b_flags |= (B_INVAL | B_RELBUF);
1146 				bp->b_flags &= ~B_ASYNC;
1147 				brelse(bp);
1148 				anyfreed = 1;
1149 
1150 				if (nbp &&
1151 				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1152 				    (nbp->b_vp != vp) ||
1153 				    (nbp->b_flags & B_DELWRI))) {
1154 					goto restart;
1155 				}
1156 				VI_LOCK(vp);
1157 			}
1158 		}
1159 
1160 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1161 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1162 			if (bp->b_lblkno >= trunclbn) {
1163 				if (BUF_LOCK(bp,
1164 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1165 				    VI_MTX(vp)) == ENOLCK)
1166 					goto restart;
1167 				bremfree(bp);
1168 				bp->b_flags |= (B_INVAL | B_RELBUF);
1169 				bp->b_flags &= ~B_ASYNC;
1170 				brelse(bp);
1171 				anyfreed = 1;
1172 				if (nbp &&
1173 				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1174 				    (nbp->b_vp != vp) ||
1175 				    (nbp->b_flags & B_DELWRI) == 0)) {
1176 					goto restart;
1177 				}
1178 				VI_LOCK(vp);
1179 			}
1180 		}
1181 	}
1182 
1183 	if (length > 0) {
1184 restartsync:
1185 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1186 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1187 			if (bp->b_lblkno > 0)
1188 				continue;
1189 			/*
1190 			 * Since we hold the vnode lock this should only
1191 			 * fail if we're racing with the buf daemon.
1192 			 */
1193 			if (BUF_LOCK(bp,
1194 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1195 			    VI_MTX(vp)) == ENOLCK) {
1196 				goto restart;
1197 			}
1198 			KASSERT((bp->b_flags & B_DELWRI),
1199 			    ("buf(%p) on dirty queue without DELWRI", bp));
1200 
1201 			bremfree(bp);
1202 			bawrite(bp);
1203 			VI_LOCK(vp);
1204 			goto restartsync;
1205 		}
1206 	}
1207 
1208 	while (vp->v_numoutput > 0) {
1209 		vp->v_iflag |= VI_BWAIT;
1210 		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
1211 	}
1212 	VI_UNLOCK(vp);
1213 	vnode_pager_setsize(vp, length);
1214 
1215 	return (0);
1216 }
1217 
1218 /*
1219  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1220  * 		 a vnode.
1221  *
1222  *	NOTE: We have to deal with the special case of a background bitmap
1223  *	buffer, a situation where two buffers will have the same logical
1224  *	block offset.  We want (1) only the foreground buffer to be found
1225  *	by a lookup, and (2) to differentiate between the foreground and
1226  *	background buffer in the splay tree algorithm because the splay
1227  *	tree cannot normally handle multiple entities with the same 'index'.
1228  *	We accomplish this by adding differentiating flags to the splay tree's
1229  *	numerical domain.
1230  */
1231 static
1232 struct buf *
1233 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1234 {
1235 	struct buf dummy;
1236 	struct buf *lefttreemax, *righttreemin, *y;
1237 
1238 	if (root == NULL)
1239 		return (NULL);
1240 	lefttreemax = righttreemin = &dummy;
1241 	for (;;) {
1242 		if (lblkno < root->b_lblkno ||
1243 		    (lblkno == root->b_lblkno &&
1244 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1245 			if ((y = root->b_left) == NULL)
1246 				break;
1247 			if (lblkno < y->b_lblkno) {
1248 				/* Rotate right. */
1249 				root->b_left = y->b_right;
1250 				y->b_right = root;
1251 				root = y;
1252 				if ((y = root->b_left) == NULL)
1253 					break;
1254 			}
1255 			/* Link into the new root's right tree. */
1256 			righttreemin->b_left = root;
1257 			righttreemin = root;
1258 		} else if (lblkno > root->b_lblkno ||
1259 		    (lblkno == root->b_lblkno &&
1260 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1261 			if ((y = root->b_right) == NULL)
1262 				break;
1263 			if (lblkno > y->b_lblkno) {
1264 				/* Rotate left. */
1265 				root->b_right = y->b_left;
1266 				y->b_left = root;
1267 				root = y;
1268 				if ((y = root->b_right) == NULL)
1269 					break;
1270 			}
1271 			/* Link into the new root's left tree. */
1272 			lefttreemax->b_right = root;
1273 			lefttreemax = root;
1274 		} else {
1275 			break;
1276 		}
1277 		root = y;
1278 	}
1279 	/* Assemble the new root. */
1280 	lefttreemax->b_right = root->b_left;
1281 	righttreemin->b_left = root->b_right;
1282 	root->b_left = dummy.b_right;
1283 	root->b_right = dummy.b_left;
1284 	return (root);
1285 }
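
/*
 * After buf_splay() returns, the buffer matching (or nearest to) the
 * given (lblkno, BX_BKGRDMARKER) key has been rotated to the root.
 * buf_vlist_remove() below relies on this by asserting that the splayed
 * root is the buffer being removed, and buf_vlist_add() splices each
 * new buffer in as the root.
 */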
1286 
1287 static
1288 void
1289 buf_vlist_remove(struct buf *bp)
1290 {
1291 	struct vnode *vp = bp->b_vp;
1292 	struct buf *root;
1293 
1294 	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
1295 	if (bp->b_xflags & BX_VNDIRTY) {
1296 		if (bp != vp->v_dirtyblkroot) {
1297 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1298 			    vp->v_dirtyblkroot);
1299 			KASSERT(root == bp,
1300 			    ("splay lookup failed during dirty remove"));
1301 		}
1302 		if (bp->b_left == NULL) {
1303 			root = bp->b_right;
1304 		} else {
1305 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1306 			    bp->b_left);
1307 			root->b_right = bp->b_right;
1308 		}
1309 		vp->v_dirtyblkroot = root;
1310 		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
1311 		vp->v_dirtybufcnt--;
1312 	} else {
1313 		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
1314 		if (bp != vp->v_cleanblkroot) {
1315 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1316 			    vp->v_cleanblkroot);
1317 			KASSERT(root == bp,
1318 			    ("splay lookup failed during clean remove"));
1319 		}
1320 		if (bp->b_left == NULL) {
1321 			root = bp->b_right;
1322 		} else {
1323 			root = buf_splay(bp->b_lblkno, bp->b_xflags,
1324 			    bp->b_left);
1325 			root->b_right = bp->b_right;
1326 		}
1327 		vp->v_cleanblkroot = root;
1328 		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
1329 		vp->v_cleanbufcnt--;
1330 	}
1331 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1332 }
1333 
1334 /*
1335  * Add the buffer to the sorted clean or dirty block list using a
1336  * splay tree algorithm.
1337  *
1338  * NOTE: xflags is passed as a constant, optimizing this inline function!
1339  */
1340 static
1341 void
1342 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
1343 {
1344 	struct buf *root;
1345 
1346 	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
1347 	bp->b_xflags |= xflags;
1348 	if (xflags & BX_VNDIRTY) {
1349 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1350 		if (root == NULL) {
1351 			bp->b_left = NULL;
1352 			bp->b_right = NULL;
1353 			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
1354 		} else if (bp->b_lblkno < root->b_lblkno ||
1355 		    (bp->b_lblkno == root->b_lblkno &&
1356 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1357 			bp->b_left = root->b_left;
1358 			bp->b_right = root;
1359 			root->b_left = NULL;
1360 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1361 		} else {
1362 			bp->b_right = root->b_right;
1363 			bp->b_left = root;
1364 			root->b_right = NULL;
1365 			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
1366 			    root, bp, b_vnbufs);
1367 		}
1368 		vp->v_dirtybufcnt++;
1369 		vp->v_dirtyblkroot = bp;
1370 	} else {
1371 		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
1372 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1373 		if (root == NULL) {
1374 			bp->b_left = NULL;
1375 			bp->b_right = NULL;
1376 			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
1377 		} else if (bp->b_lblkno < root->b_lblkno ||
1378 		    (bp->b_lblkno == root->b_lblkno &&
1379 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1380 			bp->b_left = root->b_left;
1381 			bp->b_right = root;
1382 			root->b_left = NULL;
1383 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1384 		} else {
1385 			bp->b_right = root->b_right;
1386 			bp->b_left = root;
1387 			root->b_right = NULL;
1388 			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
1389 			    root, bp, b_vnbufs);
1390 		}
1391 		vp->v_cleanbufcnt++;
1392 		vp->v_cleanblkroot = bp;
1393 	}
1394 }
1395 
1396 /*
1397  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1398  * shadow buffers used in background bitmap writes.
1399  *
1400  * This code isn't quite as efficient as it could be because we are maintaining
1401  * two sorted lists and do not know which list the block resides in.
1402  *
1403  * During a "make buildworld" the desired buffer is found at one of
1404  * the roots more than 60% of the time.  Thus, checking both roots
1405  * before performing either splay eliminates unnecessary splays on the
1406  * first tree splayed.
1407  */
1408 struct buf *
1409 gbincore(struct vnode *vp, daddr_t lblkno)
1410 {
1411 	struct buf *bp;
1412 
1413 	GIANT_REQUIRED;
1414 
1415 	ASSERT_VI_LOCKED(vp, "gbincore");
1416 	if ((bp = vp->v_cleanblkroot) != NULL &&
1417 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1418 		return (bp);
1419 	if ((bp = vp->v_dirtyblkroot) != NULL &&
1420 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1421 		return (bp);
1422 	if ((bp = vp->v_cleanblkroot) != NULL) {
1423 		vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
1424 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1425 			return (bp);
1426 	}
1427 	if ((bp = vp->v_dirtyblkroot) != NULL) {
1428 		vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
1429 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1430 			return (bp);
1431 	}
1432 	return (NULL);
1433 }
1434 
1435 /*
1436  * Associate a buffer with a vnode.
1437  */
1438 void
1439 bgetvp(vp, bp)
1440 	register struct vnode *vp;
1441 	register struct buf *bp;
1442 {
1443 
1444 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1445 
1446 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1447 	    ("bgetvp: bp already attached! %p", bp));
1448 
1449 	ASSERT_VI_LOCKED(vp, "bgetvp");
1450 	vholdl(vp);
1451 	bp->b_vp = vp;
1452 	bp->b_dev = vn_todev(vp);
1453 	/*
1454 	 * Insert onto list for new vnode.
1455 	 */
1456 	buf_vlist_add(bp, vp, BX_VNCLEAN);
1457 }
1458 
1459 /*
1460  * Disassociate a buffer from a vnode.
1461  */
1462 void
1463 brelvp(bp)
1464 	register struct buf *bp;
1465 {
1466 	struct vnode *vp;
1467 
1468 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1469 
1470 	/*
1471 	 * Delete from old vnode list, if on one.
1472 	 */
1473 	vp = bp->b_vp;
1474 	VI_LOCK(vp);
1475 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1476 		buf_vlist_remove(bp);
1477 	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1478 		vp->v_iflag &= ~VI_ONWORKLST;
1479 		mtx_lock(&sync_mtx);
1480 		LIST_REMOVE(vp, v_synclist);
1481  		syncer_worklist_len--;
1482 		mtx_unlock(&sync_mtx);
1483 	}
1484 	vdropl(vp);
1485 	bp->b_vp = (struct vnode *) 0;
1486 	if (bp->b_object)
1487 		bp->b_object = NULL;
1488 	VI_UNLOCK(vp);
1489 }
1490 
1491 /*
1492  * Add an item to the syncer work queue.
1493  */
1494 static void
1495 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
1496 {
1497 	int slot;
1498 
1499 	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
1500 
1501 	mtx_lock(&sync_mtx);
1502 	if (vp->v_iflag & VI_ONWORKLST)
1503 		LIST_REMOVE(vp, v_synclist);
1504 	else {
1505 		vp->v_iflag |= VI_ONWORKLST;
1506  		syncer_worklist_len++;
1507 	}
1508 
1509 	if (delay > syncer_maxdelay - 2)
1510 		delay = syncer_maxdelay - 2;
1511 	slot = (syncer_delayno + delay) & syncer_mask;
1512 
1513 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
1514 	mtx_unlock(&sync_mtx);
1515 }
1516 
1517 static int
1518 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1519 {
1520 	int error, len;
1521 
1522 	mtx_lock(&sync_mtx);
1523 	len = syncer_worklist_len - sync_vnode_count;
1524 	mtx_unlock(&sync_mtx);
1525 	error = SYSCTL_OUT(req, &len, sizeof(len));
1526 	return (error);
1527 }
1528 
1529 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1530     sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
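
/*
 * The value reported by "sysctl vfs.worklist_len" is thus a net length:
 * the syncer's own placeholder vnodes (sync_vnode_count of them) are
 * subtracted so that only real work items are counted.
 */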
1531 
1532 struct  proc *updateproc;
1533 static void sched_sync(void);
1534 static struct kproc_desc up_kp = {
1535 	"syncer",
1536 	sched_sync,
1537 	&updateproc
1538 };
1539 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1540 
1541 /*
1542  * System filesystem synchronizer daemon.
1543  */
1544 static void
1545 sched_sync(void)
1546 {
1547 	struct synclist *next;
1548 	struct synclist *slp;
1549 	struct vnode *vp;
1550 	struct mount *mp;
1551 	long starttime;
1552 	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
1553 	static int dummychan;
1554 	int last_work_seen;
1555 	int net_worklist_len;
1556 	int syncer_final_iter;
1557 	int first_printf;
1558 
1559 	mtx_lock(&Giant);
1560 	last_work_seen = 0;
1561 	syncer_final_iter = 0;
1562 	first_printf = 1;
1563 	syncer_state = SYNCER_RUNNING;
1564 	starttime = time_second;
1565 
1566 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1567 	    SHUTDOWN_PRI_LAST);
1568 
1569 	for (;;) {
1570 		mtx_lock(&sync_mtx);
1571 		if (syncer_state == SYNCER_FINAL_DELAY &&
1572 		    syncer_final_iter == 0) {
1573 			mtx_unlock(&sync_mtx);
1574 			kthread_suspend_check(td->td_proc);
1575 			mtx_lock(&sync_mtx);
1576 		}
1577 		net_worklist_len = syncer_worklist_len - sync_vnode_count;
1578 		if (syncer_state != SYNCER_RUNNING &&
1579 		    starttime != time_second) {
1580 			if (first_printf) {
1581 				printf("\nSyncer syncing, vnodes remaining...");
1582 				first_printf = 0;
1583 			}
1584 			printf("%d ", net_worklist_len);
1585 		}
1586 		starttime = time_second;
1587 
1588 		/*
1589 		 * Push files whose dirty time has expired.  Be careful
1590 		 * of interrupt race on slp queue.
1591 		 *
1592 		 * Skip over empty worklist slots when shutting down.
1593 		 */
1594 		do {
1595 			slp = &syncer_workitem_pending[syncer_delayno];
1596 			syncer_delayno += 1;
1597 			if (syncer_delayno == syncer_maxdelay)
1598 				syncer_delayno = 0;
1599 			next = &syncer_workitem_pending[syncer_delayno];
1600 			/*
1601 			 * If the worklist has wrapped since it
1602 			 * was emptied of all but syncer vnodes,
1603 			 * switch to the FINAL_DELAY state and run
1604 			 * for one more second.
1605 			 */
1606 			if (syncer_state == SYNCER_SHUTTING_DOWN &&
1607 			    net_worklist_len == 0 &&
1608 			    last_work_seen == syncer_delayno) {
1609 				syncer_state = SYNCER_FINAL_DELAY;
1610 				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1611 			}
1612 		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1613 		    syncer_worklist_len > 0);
1614 
1615 		/*
1616 		 * Keep track of the last time there was anything
1617 		 * on the worklist other than syncer vnodes.
1618 		 * Return to the SHUTTING_DOWN state if any
1619 		 * new work appears.
1620 		 */
1621 		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1622 			last_work_seen = syncer_delayno;
1623 		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1624 			syncer_state = SYNCER_SHUTTING_DOWN;
1625 		while ((vp = LIST_FIRST(slp)) != NULL) {
1626 			if (VOP_ISLOCKED(vp, NULL) != 0 ||
1627 			    vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1628 				LIST_REMOVE(vp, v_synclist);
1629 				LIST_INSERT_HEAD(next, vp, v_synclist);
1630 				continue;
1631 			}
1632 			if (VI_TRYLOCK(vp) == 0) {
1633 				LIST_REMOVE(vp, v_synclist);
1634 				LIST_INSERT_HEAD(next, vp, v_synclist);
1635 				vn_finished_write(mp);
1636 				continue;
1637 			}
1638 			/*
1639 			 * We use vhold in case the vnode does not
1640 			 * successfully sync.  vhold prevents the vnode from
1641 			 * going away when we unlock the sync_mtx so that
1642 			 * we can acquire the vnode interlock.
1643 			 */
1644 			vholdl(vp);
1645 			mtx_unlock(&sync_mtx);
1646 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK, td);
1647 			(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1648 			VOP_UNLOCK(vp, 0, td);
1649 			vn_finished_write(mp);
1650 			VI_LOCK(vp);
1651 			if ((vp->v_iflag & VI_ONWORKLST) != 0) {
1652 				/*
1653 				 * Put us back on the worklist.  The worklist
1654 				 * routine will remove us from our current
1655 				 * position and then add us back in at a later
1656 				 * position.
1657 				 */
1658 				vn_syncer_add_to_worklist(vp, syncdelay);
1659 			}
1660 			vdropl(vp);
1661 			VI_UNLOCK(vp);
1662 			mtx_lock(&sync_mtx);
1663 		}
1664 		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1665 			syncer_final_iter--;
1666 		mtx_unlock(&sync_mtx);
1667 
1668 		/*
1669 		 * Do soft update processing.
1670 		 */
1671 		if (softdep_process_worklist_hook != NULL)
1672 			(*softdep_process_worklist_hook)(NULL);
1673 
1674 		/*
1675 		 * The variable rushjob allows the kernel to speed up the
1676 		 * processing of the filesystem syncer process. A rushjob
1677 		 * value of N tells the filesystem syncer to process the next
1678 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1679 		 * is used by the soft update code to speed up the filesystem
1680 		 * syncer process when the incore state is getting so far
1681 		 * ahead of the disk that the kernel memory pool is being
1682 		 * threatened with exhaustion.
1683 		 */
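		/*
		 * For example, with the default syncdelay of 30,
		 * speedup_syncer() lets rushjob grow to at most 15, so up
		 * to 15 queue slots may be drained back-to-back before we
		 * pause again below.
		 */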
1684 		mtx_lock(&sync_mtx);
1685 		if (rushjob > 0) {
1686 			rushjob -= 1;
1687 			mtx_unlock(&sync_mtx);
1688 			continue;
1689 		}
1690 		mtx_unlock(&sync_mtx);
1691 		/*
1692 		 * Just sleep for a short period of time between
1693 		 * iterations when shutting down, to allow some I/O
1694 		 * to happen.
1695 		 *
1696 		 * If it has taken us less than a second to process the
1697 		 * current work, then wait. Otherwise start right over
1698 		 * again. We can still lose time if any single round
1699 		 * takes more than two seconds, but it does not really
1700 		 * matter as we are just trying to generally pace the
1701 		 * filesystem activity.
1702 		 */
1703 		if (syncer_state != SYNCER_RUNNING)
1704 			tsleep(&dummychan, PPAUSE, "syncfnl",
1705 			    hz / SYNCER_SHUTDOWN_SPEEDUP);
1706 		else if (time_second == starttime)
1707 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1708 	}
1709 }
1710 
1711 /*
1712  * Request the syncer daemon to speed up its work.
1713  * We never push it to speed up more than half of its
1714  * normal turn time; otherwise it could take over the CPU.
1715  */
1716 int
1717 speedup_syncer()
1718 {
1719 	struct thread *td;
1720 	int ret = 0;
1721 
1722 	td = FIRST_THREAD_IN_PROC(updateproc);
1723 	sleepq_remove(td, &lbolt);
1724 	mtx_lock(&sync_mtx);
1725 	if (rushjob < syncdelay / 2) {
1726 		rushjob += 1;
1727 		stat_rush_requests += 1;
1728 		ret = 1;
1729 	}
1730 	mtx_unlock(&sync_mtx);
1731 	return (ret);
1732 }
1733 
1734 /*
1735  * Tell the syncer to speed up its work and run through its work
1736  * list several times, then tell it to shut down.
1737  */
1738 static void
1739 syncer_shutdown(void *arg, int howto)
1740 {
1741 	struct thread *td;
1742 
1743 	td = FIRST_THREAD_IN_PROC(updateproc);
1744 	sleepq_remove(td, &lbolt);
1745 	mtx_lock(&sync_mtx);
1746 	syncer_state = SYNCER_SHUTTING_DOWN;
1747 	rushjob = 0;
1748 	mtx_unlock(&sync_mtx);
1749 	kproc_shutdown(arg, howto);
1750 }
1751 
1752 /*
1753  * Associate a p-buffer with a vnode.
1754  *
1755  * Also sets B_PAGING flag to indicate that vnode is not fully associated
1756  * with the buffer.  i.e. the bp has not been linked into the vnode or
1757  * ref-counted.
1758  */
1759 void
1760 pbgetvp(vp, bp)
1761 	register struct vnode *vp;
1762 	register struct buf *bp;
1763 {
1764 
1765 	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1766 
1767 	bp->b_vp = vp;
1768 	bp->b_object = vp->v_object;
1769 	bp->b_flags |= B_PAGING;
1770 	bp->b_dev = vn_todev(vp);
1771 }
1772 
1773 /*
1774  * Disassociate a p-buffer from a vnode.
1775  */
1776 void
1777 pbrelvp(bp)
1778 	register struct buf *bp;
1779 {
1780 
1781 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1782 
1783 	/* XXX REMOVE ME */
1784 	VI_LOCK(bp->b_vp);
1785 	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1786 		panic(
1787 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1788 		    bp,
1789 		    (int)bp->b_flags
1790 		);
1791 	}
1792 	VI_UNLOCK(bp->b_vp);
1793 	bp->b_vp = (struct vnode *) 0;
1794 	bp->b_object = NULL;
1795 	bp->b_flags &= ~B_PAGING;
1796 }
1797 
1798 /*
1799  * Reassign a buffer from one vnode to another.
1800  * Used to assign file specific control information
1801  * (indirect blocks) to the vnode to which they belong.
1802  */
1803 void
1804 reassignbuf(struct buf *bp)
1805 {
1806 	struct vnode *vp;
1807 	int delay;
1808 
1809 	vp = bp->b_vp;
1810 	++reassignbufcalls;
1811 
1812 	/*
1813 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1814 	 * is not fully linked in.
1815 	 */
1816 	if (bp->b_flags & B_PAGING)
1817 		panic("cannot reassign paging buffer");
1818 
1819 	/*
1820 	 * Delete from old vnode list, if on one.
1821 	 */
1822 	VI_LOCK(vp);
1823 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1824 		buf_vlist_remove(bp);
1825 	/*
1826 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1827 	 * of clean buffers.
1828 	 */
1829 	if (bp->b_flags & B_DELWRI) {
1830 		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
1831 			switch (vp->v_type) {
1832 			case VDIR:
1833 				delay = dirdelay;
1834 				break;
1835 			case VCHR:
1836 				delay = metadelay;
1837 				break;
1838 			default:
1839 				delay = filedelay;
1840 			}
1841 			vn_syncer_add_to_worklist(vp, delay);
1842 		}
1843 		buf_vlist_add(bp, vp, BX_VNDIRTY);
1844 	} else {
1845 		buf_vlist_add(bp, vp, BX_VNCLEAN);
1846 
1847 		if ((vp->v_iflag & VI_ONWORKLST) &&
1848 		    TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1849 			mtx_lock(&sync_mtx);
1850 			LIST_REMOVE(vp, v_synclist);
1851 			syncer_worklist_len--;
1852 			mtx_unlock(&sync_mtx);
1853 			vp->v_iflag &= ~VI_ONWORKLST;
1854 		}
1855 	}
1856 	VI_UNLOCK(vp);
1857 }
1858 
1859 /*
1860  * Create a vnode for a device.
1861  * Used for mounting the root filesystem.
1862  */
1863 int
1864 bdevvp(dev, vpp)
1865 	struct cdev *dev;
1866 	struct vnode **vpp;
1867 {
1868 	register struct vnode *vp;
1869 	struct vnode *nvp;
1870 	int error;
1871 
1872 	if (dev == NULL) {
1873 		*vpp = NULLVP;
1874 		return (ENXIO);
1875 	}
1876 	if (vfinddev(dev, vpp))
1877 		return (0);
1878 
1879 	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1880 	if (error) {
1881 		*vpp = NULLVP;
1882 		return (error);
1883 	}
1884 	vp = nvp;
1885 	vp->v_type = VCHR;
1886 	vp->v_bsize = DEV_BSIZE;
1887 	addalias(vp, dev);
1888 	*vpp = vp;
1889 	return (0);
1890 }
1891 
1892 static void
1893 v_incr_usecount(struct vnode *vp, int delta)
1894 {
1895 
1896 	vp->v_usecount += delta;
1897 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1898 		mtx_lock(&spechash_mtx);
1899 		vp->v_rdev->si_usecount += delta;
1900 		mtx_unlock(&spechash_mtx);
1901 	}
1902 }
1903 
1904 /*
1905  * Add vnode to the alias list hung off the struct cdev *.
1906  *
1907  * The reason for this gunk is that multiple vnodes can reference
1908  * the same physical device, so checking vp->v_usecount to see
1909  * how many users there are is inadequate; the v_usecount values
1910  * for all the vnodes need to be accumulated.  vcount() does that.
1911  */
1912 struct vnode *
1913 addaliasu(nvp, nvp_rdev)
1914 	struct vnode *nvp;
1915 	dev_t nvp_rdev;
1916 {
1917 	struct vnode *ovp;
1918 	vop_t **ops;
1919 	struct cdev *dev;
1920 
1921 	if (nvp->v_type == VBLK)
1922 		return (nvp);
1923 	if (nvp->v_type != VCHR)
1924 		panic("addaliasu on non-special vnode");
1925 	dev = findcdev(nvp_rdev);
1926 	if (dev == NULL)
1927 		return (nvp);
1928 	/*
1929 	 * Check to see if we have a bdevvp vnode with no associated
1930 	 * filesystem. If so, we want to associate the filesystem of
1931 	 * the newly created vnode with the bdevvp vnode and discard
1932 	 * the new vnode rather than leaving the bdevvp vnode lying
1933 	 * around with no associated filesystem.
1934 	 */
1935 	if (vfinddev(dev, &ovp) == 0 || ovp->v_data != NULL) {
1936 		addalias(nvp, dev);
1937 		return (nvp);
1938 	}
1939 	/*
1940 	 * Discard unneeded vnode, but save its node specific data.
1941 	 * Note that if there is a lock, it is carried over in the
1942 	 * node specific data to the replacement vnode.
1943 	 */
1944 	vref(ovp);
1945 	ovp->v_data = nvp->v_data;
1946 	ovp->v_tag = nvp->v_tag;
1947 	nvp->v_data = NULL;
1948 	lockdestroy(ovp->v_vnlock);
1949 	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
1950 	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
1951 	ops = ovp->v_op;
1952 	ovp->v_op = nvp->v_op;
1953 	if (VOP_ISLOCKED(nvp, curthread)) {
1954 		VOP_UNLOCK(nvp, 0, curthread);
1955 		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
1956 	}
1957 	nvp->v_op = ops;
1958 	delmntque(ovp);
1959 	insmntque(ovp, nvp->v_mount);
1960 	vrele(nvp);
1961 	vgone(nvp);
1962 	return (ovp);
1963 }
1964 
1965 /* This is a local helper function that does the same as addaliasu(), but
1966  * takes a struct cdev * instead of a dev_t. */
1967 static void
1968 addalias(nvp, dev)
1969 	struct vnode *nvp;
1970 	struct cdev *dev;
1971 {
1972 
1973 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
1974 	dev_ref(dev);
1975 	nvp->v_rdev = dev;
1976 	VI_LOCK(nvp);
1977 	mtx_lock(&spechash_mtx);
1978 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
1979 	dev->si_usecount += nvp->v_usecount;
1980 	mtx_unlock(&spechash_mtx);
1981 	VI_UNLOCK(nvp);
1982 }
1983 
1984 /*
1985  * Grab a particular vnode from the free list, increment its
1986  * reference count and lock it. The vnode lock bit is set if the
1987  * vnode is being eliminated in vgone. The process is awakened
1988  * when the transition is completed, and an error returned to
1989  * indicate that the vnode is no longer usable (possibly having
1990  * been changed to a new filesystem type).
1991  */
1992 int
1993 vget(vp, flags, td)
1994 	register struct vnode *vp;
1995 	int flags;
1996 	struct thread *td;
1997 {
1998 	int error;
1999 
2000 	/*
2001 	 * If the vnode is in the process of being cleaned out for
2002 	 * another use, we wait for the cleaning to finish and then
2003 	 * return failure. Cleaning is determined by checking that
2004 	 * the VI_XLOCK flag is set.
2005 	 */
2006 	if ((flags & LK_INTERLOCK) == 0)
2007 		VI_LOCK(vp);
2008 	if (vp->v_iflag & VI_XLOCK && vp->v_vxthread != curthread) {
2009 		if ((flags & LK_NOWAIT) == 0) {
2010 			vp->v_iflag |= VI_XWANT;
2011 			msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
2012 			return (ENOENT);
2013 		}
2014 		VI_UNLOCK(vp);
2015 		return (EBUSY);
2016 	}
2017 
2018 	v_incr_usecount(vp, 1);
2019 
2020 	if (VSHOULDBUSY(vp))
2021 		vbusy(vp);
2022 	if (flags & LK_TYPE_MASK) {
2023 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2024 			/*
2025 			 * must expand vrele here because we do not want
2026 			 * to call VOP_INACTIVE if the reference count
2027 			 * drops back to zero since it was never really
2028 			 * active. We must remove it from the free list
2029 			 * before sleeping so that multiple processes do
2030 			 * not try to recycle it.
2031 			 */
2032 			VI_LOCK(vp);
2033 			v_incr_usecount(vp, -1);
2034 			if (VSHOULDFREE(vp))
2035 				vfree(vp);
2036 			else
2037 				vlruvp(vp);
2038 			VI_UNLOCK(vp);
2039 		}
2040 		return (error);
2041 	}
2042 	VI_UNLOCK(vp);
2043 	return (0);
2044 }
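
/*
 * Illustrative usage sketch (not part of this file): a caller that
 * already holds the vnode interlock passes LK_INTERLOCK so vget()
 * consumes it; requesting a lock type such as LK_EXCLUSIVE returns the
 * vnode both referenced and locked, so the matching release is vput().
 *
 *	VI_LOCK(vp);
 *	if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
 *		... operate on the locked, referenced vnode ...
 *		vput(vp);
 *	}
 */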
2045 
2046 /*
2047  * Increase the reference count of a vnode.
2048  */
2049 void
2050 vref(struct vnode *vp)
2051 {
2052 
2053 	VI_LOCK(vp);
2054 	v_incr_usecount(vp, 1);
2055 	VI_UNLOCK(vp);
2056 }
2057 
2058 /*
2059  * Return reference count of a vnode.
2060  *
2061  * The results of this call are only guaranteed when some mechanism other
2062  * than the VI lock is used to stop other processes from gaining references
2063  * to the vnode.  This may be the case if the caller holds the only reference.
2064  * This is also useful when stale data is acceptable as race conditions may
2065  * be accounted for by some other means.
2066  */
2067 int
2068 vrefcnt(struct vnode *vp)
2069 {
2070 	int usecnt;
2071 
2072 	VI_LOCK(vp);
2073 	usecnt = vp->v_usecount;
2074 	VI_UNLOCK(vp);
2075 
2076 	return (usecnt);
2077 }
2078 
2079 
2080 /*
2081  * Vnode put/release.
2082  * If count drops to zero, call inactive routine and return to freelist.
2083  */
2084 void
2085 vrele(vp)
2086 	struct vnode *vp;
2087 {
2088 	struct thread *td = curthread;	/* XXX */
2089 
2090 	GIANT_REQUIRED;
2091 
2092 	KASSERT(vp != NULL, ("vrele: null vp"));
2093 
2094 	VI_LOCK(vp);
2095 
2096 	/* Skip this v_writecount check if we're going to panic below. */
2097 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2098 	    ("vrele: missed vn_close"));
2099 
2100 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2101 	    vp->v_usecount == 1)) {
2102 		v_incr_usecount(vp, -1);
2103 		VI_UNLOCK(vp);
2104 
2105 		return;
2106 	}
2107 
2108 	if (vp->v_usecount == 1) {
2109 		v_incr_usecount(vp, -1);
2110 		/*
2111 		 * We must call VOP_INACTIVE with the node locked. Mark
2112 		 * as VI_DOINGINACT to avoid recursion.
2113 		 */
2114 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2115 			VI_LOCK(vp);
2116 			vp->v_iflag |= VI_DOINGINACT;
2117 			VI_UNLOCK(vp);
2118 			VOP_INACTIVE(vp, td);
2119 			VI_LOCK(vp);
2120 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2121 			    ("vrele: lost VI_DOINGINACT"));
2122 			vp->v_iflag &= ~VI_DOINGINACT;
2123 		} else
2124 			VI_LOCK(vp);
2125 		if (VSHOULDFREE(vp))
2126 			vfree(vp);
2127 		else
2128 			vlruvp(vp);
2129 		VI_UNLOCK(vp);
2130 
2131 	} else {
2132 #ifdef DIAGNOSTIC
2133 		vprint("vrele: negative ref count", vp);
2134 #endif
2135 		VI_UNLOCK(vp);
2136 		panic("vrele: negative ref cnt");
2137 	}
2138 }
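
/*
 * Illustrative pairing (not part of this file): every vref() is
 * balanced by a vrele(), and dropping the last use reference is what
 * drives the VOP_INACTIVE() transition handled above.
 *
 *	vref(vp);
 *	... hand vp to another consumer ...
 *	vrele(vp);
 */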
2139 
2140 /*
2141  * Release an already locked vnode.  This gives the same effect as
2142  * unlock+vrele(), but takes less time and avoids releasing and
2143  * re-acquiring the lock (as vrele() acquires the lock internally).
2144  */
2145 void
2146 vput(vp)
2147 	struct vnode *vp;
2148 {
2149 	struct thread *td = curthread;	/* XXX */
2150 
2151 	GIANT_REQUIRED;
2152 
2153 	KASSERT(vp != NULL, ("vput: null vp"));
2154 	VI_LOCK(vp);
2155 	/* Skip this v_writecount check if we're going to panic below. */
2156 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2157 	    ("vput: missed vn_close"));
2158 
2159 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2160 	    vp->v_usecount == 1)) {
2161 		v_incr_usecount(vp, -1);
2162 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2163 		return;
2164 	}
2165 
2166 	if (vp->v_usecount == 1) {
2167 		v_incr_usecount(vp, -1);
2168 		/*
2169 		 * We must call VOP_INACTIVE with the node locked, so
2170 		 * we just need to release the vnode mutex. Mark as
2171 		 * as VI_DOINGINACT to avoid recursion.
2172 		 */
2173 		vp->v_iflag |= VI_DOINGINACT;
2174 		VI_UNLOCK(vp);
2175 		VOP_INACTIVE(vp, td);
2176 		VI_LOCK(vp);
2177 		KASSERT(vp->v_iflag & VI_DOINGINACT,
2178 		    ("vput: lost VI_DOINGINACT"));
2179 		vp->v_iflag &= ~VI_DOINGINACT;
2180 		if (VSHOULDFREE(vp))
2181 			vfree(vp);
2182 		else
2183 			vlruvp(vp);
2184 		VI_UNLOCK(vp);
2185 
2186 	} else {
2187 #ifdef DIAGNOSTIC
2188 		vprint("vput: negative ref count", vp);
2189 #endif
2190 		panic("vput: negative ref cnt");
2191 	}
2192 }
2193 
2194 /*
2195  * Somebody doesn't want the vnode recycled.
2196  */
2197 void
2198 vhold(struct vnode *vp)
2199 {
2200 
2201 	VI_LOCK(vp);
2202 	vholdl(vp);
2203 	VI_UNLOCK(vp);
2204 }
2205 
2206 void
2207 vholdl(vp)
2208 	register struct vnode *vp;
2209 {
2210 
2211 	vp->v_holdcnt++;
2212 	if (VSHOULDBUSY(vp))
2213 		vbusy(vp);
2214 }
2215 
2216 /*
2217  * Note that there is one fewer holder who cares about this vnode.
2218  * vdrop() is the opposite of vhold().
2219  */
2220 void
2221 vdrop(struct vnode *vp)
2222 {
2223 
2224 	VI_LOCK(vp);
2225 	vdropl(vp);
2226 	VI_UNLOCK(vp);
2227 }
2228 
2229 void
2230 vdropl(vp)
2231 	register struct vnode *vp;
2232 {
2233 
2234 	if (vp->v_holdcnt <= 0)
2235 		panic("vdrop: holdcnt");
2236 	vp->v_holdcnt--;
2237 	if (VSHOULDFREE(vp))
2238 		vfree(vp);
2239 	else
2240 		vlruvp(vp);
2241 }
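
/*
 * Illustrative hold/drop sketch (not part of this file): a thread that
 * must block while keeping the vnode from being recycled takes a hold
 * reference rather than a full use reference.
 *
 *	vhold(vp);
 *	... sleep or work while vp must not be recycled ...
 *	vdrop(vp);
 */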
2242 
2243 /*
2244  * Remove any vnodes in the vnode table belonging to mount point mp.
2245  *
2246  * If FORCECLOSE is not specified, there should not be any active ones,
2247  * return error if any are found (nb: this is a user error, not a
2248  * system error). If FORCECLOSE is specified, detach any active vnodes
2249  * that are found.
2250  *
2251  * If WRITECLOSE is set, only flush out regular file vnodes open for
2252  * writing.
2253  *
2254  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2255  *
2256  * `rootrefs' specifies the base reference count for the root vnode
2257  * of this filesystem. The root vnode is considered busy if its
2258  * v_usecount exceeds this value. On a successful return, vflush(, td)
2259  * will call vrele() on the root vnode exactly rootrefs times.
2260  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2261  * be zero.
2262  */
2263 #ifdef DIAGNOSTIC
2264 static int busyprt = 0;		/* print out busy vnodes */
2265 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2266 #endif
2267 
2268 int
2269 vflush(mp, rootrefs, flags, td)
2270 	struct mount *mp;
2271 	int rootrefs;
2272 	int flags;
2273 	struct thread *td;
2274 {
2275 	struct vnode *vp, *nvp, *rootvp = NULL;
2276 	struct vattr vattr;
2277 	int busy = 0, error;
2278 
2279 	if (rootrefs > 0) {
2280 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2281 		    ("vflush: bad args"));
2282 		/*
2283 		 * Get the filesystem root vnode. We can vput() it
2284 		 * immediately, since with rootrefs > 0, it won't go away.
2285 		 */
2286 		if ((error = VFS_ROOT(mp, &rootvp, td)) != 0)
2287 			return (error);
2288 		vput(rootvp);
2289 
2290 	}
2291 	MNT_ILOCK(mp);
2292 loop:
2293 	MNT_VNODE_FOREACH(vp, mp, nvp) {
2294 
2295 		VI_LOCK(vp);
2296 		MNT_IUNLOCK(mp);
2297 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
2298 		if (error) {
2299 			MNT_ILOCK(mp);
2300 			goto loop;
2301 		}
2302 		/*
2303 	 * Skip over vnodes marked VV_SYSTEM.
2304 		 */
2305 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2306 			VOP_UNLOCK(vp, 0, td);
2307 			MNT_ILOCK(mp);
2308 			continue;
2309 		}
2310 		/*
2311 		 * If WRITECLOSE is set, flush out unlinked but still open
2312 		 * files (even if open only for reading) and regular file
2313 		 * vnodes open for writing.
2314 		 */
2315 		if (flags & WRITECLOSE) {
2316 			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2317 			VI_LOCK(vp);
2318 
2319 			if ((vp->v_type == VNON ||
2320 			    (error == 0 && vattr.va_nlink > 0)) &&
2321 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2322 				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2323 				MNT_ILOCK(mp);
2324 				continue;
2325 			}
2326 		} else
2327 			VI_LOCK(vp);
2328 
2329 		VOP_UNLOCK(vp, 0, td);
2330 
2331 		/*
2332 		 * With v_usecount == 0, all we need to do is clear out the
2333 		 * vnode data structures and we are done.
2334 		 */
2335 		if (vp->v_usecount == 0) {
2336 			vgonel(vp, td);
2337 			MNT_ILOCK(mp);
2338 			continue;
2339 		}
2340 
2341 		/*
2342 		 * If FORCECLOSE is set, forcibly close the vnode. For block
2343 		 * or character devices, revert to an anonymous device. For
2344 		 * all other files, just kill them.
2345 		 */
2346 		if (flags & FORCECLOSE) {
2347 			if (vp->v_type != VCHR)
2348 				vgonel(vp, td);
2349 			else
2350 				vgonechrl(vp, td);
2351 			MNT_ILOCK(mp);
2352 			continue;
2353 		}
2354 #ifdef DIAGNOSTIC
2355 		if (busyprt)
2356 			vprint("vflush: busy vnode", vp);
2357 #endif
2358 		VI_UNLOCK(vp);
2359 		MNT_ILOCK(mp);
2360 		busy++;
2361 	}
2362 	MNT_IUNLOCK(mp);
2363 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2364 		/*
2365 		 * If just the root vnode is busy, and if its refcount
2366 		 * is equal to `rootrefs', then go ahead and kill it.
2367 		 */
2368 		VI_LOCK(rootvp);
2369 		KASSERT(busy > 0, ("vflush: not busy"));
2370 		KASSERT(rootvp->v_usecount >= rootrefs,
2371 		    ("vflush: usecount %d < rootrefs %d",
2372 		     rootvp->v_usecount, rootrefs));
2373 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2374 			vgonel(rootvp, td);
2375 			busy = 0;
2376 		} else
2377 			VI_UNLOCK(rootvp);
2378 	}
2379 	if (busy)
2380 		return (EBUSY);
2381 	for (; rootrefs > 0; rootrefs--)
2382 		vrele(rootvp);
2383 	return (0);
2384 }
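
/*
 * Illustrative caller sketch (not part of this file): a typical unmount
 * path holds a single reference on the root vnode and passes
 * rootrefs = 1; on success vflush() drops that reference itself, and a
 * busy filesystem comes back as EBUSY unless the caller forces the
 * operation.
 *
 *	error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td);
 *	if (error != 0)
 *		return (error);
 */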
2385 
2386 /*
2387  * This moves a now (likely recyclable) vnode to the end of the
2388  * mountlist.  XXX However, it is temporarily disabled until we
2389  * can clean up ffs_sync() and friends, which have loop restart
2390  * conditions which this code causes to operate O(N^2).
2391  */
2392 static void
2393 vlruvp(struct vnode *vp)
2394 {
2395 #if 0
2396 	struct mount *mp;
2397 
2398 	if ((mp = vp->v_mount) != NULL) {
2399 		MNT_ILOCK(mp);
2400 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2401 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2402 		MNT_IUNLOCK(mp);
2403 	}
2404 #endif
2405 }
2406 
2407 static void
2408 vx_lock(struct vnode *vp)
2409 {
2410 
2411 	ASSERT_VI_LOCKED(vp, "vx_lock");
2412 
2413 	/*
2414 	 * Prevent the vnode from being recycled or brought into use while we
2415 	 * clean it out.
2416 	 */
2417 	if (vp->v_iflag & VI_XLOCK)
2418 		panic("vclean: deadlock");
2419 	vp->v_iflag |= VI_XLOCK;
2420 	vp->v_vxthread = curthread;
2421 }
2422 
2423 static void
2424 vx_unlock(struct vnode *vp)
2425 {
2426 	ASSERT_VI_LOCKED(vp, "vx_unlock");
2427 	vp->v_iflag &= ~VI_XLOCK;
2428 	vp->v_vxthread = NULL;
2429 	if (vp->v_iflag & VI_XWANT) {
2430 		vp->v_iflag &= ~VI_XWANT;
2431 		wakeup(vp);
2432 	}
2433 }
2434 
2435 /*
2436  * Disassociate the underlying filesystem from a vnode.
2437  */
2438 static void
2439 vclean(vp, flags, td)
2440 	struct vnode *vp;
2441 	int flags;
2442 	struct thread *td;
2443 {
2444 	int active;
2445 
2446 	ASSERT_VI_LOCKED(vp, "vclean");
2447 	/*
2448 	 * Check to see if the vnode is in use. If so we have to reference it
2449 	 * before we clean it out so that its count cannot fall to zero and
2450 	 * generate a race against ourselves to recycle it.
2451 	 */
2452 	if ((active = vp->v_usecount))
2453 		v_incr_usecount(vp, 1);
2454 
2455 	/*
2456 	 * Even if the count is zero, the VOP_INACTIVE routine may still
2457 	 * have the object locked while it cleans it out. The VOP_LOCK
2458 	 * ensures that the VOP_INACTIVE routine is done with its work.
2459 	 * For active vnodes, it ensures that no other activity can
2460 	 * occur while the underlying object is being cleaned out.
2461 	 */
2462 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2463 
2464 	/*
2465 	 * Clean out any buffers associated with the vnode.
2466 	 * If the flush fails, just toss the buffers.
2467 	 */
2468 	if (flags & DOCLOSE) {
2469 		struct buf *bp;
2470 		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2471 		if (bp != NULL)
2472 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2473 		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2474 			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2475 	}
2476 
2477 	VOP_DESTROYVOBJECT(vp);
2478 
2479 	/*
2480 	 * Any other processes trying to obtain this lock must first
2481 	 * wait for VI_XLOCK to clear, then call the new lock operation.
2482 	 */
2483 	VOP_UNLOCK(vp, 0, td);
2484 
2485 	/*
2486 	 * If purging an active vnode, it must be closed and
2487 	 * deactivated before being reclaimed. Note that the
2488 	 * VOP_INACTIVE will unlock the vnode.
2489 	 */
2490 	if (active) {
2491 		if (flags & DOCLOSE)
2492 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2493 		VI_LOCK(vp);
2494 		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2495 			vp->v_iflag |= VI_DOINGINACT;
2496 			VI_UNLOCK(vp);
2497 			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2498 				panic("vclean: cannot relock.");
2499 			VOP_INACTIVE(vp, td);
2500 			VI_LOCK(vp);
2501 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2502 			    ("vclean: lost VI_DOINGINACT"));
2503 			vp->v_iflag &= ~VI_DOINGINACT;
2504 		}
2505 		VI_UNLOCK(vp);
2506 	}
2507 	/*
2508 	 * Reclaim the vnode.
2509 	 */
2510 	if (VOP_RECLAIM(vp, td))
2511 		panic("vclean: cannot reclaim");
2512 
2513 	if (active) {
2514 		/*
2515 		 * Inline copy of vrele() since VOP_INACTIVE
2516 		 * has already been called.
2517 		 */
2518 		VI_LOCK(vp);
2519 		v_incr_usecount(vp, -1);
2520 		if (vp->v_usecount <= 0) {
2521 #ifdef INVARIANTS
2522 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2523 				vprint("vclean: bad ref count", vp);
2524 				panic("vclean: ref cnt");
2525 			}
2526 #endif
2527 			if (VSHOULDFREE(vp))
2528 				vfree(vp);
2529 		}
2530 		VI_UNLOCK(vp);
2531 	}
2532 	/*
2533 	 * Delete from old mount point vnode list.
2534 	 */
2535 	delmntque(vp);
2536 	cache_purge(vp);
2537 	VI_LOCK(vp);
2538 	if (VSHOULDFREE(vp))
2539 		vfree(vp);
2540 
2541 	/*
2542 	 * Done with purge, reset to the standard lock and
2543 	 * notify sleepers of the grim news.
2544 	 */
2545 	vp->v_vnlock = &vp->v_lock;
2546 	vp->v_op = dead_vnodeop_p;
2547 	if (vp->v_pollinfo != NULL)
2548 		vn_pollgone(vp);
2549 	vp->v_tag = "none";
2550 }
2551 
2552 /*
2553  * Eliminate all activity associated with the requested vnode
2554  * and with all vnodes aliased to the requested vnode.
2555  */
2556 int
2557 vop_revoke(ap)
2558 	struct vop_revoke_args /* {
2559 		struct vnode *a_vp;
2560 		int a_flags;
2561 	} */ *ap;
2562 {
2563 	struct vnode *vp, *vq;
2564 	struct cdev *dev;
2565 
2566 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2567 	vp = ap->a_vp;
2568 	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2569 
2570 	VI_LOCK(vp);
2571 	/*
2572 	 * If a vgone (or vclean) is already in progress,
2573 	 * wait until it is done and return.
2574 	 */
2575 	if (vp->v_iflag & VI_XLOCK) {
2576 		vp->v_iflag |= VI_XWANT;
2577 		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2578 		    "vop_revokeall", 0);
2579 		return (0);
2580 	}
2581 	VI_UNLOCK(vp);
2582 	dev = vp->v_rdev;
2583 	for (;;) {
2584 		mtx_lock(&spechash_mtx);
2585 		vq = SLIST_FIRST(&dev->si_hlist);
2586 		mtx_unlock(&spechash_mtx);
2587 		if (vq == NULL)
2588 			break;
2589 		vgone(vq);
2590 	}
2591 	return (0);
2592 }
2593 
2594 /*
2595  * Recycle an unused vnode to the front of the free list.
2596  * Release the passed interlock if the vnode will be recycled.
2597  */
2598 int
2599 vrecycle(vp, inter_lkp, td)
2600 	struct vnode *vp;
2601 	struct mtx *inter_lkp;
2602 	struct thread *td;
2603 {
2604 
2605 	VI_LOCK(vp);
2606 	if (vp->v_usecount == 0) {
2607 		if (inter_lkp) {
2608 			mtx_unlock(inter_lkp);
2609 		}
2610 		vgonel(vp, td);
2611 		return (1);
2612 	}
2613 	VI_UNLOCK(vp);
2614 	return (0);
2615 }
2616 
2617 /*
2618  * Eliminate all activity associated with a vnode
2619  * in preparation for reuse.
2620  */
2621 void
2622 vgone(vp)
2623 	register struct vnode *vp;
2624 {
2625 	struct thread *td = curthread;	/* XXX */
2626 
2627 	VI_LOCK(vp);
2628 	vgonel(vp, td);
2629 }
2630 
2631 /*
2632  * Disassociate a character device from its underlying filesystem and
2633  * attach it to spec.  This is for use when the chr device is still active
2634  * and the filesystem is going away.
2635  */
2636 static void
2637 vgonechrl(struct vnode *vp, struct thread *td)
2638 {
2639 	ASSERT_VI_LOCKED(vp, "vgonechrl");
2640 	vx_lock(vp);
2641 	/*
2642 	 * This is a custom version of vclean() which does not tear down
2643 	 * the bufs or vm objects held by this vnode.  This allows filesystems
2644 	 * to continue using devices which were discovered via another
2645 	 * filesystem that has been unmounted.
2646 	 */
2647 	if (vp->v_usecount != 0) {
2648 		v_incr_usecount(vp, 1);
2649 		/*
2650 		 * Ensure that no other activity can occur while the
2651 		 * underlying object is being cleaned out.
2652 		 */
2653 		VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2654 		/*
2655 		 * Any other processes trying to obtain this lock must first
2656 		 * wait for VI_XLOCK to clear, then call the new lock operation.
2657 		 */
2658 		VOP_UNLOCK(vp, 0, td);
2659 		vp->v_vnlock = &vp->v_lock;
2660 		vp->v_tag = "orphanchr";
2661 		vp->v_op = spec_vnodeop_p;
2662 		delmntque(vp);
2663 		cache_purge(vp);
2664 		vrele(vp);
2665 		VI_LOCK(vp);
2666 	} else
2667 		vclean(vp, 0, td);
2668 	vp->v_op = spec_vnodeop_p;
2669 	vx_unlock(vp);
2670 	VI_UNLOCK(vp);
2671 }
2672 
2673 /*
2674  * vgone, with the vp interlock held.
2675  */
2676 void
2677 vgonel(vp, td)
2678 	struct vnode *vp;
2679 	struct thread *td;
2680 {
2681 	/*
2682 	 * If a vgone (or vclean) is already in progress,
2683 	 * wait until it is done and return.
2684 	 */
2685 	ASSERT_VI_LOCKED(vp, "vgonel");
2686 	if (vp->v_iflag & VI_XLOCK) {
2687 		vp->v_iflag |= VI_XWANT;
2688 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2689 		return;
2690 	}
2691 	vx_lock(vp);
2692 
2693 	/*
2694 	 * Clean out the filesystem specific data.
2695 	 */
2696 	vclean(vp, DOCLOSE, td);
2697 	VI_UNLOCK(vp);
2698 
2699 	/*
2700 	 * If special device, remove it from special device alias list
2701 	 * if it is on one.
2702 	 */
2703 	VI_LOCK(vp);
2704 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2705 		mtx_lock(&spechash_mtx);
2706 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2707 		vp->v_rdev->si_usecount -= vp->v_usecount;
2708 		mtx_unlock(&spechash_mtx);
2709 		dev_rel(vp->v_rdev);
2710 		vp->v_rdev = NULL;
2711 	}
2712 
2713 	/*
2714 	 * If it is on the freelist and not already at the head,
2715 	 * move it to the head of the list. The test of the
2716 	 * VI_DOOMED flag and the reference count of zero is because
2717 	 * it will be removed from the free list by getnewvnode,
2718 	 * but will not have its reference count incremented until
2719 	 * after calling vgone. If the reference count were
2720 	 * incremented first, vgone would (incorrectly) try to
2721 	 * close the previous instance of the underlying object.
2722 	 */
2723 	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2724 		mtx_lock(&vnode_free_list_mtx);
2725 		if (vp->v_iflag & VI_FREE) {
2726 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2727 		} else {
2728 			vp->v_iflag |= VI_FREE;
2729 			freevnodes++;
2730 		}
2731 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2732 		mtx_unlock(&vnode_free_list_mtx);
2733 	}
2734 
2735 	vp->v_type = VBAD;
2736 	vx_unlock(vp);
2737 	VI_UNLOCK(vp);
2738 }
2739 
2740 /*
2741  * Lookup a vnode by device number.
2742  */
2743 int
2744 vfinddev(dev, vpp)
2745 	struct cdev *dev;
2746 	struct vnode **vpp;
2747 {
2748 	struct vnode *vp;
2749 
2750 	mtx_lock(&spechash_mtx);
2751 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2752 		*vpp = vp;
2753 		mtx_unlock(&spechash_mtx);
2754 		return (1);
2755 	}
2756 	mtx_unlock(&spechash_mtx);
2757 	return (0);
2758 }
2759 
2760 /*
2761  * Calculate the total number of references to a special device.
2762  */
2763 int
2764 vcount(vp)
2765 	struct vnode *vp;
2766 {
2767 	int count;
2768 
2769 	mtx_lock(&spechash_mtx);
2770 	count = vp->v_rdev->si_usecount;
2771 	mtx_unlock(&spechash_mtx);
2772 	return (count);
2773 }
2774 
2775 /*
2776  * Same as above, but using the struct cdev * as the argument.
2777  */
2778 int
2779 count_dev(dev)
2780 	struct cdev *dev;
2781 {
2782 	int count;
2783 
2784 	mtx_lock(&spechash_mtx);
2785 	count = dev->si_usecount;
2786 	mtx_unlock(&spechash_mtx);
2787 	return (count);
2788 }
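
/*
 * Illustrative note: for a VCHR vnode the two counts agree, since both
 * read the same si_usecount accumulated across all aliases of the
 * device, e.g.:
 *
 *	KASSERT(vcount(vp) == count_dev(vp->v_rdev),
 *	    ("alias counts diverged"));
 */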
2789 
2790 /*
2791  * Print out a description of a vnode.
2792  */
2793 static char *typename[] =
2794 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2795 
2796 void
2797 vprint(label, vp)
2798 	char *label;
2799 	struct vnode *vp;
2800 {
2801 	char buf[96];
2802 
2803 	if (label != NULL)
2804 		printf("%s: %p: ", label, (void *)vp);
2805 	else
2806 		printf("%p: ", (void *)vp);
2807 	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2808 	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2809 	    vp->v_writecount, vp->v_holdcnt);
2810 	buf[0] = '\0';
2811 	if (vp->v_vflag & VV_ROOT)
2812 		strcat(buf, "|VV_ROOT");
2813 	if (vp->v_vflag & VV_TEXT)
2814 		strcat(buf, "|VV_TEXT");
2815 	if (vp->v_vflag & VV_SYSTEM)
2816 		strcat(buf, "|VV_SYSTEM");
2817 	if (vp->v_iflag & VI_XLOCK)
2818 		strcat(buf, "|VI_XLOCK");
2819 	if (vp->v_iflag & VI_XWANT)
2820 		strcat(buf, "|VI_XWANT");
2821 	if (vp->v_iflag & VI_BWAIT)
2822 		strcat(buf, "|VI_BWAIT");
2823 	if (vp->v_iflag & VI_DOOMED)
2824 		strcat(buf, "|VI_DOOMED");
2825 	if (vp->v_iflag & VI_FREE)
2826 		strcat(buf, "|VI_FREE");
2827 	if (vp->v_vflag & VV_OBJBUF)
2828 		strcat(buf, "|VV_OBJBUF");
2829 	if (buf[0] != '\0')
2830 		printf(" flags (%s),", &buf[1]);
2831 	lockmgr_printinfo(vp->v_vnlock);
2832 	printf("\n");
2833 	if (vp->v_data != NULL)
2834 		VOP_PRINT(vp);
2835 }
2836 
2837 #ifdef DDB
2838 #include <ddb/ddb.h>
2839 /*
2840  * List all of the locked vnodes in the system.
2841  * Called when debugging the kernel.
2842  */
2843 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2844 {
2845 	struct mount *mp, *nmp;
2846 	struct vnode *vp;
2847 
2848 	/*
2849 	 * Note: because this is DDB, we can't obey the locking semantics
2850 	 * for these structures, which means we could catch an inconsistent
2851 	 * state and dereference a nasty pointer.  Not much to be done
2852 	 * about that.
2853 	 */
2854 	printf("Locked vnodes\n");
2855 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2856 		nmp = TAILQ_NEXT(mp, mnt_list);
2857 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2858 			if (VOP_ISLOCKED(vp, NULL))
2859 				vprint(NULL, vp);
2860 		}
2862 	}
2863 }
2864 #endif
2865 
2866 /*
2867  * Fill in a struct xvfsconf based on a struct vfsconf.
2868  */
2869 static void
2870 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2871 {
2872 
2873 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2874 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2875 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2876 	xvfsp->vfc_flags = vfsp->vfc_flags;
2877 	/*
2878 	 * These are unused in userland; we keep them
2879 	 * so as not to break binary compatibility.
2880 	 */
2881 	xvfsp->vfc_vfsops = NULL;
2882 	xvfsp->vfc_next = NULL;
2883 }
2884 
2885 /*
2886  * Top level filesystem related information gathering.
2887  */
2888 static int
2889 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2890 {
2891 	struct vfsconf *vfsp;
2892 	struct xvfsconf xvfsp;
2893 	int error;
2894 
2895 	error = 0;
2896 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2897 		vfsconf2x(vfsp, &xvfsp);
2898 		error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2899 		if (error)
2900 			break;
2901 	}
2902 	return (error);
2903 }
2904 
2905 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2906     "S,xvfsconf", "List of all configured filesystems");
2907 
2908 #ifndef BURN_BRIDGES
2909 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2910 
2911 static int
2912 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2913 {
2914 	int *name = (int *)arg1 - 1;	/* XXX */
2915 	u_int namelen = arg2 + 1;	/* XXX */
2916 	struct vfsconf *vfsp;
2917 	struct xvfsconf xvfsp;
2918 
2919 	printf("WARNING: userland calling deprecated sysctl, "
2920 	    "please rebuild world\n");
2921 
2922 #if 1 || defined(COMPAT_PRELITE2)
2923 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2924 	if (namelen == 1)
2925 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2926 #endif
2927 
2928 	switch (name[1]) {
2929 	case VFS_MAXTYPENUM:
2930 		if (namelen != 2)
2931 			return (ENOTDIR);
2932 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2933 	case VFS_CONF:
2934 		if (namelen != 3)
2935 			return (ENOTDIR);	/* overloaded */
2936 		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
2937 			if (vfsp->vfc_typenum == name[2])
2938 				break;
2939 		if (vfsp == NULL)
2940 			return (EOPNOTSUPP);
2941 		vfsconf2x(vfsp, &xvfsp);
2942 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2943 	}
2944 	return (EOPNOTSUPP);
2945 }
2946 
2947 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
2948 	"Generic filesystem");
2949 
2950 #if 1 || defined(COMPAT_PRELITE2)
2951 
2952 static int
2953 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2954 {
2955 	int error;
2956 	struct vfsconf *vfsp;
2957 	struct ovfsconf ovfs;
2958 
2959 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2960 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2961 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2962 		ovfs.vfc_index = vfsp->vfc_typenum;
2963 		ovfs.vfc_refcount = vfsp->vfc_refcount;
2964 		ovfs.vfc_flags = vfsp->vfc_flags;
2965 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2966 		if (error)
2967 			return (error);
2968 	}
2969 	return (0);
2970 }
2971 
2972 #endif /* 1 || COMPAT_PRELITE2 */
2973 #endif /* !BURN_BRIDGES */
2974 
2975 #define KINFO_VNODESLOP		10
2976 #ifdef notyet
2977 /*
2978  * Dump vnode list (via sysctl).
2979  */
2980 /* ARGSUSED */
2981 static int
2982 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2983 {
2984 	struct xvnode *xvn;
2985 	struct thread *td = req->td;
2986 	struct mount *mp;
2987 	struct vnode *vp;
2988 	int error, len, n;
2989 
2990 	/*
2991 	 * Stale numvnodes access is not fatal here.
2992 	 */
2993 	req->lock = 0;
2994 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
2995 	if (!req->oldptr)
2996 		/* Make an estimate */
2997 		return (SYSCTL_OUT(req, 0, len));
2998 
2999 	error = sysctl_wire_old_buffer(req, 0);
3000 	if (error != 0)
3001 		return (error);
3002 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3003 	n = 0;
3004 	mtx_lock(&mountlist_mtx);
3005 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3006 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3007 			continue;
3008 		MNT_ILOCK(mp);
3009 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3010 			if (n == len)
3011 				break;
3012 			vref(vp);
3013 			xvn[n].xv_size = sizeof *xvn;
3014 			xvn[n].xv_vnode = vp;
3015 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3016 			XV_COPY(usecount);
3017 			XV_COPY(writecount);
3018 			XV_COPY(holdcnt);
3019 			XV_COPY(id);
3020 			XV_COPY(mount);
3021 			XV_COPY(numoutput);
3022 			XV_COPY(type);
3023 #undef XV_COPY
3024 			xvn[n].xv_flag = vp->v_vflag;
3025 
3026 			switch (vp->v_type) {
3027 			case VREG:
3028 			case VDIR:
3029 			case VLNK:
3030 				xvn[n].xv_dev = vp->v_cachedfs;
3031 				xvn[n].xv_ino = vp->v_cachedid;
3032 				break;
3033 			case VBLK:
3034 			case VCHR:
3035 				if (vp->v_rdev == NULL) {
3036 					vrele(vp);
3037 					continue;
3038 				}
3039 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3040 				break;
3041 			case VSOCK:
3042 				xvn[n].xv_socket = vp->v_socket;
3043 				break;
3044 			case VFIFO:
3045 				xvn[n].xv_fifo = vp->v_fifoinfo;
3046 				break;
3047 			case VNON:
3048 			case VBAD:
3049 			default:
3050 				/* shouldn't happen? */
3051 				vrele(vp);
3052 				continue;
3053 			}
3054 			vrele(vp);
3055 			++n;
3056 		}
3057 		MNT_IUNLOCK(mp);
3058 		mtx_lock(&mountlist_mtx);
3059 		vfs_unbusy(mp, td);
3060 		if (n == len)
3061 			break;
3062 	}
3063 	mtx_unlock(&mountlist_mtx);
3064 
3065 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3066 	free(xvn, M_TEMP);
3067 	return (error);
3068 }
3069 
3070 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3071 	0, 0, sysctl_vnode, "S,xvnode", "");
3072 #endif
3073 
3074 /*
3075  * Check to see if a filesystem is mounted on a block device.
3076  */
3077 int
3078 vfs_mountedon(vp)
3079 	struct vnode *vp;
3080 {
3081 
3082 	if (vp->v_rdev->si_mountpoint != NULL)
3083 		return (EBUSY);
3084 	return (0);
3085 }
3086 
3087 /*
3088  * Unmount all filesystems. The list is traversed in reverse order
3089  * of mounting to avoid dependencies.
3090  */
3091 void
3092 vfs_unmountall()
3093 {
3094 	struct mount *mp;
3095 	struct thread *td;
3096 	int error;
3097 
3098 	if (curthread != NULL)
3099 		td = curthread;
3100 	else
3101 		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3102 	/*
3103 	 * Since this only runs when rebooting, it is not interlocked.
3104 	 */
3105 	while (!TAILQ_EMPTY(&mountlist)) {
3106 		mp = TAILQ_LAST(&mountlist, mntlist);
3107 		error = dounmount(mp, MNT_FORCE, td);
3108 		if (error) {
3109 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3110 			printf("unmount of %s failed (",
3111 			    mp->mnt_stat.f_mntonname);
3112 			if (error == EBUSY)
3113 				printf("BUSY)\n");
3114 			else
3115 				printf("%d)\n", error);
3116 		} else {
3117 			/* The unmount has removed mp from the mountlist */
3118 		}
3119 	}
3120 }
3121 
3122 /*
3123  * Perform msync on all vnodes under a mount point.
3124  * The mount point must be locked.
3125  */
3126 void
3127 vfs_msync(struct mount *mp, int flags)
3128 {
3129 	struct vnode *vp, *nvp;
3130 	struct vm_object *obj;
3131 	int tries;
3132 
3133 	GIANT_REQUIRED;
3134 
3135 	tries = 5;
3136 	MNT_ILOCK(mp);
3137 loop:
3138 	TAILQ_FOREACH_SAFE(vp, &mp->mnt_nvnodelist, v_nmntvnodes, nvp) {
3139 		if (vp->v_mount != mp) {
3140 			if (--tries > 0)
3141 				goto loop;
3142 			break;
3143 		}
3144 
3145 		VI_LOCK(vp);
3146 		if (vp->v_iflag & VI_XLOCK) {
3147 			VI_UNLOCK(vp);
3148 			continue;
3149 		}
3150 
3151 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3152 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3153 			MNT_IUNLOCK(mp);
3154 			if (!vget(vp,
3155 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3156 			    curthread)) {
3157 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3158 					vput(vp);
3159 					MNT_ILOCK(mp);
3160 					continue;
3161 				}
3162 
3163 				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3164 					VM_OBJECT_LOCK(obj);
3165 					vm_object_page_clean(obj, 0, 0,
3166 					    flags == MNT_WAIT ?
3167 					    OBJPC_SYNC : OBJPC_NOSYNC);
3168 					VM_OBJECT_UNLOCK(obj);
3169 				}
3170 				vput(vp);
3171 			}
3172 			MNT_ILOCK(mp);
3173 			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3174 				if (--tries > 0)
3175 					goto loop;
3176 				break;
3177 			}
3178 		} else
3179 			VI_UNLOCK(vp);
3180 	}
3181 	MNT_IUNLOCK(mp);
3182 }
3183 
3184 /*
3185  * Create the VM object needed for VMIO and mmap support.  This
3186  * is done for all VREG files in the system.  Some filesystems might
3187  * take advantage of the additional metadata buffering capability of
3188  * the VMIO code by making the device node be VMIO mode also.
3189  *
3190  * vp must be locked when vfs_object_create is called.
3191  */
3192 int
3193 vfs_object_create(vp, td, cred)
3194 	struct vnode *vp;
3195 	struct thread *td;
3196 	struct ucred *cred;
3197 {
3198 
3199 	GIANT_REQUIRED;
3200 	return (VOP_CREATEVOBJECT(vp, cred, td));
3201 }
3202 
3203 /*
3204  * Mark a vnode as free, putting it up for recycling.
3205  */
3206 void
3207 vfree(vp)
3208 	struct vnode *vp;
3209 {
3210 
3211 	ASSERT_VI_LOCKED(vp, "vfree");
3212 	mtx_lock(&vnode_free_list_mtx);
3213 	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3214 	if (vp->v_iflag & VI_AGE) {
3215 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3216 	} else {
3217 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3218 	}
3219 	freevnodes++;
3220 	mtx_unlock(&vnode_free_list_mtx);
3221 	vp->v_iflag &= ~VI_AGE;
3222 	vp->v_iflag |= VI_FREE;
3223 }
3224 
3225 /*
3226  * Opposite of vfree() - mark a vnode as in use.
3227  */
3228 void
3229 vbusy(vp)
3230 	struct vnode *vp;
3231 {
3232 
3233 	ASSERT_VI_LOCKED(vp, "vbusy");
3234 	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3235 
3236 	mtx_lock(&vnode_free_list_mtx);
3237 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3238 	freevnodes--;
3239 	mtx_unlock(&vnode_free_list_mtx);
3240 
3241 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3242 }
3243 
3244 /*
3245  * Initialize per-vnode helper structure to hold poll-related state.
3246  */
3247 void
3248 v_addpollinfo(struct vnode *vp)
3249 {
3250 
3251 	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
3252 	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3253 }
3254 
3255 /*
3256  * Record a process's interest in events which might happen to
3257  * a vnode.  Because poll uses the historic select-style interface
3258  * internally, this routine serves as both the ``check for any
3259  * pending events'' and the ``record my interest in future events''
3260  * functions.  (These are done together, while the lock is held,
3261  * to avoid race conditions.)
3262  */
3263 int
3264 vn_pollrecord(vp, td, events)
3265 	struct vnode *vp;
3266 	struct thread *td;
3267 	short events;
3268 {
3269 
3270 	if (vp->v_pollinfo == NULL)
3271 		v_addpollinfo(vp);
3272 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3273 	if (vp->v_pollinfo->vpi_revents & events) {
3274 		/*
3275 		 * This leaves events we are not interested
3276 		 * in available for the other process which
3277 		 * presumably had requested them
3278 		 * (otherwise they would never have been
3279 		 * recorded).
3280 		 */
3281 		events &= vp->v_pollinfo->vpi_revents;
3282 		vp->v_pollinfo->vpi_revents &= ~events;
3283 
3284 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3285 		return (events);
3286 	}
3287 	vp->v_pollinfo->vpi_events |= events;
3288 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3289 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3290 	return (0);
3291 }
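
/*
 * Illustrative sketch (not part of this file): a filesystem's poll
 * routine typically reports the events it can satisfy immediately and
 * hands the rest to vn_pollrecord() so the process is woken up later.
 * The "fs_poll_ready" helper is an assumption made only for this
 * example.
 *
 *	revents = fs_poll_ready(vp, events);
 *	if (revents == 0)
 *		revents = vn_pollrecord(vp, td, events);
 *	return (revents);
 */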
3292 
3293 /*
3294  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3295  * it is possible for us to miss an event due to race conditions, but
3296  * that condition is expected to be rare, so for the moment it is the
3297  * preferred interface.
3298  */
3299 void
3300 vn_pollevent(vp, events)
3301 	struct vnode *vp;
3302 	short events;
3303 {
3304 
3305 	if (vp->v_pollinfo == NULL)
3306 		v_addpollinfo(vp);
3307 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3308 	if (vp->v_pollinfo->vpi_events & events) {
3309 		/*
3310 		 * We clear vpi_events so that we don't
3311 		 * call selwakeup() twice if two events are
3312 		 * posted before the polling process(es) is
3313 		 * awakened.  This also ensures that we take at
3314 		 * most one selwakeup() if the polling process
3315 		 * is no longer interested.  However, it does
3316 		 * mean that only one event can be noticed at
3317 		 * a time.  (Perhaps we should only clear those
3318 		 * event bits which we note?) XXX
3319 		 */
3320 		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3321 		vp->v_pollinfo->vpi_revents |= events;
3322 		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3323 	}
3324 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3325 }
3326 
3327 /*
3328  * Wake up anyone polling on vp because it is being revoked.
3329  * This depends on dead_poll() returning POLLHUP for correct
3330  * behavior.
3331  */
3332 void
3333 vn_pollgone(vp)
3334 	struct vnode *vp;
3335 {
3336 
3337 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3338 	VN_KNOTE(vp, NOTE_REVOKE);
3339 	if (vp->v_pollinfo->vpi_events) {
3340 		vp->v_pollinfo->vpi_events = 0;
3341 		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
3342 	}
3343 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3344 }
3345 
3346 
3347 
3348 /*
3349  * Routine to create and manage a filesystem syncer vnode.
3350  */
3351 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3352 static int	sync_fsync(struct  vop_fsync_args *);
3353 static int	sync_inactive(struct  vop_inactive_args *);
3354 static int	sync_reclaim(struct  vop_reclaim_args *);
3355 
3356 static vop_t **sync_vnodeop_p;
3357 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3358 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3359 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3360 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3361 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3362 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3363 	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3364 	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3365 	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3366 	{ NULL, NULL }
3367 };
3368 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3369 	{ &sync_vnodeop_p, sync_vnodeop_entries };
3370 
3371 VNODEOP_SET(sync_vnodeop_opv_desc);
3372 
3373 /*
3374  * Create a new filesystem syncer vnode for the specified mount point.
3375  */
3376 int
3377 vfs_allocate_syncvnode(mp)
3378 	struct mount *mp;
3379 {
3380 	struct vnode *vp;
3381 	static long start, incr, next;
3382 	int error;
3383 
3384 	/* Allocate a new vnode */
3385 	if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
3386 		mp->mnt_syncer = NULL;
3387 		return (error);
3388 	}
3389 	vp->v_type = VNON;
3390 	/*
3391 	 * Place the vnode onto the syncer worklist. We attempt to
3392 	 * scatter them about on the list so that they will go off
3393 	 * at evenly distributed times even if all the filesystems
3394 	 * are mounted at once.
3395 	 */
3396 	next += incr;
3397 	if (next == 0 || next > syncer_maxdelay) {
3398 		start /= 2;
3399 		incr /= 2;
3400 		if (start == 0) {
3401 			start = syncer_maxdelay / 2;
3402 			incr = syncer_maxdelay;
3403 		}
3404 		next = start;
3405 	}
3406 	VI_LOCK(vp);
3407 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3408 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3409 	mtx_lock(&sync_mtx);
3410 	sync_vnode_count++;
3411 	mtx_unlock(&sync_mtx);
3412 	VI_UNLOCK(vp);
3413 	mp->mnt_syncer = vp;
3414 	return (0);
3415 }
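
/*
 * Worked example of the scattering above, assuming syncer_maxdelay is
 * 32: the first mount gets next = 16; the second wraps (16 + 32 > 32)
 * and restarts with start = 8, incr = 16, so successive mounts land on
 * 8, 24, then 4, 12, 20, 28, ..., spreading syncer activity across the
 * wheel instead of clustering on a single slot.
 */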
3416 
3417 /*
3418  * Do a lazy sync of the filesystem.
3419  */
3420 static int
3421 sync_fsync(ap)
3422 	struct vop_fsync_args /* {
3423 		struct vnode *a_vp;
3424 		struct ucred *a_cred;
3425 		int a_waitfor;
3426 		struct thread *a_td;
3427 	} */ *ap;
3428 {
3429 	struct vnode *syncvp = ap->a_vp;
3430 	struct mount *mp = syncvp->v_mount;
3431 	struct thread *td = ap->a_td;
3432 	int error, asyncflag;
3433 
3434 	/*
3435 	 * We only need to do something if this is a lazy evaluation.
3436 	 */
3437 	if (ap->a_waitfor != MNT_LAZY)
3438 		return (0);
3439 
3440 	/*
3441 	 * Move ourselves to the back of the sync list.
3442 	 */
3443 	VI_LOCK(syncvp);
3444 	vn_syncer_add_to_worklist(syncvp, syncdelay);
3445 	VI_UNLOCK(syncvp);
3446 
3447 	/*
3448 	 * Walk the list of vnodes pushing all that are dirty and
3449 	 * not already on the sync list.
3450 	 */
3451 	mtx_lock(&mountlist_mtx);
3452 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3453 		mtx_unlock(&mountlist_mtx);
3454 		return (0);
3455 	}
3456 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3457 		vfs_unbusy(mp, td);
3458 		return (0);
3459 	}
3460 	asyncflag = mp->mnt_flag & MNT_ASYNC;
3461 	mp->mnt_flag &= ~MNT_ASYNC;
3462 	vfs_msync(mp, MNT_NOWAIT);
3463 	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3464 	if (asyncflag)
3465 		mp->mnt_flag |= MNT_ASYNC;
3466 	vn_finished_write(mp);
3467 	vfs_unbusy(mp, td);
3468 	return (error);
3469 }
3470 
3471 /*
3472  * The syncer vnode is no longer referenced.
3473  */
3474 static int
3475 sync_inactive(ap)
3476 	struct vop_inactive_args /* {
3477 		struct vnode *a_vp;
3478 		struct thread *a_td;
3479 	} */ *ap;
3480 {
3481 
3482 	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3483 	vgone(ap->a_vp);
3484 	return (0);
3485 }
3486 
3487 /*
3488  * The syncer vnode is no longer needed and is being decommissioned.
3489  *
3490  * Modifications to the worklist must be protected by sync_mtx.
3491  */
3492 static int
3493 sync_reclaim(ap)
3494 	struct vop_reclaim_args /* {
3495 		struct vnode *a_vp;
3496 	} */ *ap;
3497 {
3498 	struct vnode *vp = ap->a_vp;
3499 
3500 	VI_LOCK(vp);
3501 	vp->v_mount->mnt_syncer = NULL;
3502 	if (vp->v_iflag & VI_ONWORKLST) {
3503 		mtx_lock(&sync_mtx);
3504 		LIST_REMOVE(vp, v_synclist);
3505 		syncer_worklist_len--;
3506 		sync_vnode_count--;
3507 		mtx_unlock(&sync_mtx);
3508 		vp->v_iflag &= ~VI_ONWORKLST;
3509 	}
3510 	VI_UNLOCK(vp);
3511 
3512 	return (0);
3513 }
3514 
3515 /*
3516  * Extract the struct cdev * from a VCHR vnode.
3517  */
3518 struct cdev *
3519 vn_todev(vp)
3520 	struct vnode *vp;
3521 {
3522 
3523 	if (vp->v_type != VCHR)
3524 		return (NULL);
3525 	return (vp->v_rdev);
3526 }
3527 
3528 /*
3529  * Check if vnode represents a disk device
3530  */
3531 int
3532 vn_isdisk(vp, errp)
3533 	struct vnode *vp;
3534 	int *errp;
3535 {
3536 	int error;
3537 
3538 	error = 0;
3539 	if (vp->v_type != VCHR)
3540 		error = ENOTBLK;
3541 	else if (vp->v_rdev == NULL)
3542 		error = ENXIO;
3543 	else if (!(devsw(vp->v_rdev)->d_flags & D_DISK))
3544 		error = ENOTBLK;
3545 	if (errp != NULL)
3546 		*errp = error;
3547 	return (error == 0);
3548 }
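
/*
 * Illustrative usage sketch (not part of this file): callers that
 * require a disk device test the vnode and propagate the detailed
 * errno returned through the second argument.
 *
 *	int error;
 *
 *	if (!vn_isdisk(vp, &error))
 *		return (error);
 */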
3549 
3550 /*
3551  * Free data allocated by namei(); see namei(9) for details.
3552  */
3553 void
3554 NDFREE(ndp, flags)
3555      struct nameidata *ndp;
3556      const u_int flags;
3557 {
3558 
3559 	if (!(flags & NDF_NO_FREE_PNBUF) &&
3560 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3561 		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3562 		ndp->ni_cnd.cn_flags &= ~HASBUF;
3563 	}
3564 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3565 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3566 	    ndp->ni_dvp != ndp->ni_vp)
3567 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3568 	if (!(flags & NDF_NO_DVP_RELE) &&
3569 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3570 		vrele(ndp->ni_dvp);
3571 		ndp->ni_dvp = NULL;
3572 	}
3573 	if (!(flags & NDF_NO_VP_UNLOCK) &&
3574 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3575 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3576 	if (!(flags & NDF_NO_VP_RELE) &&
3577 	    ndp->ni_vp) {
3578 		vrele(ndp->ni_vp);
3579 		ndp->ni_vp = NULL;
3580 	}
3581 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3582 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3583 		vrele(ndp->ni_startdir);
3584 		ndp->ni_startdir = NULL;
3585 	}
3586 }
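
/*
 * Illustrative usage sketch (not part of this file): a typical lookup
 * pairs namei() with NDFREE(); NDF_ONLY_PNBUF frees just the path
 * buffer when the caller wants to keep its vnode references.
 *
 *	struct nameidata nd;
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, td);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	... use nd.ni_vp, then vput(nd.ni_vp) ...
 */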
3587 
3588 /*
3589  * Common filesystem object access control check routine.  Accepts a
3590  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3591  * and optional call-by-reference privused argument allowing vaccess()
3592  * to indicate to the caller whether privilege was used to satisfy the
3593  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3594  */
3595 int
3596 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3597 	enum vtype type;
3598 	mode_t file_mode;
3599 	uid_t file_uid;
3600 	gid_t file_gid;
3601 	mode_t acc_mode;
3602 	struct ucred *cred;
3603 	int *privused;
3604 {
3605 	mode_t dac_granted;
3606 #ifdef CAPABILITIES
3607 	mode_t cap_granted;
3608 #endif
3609 
3610 	/*
3611 	 * Look for a normal, non-privileged way to access the file/directory
3612 	 * as requested.  If it exists, go with that.
3613 	 */
3614 
3615 	if (privused != NULL)
3616 		*privused = 0;
3617 
3618 	dac_granted = 0;
3619 
3620 	/* Check the owner. */
3621 	if (cred->cr_uid == file_uid) {
3622 		dac_granted |= VADMIN;
3623 		if (file_mode & S_IXUSR)
3624 			dac_granted |= VEXEC;
3625 		if (file_mode & S_IRUSR)
3626 			dac_granted |= VREAD;
3627 		if (file_mode & S_IWUSR)
3628 			dac_granted |= (VWRITE | VAPPEND);
3629 
3630 		if ((acc_mode & dac_granted) == acc_mode)
3631 			return (0);
3632 
3633 		goto privcheck;
3634 	}
3635 
3636 	/* Otherwise, check the groups (first match) */
3637 	if (groupmember(file_gid, cred)) {
3638 		if (file_mode & S_IXGRP)
3639 			dac_granted |= VEXEC;
3640 		if (file_mode & S_IRGRP)
3641 			dac_granted |= VREAD;
3642 		if (file_mode & S_IWGRP)
3643 			dac_granted |= (VWRITE | VAPPEND);
3644 
3645 		if ((acc_mode & dac_granted) == acc_mode)
3646 			return (0);
3647 
3648 		goto privcheck;
3649 	}
3650 
3651 	/* Otherwise, check everyone else. */
3652 	if (file_mode & S_IXOTH)
3653 		dac_granted |= VEXEC;
3654 	if (file_mode & S_IROTH)
3655 		dac_granted |= VREAD;
3656 	if (file_mode & S_IWOTH)
3657 		dac_granted |= (VWRITE | VAPPEND);
3658 	if ((acc_mode & dac_granted) == acc_mode)
3659 		return (0);
3660 
3661 privcheck:
3662 	if (!suser_cred(cred, SUSER_ALLOWJAIL)) {
3663 		/* XXX audit: privilege used */
3664 		if (privused != NULL)
3665 			*privused = 1;
3666 		return (0);
3667 	}
3668 
3669 #ifdef CAPABILITIES
3670 	/*
3671 	 * Build a capability mask to determine if the set of capabilities
3672 	 * satisfies the requirements when combined with the granted mask
3673 	 * from above.
3674 	 * For each capability, if the capability is required, bitwise
3675 	 * or the request type onto the cap_granted mask.
3676 	 */
3677 	cap_granted = 0;
3678 
3679 	if (type == VDIR) {
3680 		/*
3681 		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3682 		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3683 		 */
3684 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3685 		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3686 			cap_granted |= VEXEC;
3687 	} else {
3688 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3689 		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, SUSER_ALLOWJAIL))
3690 			cap_granted |= VEXEC;
3691 	}
3692 
3693 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3694 	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3695 		cap_granted |= VREAD;
3696 
3697 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3698 	    !cap_check(cred, NULL, CAP_DAC_WRITE, SUSER_ALLOWJAIL))
3699 		cap_granted |= (VWRITE | VAPPEND);
3700 
3701 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3702 	    !cap_check(cred, NULL, CAP_FOWNER, SUSER_ALLOWJAIL))
3703 		cap_granted |= VADMIN;
3704 
3705 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3706 		/* XXX audit: privilege used */
3707 		if (privused != NULL)
3708 			*privused = 1;
3709 		return (0);
3710 	}
3711 #endif
3712 
3713 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3714 }
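
/*
 * Worked example of the DAC walk above: for a regular file with mode
 * 0644 owned by uid 100, a request of VREAD | VWRITE from a credential
 * with cr_uid == 100 matches the owner clause, so dac_granted becomes
 * VADMIN | VREAD | VWRITE | VAPPEND and the call returns 0 without
 * consulting privilege; the same request from a mere group member only
 * collects VREAD and falls through to privcheck.
 */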
3715 
3716 /*
3717  * Credential check based on process requesting service, and per-attribute
3718  * permissions.
3719  */
3720 int
3721 extattr_check_cred(struct vnode *vp, int attrnamespace,
3722     struct ucred *cred, struct thread *td, int access)
3723 {
3724 
3725 	/*
3726 	 * Kernel-invoked requests always succeed.
3727 	 */
3728 	if (cred == NOCRED)
3729 		return (0);
3730 
3731 	/*
3732 	 * Do not allow privileged processes in jail to directly
3733 	 * manipulate system attributes.
3734 	 *
3735 	 * XXX What capability should apply here?
3736 	 * Probably CAP_SYS_SETFFLAG.
3737 	 */
3738 	switch (attrnamespace) {
3739 	case EXTATTR_NAMESPACE_SYSTEM:
3740 		/* Potentially should be: return (EPERM); */
3741 		return (suser_cred(cred, 0));
3742 	case EXTATTR_NAMESPACE_USER:
3743 		return (VOP_ACCESS(vp, access, cred, td));
3744 	default:
3745 		return (EPERM);
3746 	}
3747 }
3748 
3749 #ifdef DEBUG_VFS_LOCKS
3750 /*
3751  * This only exists to suppress warnings from unlocked specfs accesses.  It is
3752  * no longer ok to have an unlocked VFS.
3753  */
3754 #define	IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
3755 
3756 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3757 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3758 
3759 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3760 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3761 
3762 int vfs_badlock_print = 1;	/* Print lock violations. */
3763 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3764 
3765 #ifdef KDB
3766 int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
3767 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3768 #endif
3769 
3770 static void
3771 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3772 {
3773 
3774 #ifdef KDB
3775 	if (vfs_badlock_backtrace)
3776 		kdb_backtrace();
3777 #endif
3778 	if (vfs_badlock_print)
3779 		printf("%s: %p %s\n", str, (void *)vp, msg);
3780 	if (vfs_badlock_ddb)
3781 		kdb_enter("lock violation");
3782 }
3783 
3784 void
3785 assert_vi_locked(struct vnode *vp, const char *str)
3786 {
3787 
3788 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3789 		vfs_badlock("interlock is not locked but should be", str, vp);
3790 }
3791 
3792 void
3793 assert_vi_unlocked(struct vnode *vp, const char *str)
3794 {
3795 
3796 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3797 		vfs_badlock("interlock is locked but should not be", str, vp);
3798 }
3799 
3800 void
3801 assert_vop_locked(struct vnode *vp, const char *str)
3802 {
3803 
3804 	if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
3805 		vfs_badlock("is not locked but should be", str, vp);
3806 }
3807 
3808 void
3809 assert_vop_unlocked(struct vnode *vp, const char *str)
3810 {
3811 
3812 	if (vp && !IGNORE_LOCK(vp) &&
3813 	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
3814 		vfs_badlock("is locked but should not be", str, vp);
3815 }
3816 
3817 #if 0
3818 void
3819 assert_vop_elocked(struct vnode *vp, const char *str)
3820 {
3821 
3822 	if (vp && !IGNORE_LOCK(vp) &&
3823 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
3824 		vfs_badlock("is not exclusive locked but should be", str, vp);
3825 }
3826 
3827 void
3828 assert_vop_elocked_other(struct vnode *vp, const char *str)
3829 {
3830 
3831 	if (vp && !IGNORE_LOCK(vp) &&
3832 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
3833 		vfs_badlock("is not exclusive locked by another thread",
3834 		    str, vp);
3835 }
3836 
3837 void
3838 assert_vop_slocked(struct vnode *vp, const char *str)
3839 {
3840 
3841 	if (vp && !IGNORE_LOCK(vp) &&
3842 	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
3843 		vfs_badlock("is not locked shared but should be", str, vp);
3844 }
3845 #endif /* 0 */
3846 
3847 void
3848 vop_rename_pre(void *ap)
3849 {
3850 	struct vop_rename_args *a = ap;
3851 
3852 	if (a->a_tvp)
3853 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3854 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3855 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3856 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3857 
3858 	/* Check the source (from). */
3859 	if (a->a_tdvp != a->a_fdvp)
3860 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3861 	if (a->a_tvp != a->a_fvp)
3862 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3863 
3864 	/* Check the target. */
3865 	if (a->a_tvp)
3866 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3867 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3868 }
3869 
3870 void
3871 vop_strategy_pre(void *ap)
3872 {
3873 	struct vop_strategy_args *a;
3874 	struct buf *bp;
3875 
3876 	a = ap;
3877 	bp = a->a_bp;
3878 
3879 	/*
3880 	 * Cluster ops lock their component buffers but not the IO container.
3881 	 */
3882 	if ((bp->b_flags & B_CLUSTER) != 0)
3883 		return;
3884 
3885 	if (BUF_REFCNT(bp) < 1) {
3886 		if (vfs_badlock_print)
3887 			printf(
3888 			    "VOP_STRATEGY: bp is not locked but should be\n");
3889 		if (vfs_badlock_ddb)
3890 			kdb_enter("lock violation");
3891 	}
3892 }
3893 
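/*
 * On entry to VOP_LOOKUP() the directory vnode must be locked while its
 * interlock must not be held.
 */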
3894 void
3895 vop_lookup_pre(void *ap)
3896 {
3897 	struct vop_lookup_args *a;
3898 	struct vnode *dvp;
3899 
3900 	a = ap;
3901 	dvp = a->a_dvp;
3902 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3903 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3904 }
3905 
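/*
 * Validate the directory lock state that VOP_LOOKUP() is required to
 * leave behind; the expected state depends on the component name flags
 * and on whether the lookup succeeded.
 */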
3906 void
3907 vop_lookup_post(void *ap, int rc)
3908 {
3909 	struct vop_lookup_args *a;
3910 	struct componentname *cnp;
3911 	struct vnode *dvp;
3912 	struct vnode *vp;
3913 	int flags;
3914 
3915 	a = ap;
3916 	dvp = a->a_dvp;
3917 	cnp = a->a_cnp;
3918 	vp = *(a->a_vpp);
3919 	flags = cnp->cn_flags;
3920 
3921 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3922 
3923 	/*
3924 	 * If this is the last path component of this lookup and LOCKPARENT
3925 	 * is set, or if the lookup failed, the directory must be locked.
3926 	 */
3927 	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
3928 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
3929 	else if (rc != 0)
3930 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
3931 	else if (dvp != vp)
3932 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
3933 	if (flags & PDIRUNLOCK)
3934 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
3935 }
3936 
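/*
 * vop_lock_pre() and vop_lock_post() check the interlock protocol for
 * VOP_LOCK(): the vnode interlock must be held on entry exactly when
 * LK_INTERLOCK is passed, and it is never held on return.
 */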
3937 void
3938 vop_lock_pre(void *ap)
3939 {
3940 	struct vop_lock_args *a = ap;
3941 
3942 	if ((a->a_flags & LK_INTERLOCK) == 0)
3943 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3944 	else
3945 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3946 }
3947 
3948 void
3949 vop_lock_post(void *ap, int rc)
3950 {
3951 	struct vop_lock_args *a = ap;
3952 
3953 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3954 	if (rc == 0)
3955 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3956 }
3957 
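/*
 * vop_unlock_pre() and vop_unlock_post() mirror the checks above for
 * VOP_UNLOCK(): the vnode must be locked on entry, and a caller passing
 * LK_INTERLOCK must hold the interlock on entry and have it released by
 * return.
 */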
3958 void
3959 vop_unlock_pre(void *ap)
3960 {
3961 	struct vop_unlock_args *a = ap;
3962 
3963 	if (a->a_flags & LK_INTERLOCK)
3964 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3965 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3966 }
3967 
3968 void
3969 vop_unlock_post(void *ap, int rc)
3970 {
3971 	struct vop_unlock_args *a = ap;
3972 
3973 	if (a->a_flags & LK_INTERLOCK)
3974 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3975 }
3976 #endif /* DEBUG_VFS_LOCKS */
3977 
3978 static struct klist fs_klist = SLIST_HEAD_INITIALIZER(&fs_klist);
3979 
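/*
 * Post a filesystem event to every knote attached to fs_klist.
 * Filesystem code calls this with the mount's fsid and a VQ_* event; an
 * illustrative (not verbatim) call site might look like:
 *
 *	vfs_event_signal(&mp->mnt_stat.f_fsid, VQ_UNMOUNT, 0);
 *
 * Userland listens with an EVFILT_FS kevent(2) filter; a minimal sketch,
 * assuming an already-created kqueue descriptor kq:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 0, EVFILT_FS, EV_ADD | EV_CLEAR, 0, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */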
3980 void
3981 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
3982 {
3983 
3984 	KNOTE(&fs_klist, event);
3985 }
3986 
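/*
 * kqueue filter backing EVFILT_FS: attach and detach maintain fs_klist,
 * and filt_fsevent() accumulates VQ_* hints into kn_fflags until the
 * event is retrieved (EV_CLEAR resets it afterwards).
 */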
3987 static int	filt_fsattach(struct knote *kn);
3988 static void	filt_fsdetach(struct knote *kn);
3989 static int	filt_fsevent(struct knote *kn, long hint);
3990 
3991 struct filterops fs_filtops =
3992 	{ 0, filt_fsattach, filt_fsdetach, filt_fsevent };
3993 
3994 static int
3995 filt_fsattach(struct knote *kn)
3996 {
3997 
3998 	kn->kn_flags |= EV_CLEAR;
3999 	SLIST_INSERT_HEAD(&fs_klist, kn, kn_selnext);
4000 	return (0);
4001 }
4002 
4003 static void
4004 filt_fsdetach(struct knote *kn)
4005 {
4006 
4007 	SLIST_REMOVE(&fs_klist, kn, knote, kn_selnext);
4008 }
4009 
4010 static int
4011 filt_fsevent(struct knote *kn, long hint)
4012 {
4013 
4014 	kn->kn_fflags |= hint;
4015 	return (kn->kn_fflags != 0);
4016 }
4017 
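/*
 * Handler for the vfs.ctl sysctl: decode a struct vfsidctl from the
 * request, look up the mount by fsid, verify the filesystem type name
 * ("*" matches any type), and forward the remainder of the request to
 * the filesystem's VFS_SYSCTL() method.  An illustrative userland
 * sketch only, assuming a statfs(2) result in sb:
 *
 *	struct vfsidctl vc;
 *
 *	memset(&vc, 0, sizeof(vc));
 *	vc.vc_vers = VFS_CTL_VERS1;
 *	vc.vc_fsid = sb.f_fsid;
 *	strlcpy(vc.vc_fstypename, "*", sizeof(vc.vc_fstypename));
 *	vc.vc_op = VFS_CTL_QUERY;
 *	sysctlbyname("vfs.ctl", NULL, NULL, &vc, sizeof(vc));
 */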
4018 static int
4019 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
4020 {
4021 	struct vfsidctl vc;
4022 	int error;
4023 	struct mount *mp;
4024 
4025 	error = SYSCTL_IN(req, &vc, sizeof(vc));
4026 	if (error)
4027 		return (error);
4028 	if (vc.vc_vers != VFS_CTL_VERS1)
4029 		return (EINVAL);
4030 	mp = vfs_getvfs(&vc.vc_fsid);
4031 	if (mp == NULL)
4032 		return (ENOENT);
4033 	/* ensure that a specific sysctl goes to the right filesystem. */
4034 	/* Ensure that a specific sysctl goes to the right filesystem. */
4035 	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4036 		return (EINVAL);
4037 	}
4038 	VCTLTOREQ(&vc, req);
4039 	return (VFS_SYSCTL(mp, vc.vc_op, req));
4040 }
4041 
4042 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR,
4043 	NULL, 0, sysctl_vfs_ctl, "", "Sysctl by fsid");
4044