xref: /freebsd/sys/kern/vfs_subr.c (revision 6b806d21d144c25f4fad714e1c0cf780f5e27d7e)
1 /*-
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  */
36 
37 /*
38  * External virtual filesystem routines
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/conf.h>
52 #include <sys/event.h>
53 #include <sys/eventhandler.h>
54 #include <sys/extattr.h>
55 #include <sys/fcntl.h>
56 #include <sys/kdb.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/mac.h>
60 #include <sys/malloc.h>
61 #include <sys/mount.h>
62 #include <sys/namei.h>
63 #include <sys/reboot.h>
64 #include <sys/sleepqueue.h>
65 #include <sys/stat.h>
66 #include <sys/sysctl.h>
67 #include <sys/syslog.h>
68 #include <sys/vmmeter.h>
69 #include <sys/vnode.h>
70 
71 #include <machine/stdarg.h>
72 
73 #include <vm/vm.h>
74 #include <vm/vm_object.h>
75 #include <vm/vm_extern.h>
76 #include <vm/pmap.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_page.h>
79 #include <vm/vm_kern.h>
80 #include <vm/uma.h>
81 
82 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
83 
84 static void	delmntque(struct vnode *vp);
85 static void	insmntque(struct vnode *vp, struct mount *mp);
86 static void	vlruvp(struct vnode *vp);
87 static int	flushbuflist(struct bufv *bufv, int flags, struct vnode *vp,
88 		    int slpflag, int slptimeo);
89 static void	syncer_shutdown(void *arg, int howto);
90 static int	vtryrecycle(struct vnode *vp);
91 static void	vx_lock(struct vnode *vp);
92 static void	vx_unlock(struct vnode *vp);
93 
94 
95 /*
96  * Enable Giant pushdown based on whether or not the vm is mpsafe in this
97  * build.  Without mpsafevm the buffer cache cannot run Giant free.
98  */
99 int mpsafe_vfs = 0;
100 TUNABLE_INT("debug.mpsafevfs", &mpsafe_vfs);
101 SYSCTL_INT(_debug, OID_AUTO, mpsafevfs, CTLFLAG_RD, &mpsafe_vfs, 0,
102     "MPSAFE VFS");
103 
104 /*
105  * Number of vnodes in existence.  Increased whenever getnewvnode()
106  * allocates a new vnode, never decreased.
107  */
108 static unsigned long	numvnodes;
109 
110 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
111 
112 /*
113  * Conversion tables for conversion from vnode types to inode formats
114  * and back.
115  */
116 enum vtype iftovt_tab[16] = {
117 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
118 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
119 };
120 int vttoif_tab[9] = {
121 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
122 	S_IFSOCK, S_IFIFO, S_IFMT,
123 };
124 
125 /*
126  * List of vnodes that are ready for recycling.
127  */
128 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
129 
130 /*
131  * Minimum number of free vnodes.  If there are fewer free vnodes than this,
132  * getnewvnode() will return a newly allocated vnode.
133  */
134 static u_long wantfreevnodes = 25;
135 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
136 /* Number of vnodes in the free list. */
137 static u_long freevnodes;
138 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
139 
140 /*
141  * Various variables used for debugging the new implementation of
142  * reassignbuf().
143  * XXX these are probably of (very) limited utility now.
144  */
145 static int reassignbufcalls;
146 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
147 static int nameileafonly;
148 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
149 
150 /*
151  * Cache for the mount type id assigned to NFS.  This is used for
152  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
153  */
154 int	nfs_mount_type = -1;
155 
156 /* To keep more than one thread at a time from running vfs_getnewfsid */
157 static struct mtx mntid_mtx;
158 
159 /*
160  * Lock for any access to the following:
161  *	vnode_free_list
162  *	numvnodes
163  *	freevnodes
164  */
165 static struct mtx vnode_free_list_mtx;
166 
167 /* Publicly exported FS */
168 struct nfs_public nfs_pub;
169 
170 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
171 static uma_zone_t vnode_zone;
172 static uma_zone_t vnodepoll_zone;
173 
174 /* Set to 1 to print out reclaim of active vnodes */
175 int	prtactive;
176 
177 /*
178  * The workitem queue.
179  *
180  * It is useful to delay writes of file data and filesystem metadata
181  * for tens of seconds so that quickly created and deleted files need
182  * not waste disk bandwidth being created and removed. To realize this,
183  * we append vnodes to a "workitem" queue. When running with a soft
184  * updates implementation, most pending metadata dependencies should
185  * not wait for more than a few seconds. Thus, metadata updates are
186  * delayed only about half the time that file data is delayed.
187  * Similarly, directory updates are more critical, so they are delayed
188  * only about a third of the time that file data is delayed.
189  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
190  * one each second (driven off the filesystem syncer process). The
191  * syncer_delayno variable indicates the next queue that is to be processed.
192  * Items that need to be processed soon are placed in this queue:
193  *
194  *	syncer_workitem_pending[syncer_delayno]
195  *
196  * A delay of fifteen seconds is done by placing the request fifteen
197  * entries later in the queue:
198  *
199  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
200  *
201  */
202 static int syncer_delayno;
203 static long syncer_mask;
204 LIST_HEAD(synclist, bufobj);
205 static struct synclist *syncer_workitem_pending;
206 /*
207  * The sync_mtx protects:
208  *	bo->bo_synclist
209  *	sync_vnode_count
210  *	syncer_delayno
211  *	syncer_state
212  *	syncer_workitem_pending
213  *	syncer_worklist_len
214  *	rushjob
215  */
216 static struct mtx sync_mtx;
217 
218 #define SYNCER_MAXDELAY		32
219 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
220 static int syncdelay = 30;		/* max time to delay syncing data */
221 static int filedelay = 30;		/* time to delay syncing files */
222 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
223 static int dirdelay = 29;		/* time to delay syncing directories */
224 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
225 static int metadelay = 28;		/* time to delay syncing metadata */
226 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
227 static int rushjob;		/* number of slots to run ASAP */
228 static int stat_rush_requests;	/* number of times I/O speeded up */
229 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
230 
231 /*
232  * When shutting down the syncer, run it at four times normal speed.
233  */
234 #define SYNCER_SHUTDOWN_SPEEDUP		4
235 static int sync_vnode_count;
236 static int syncer_worklist_len;
237 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
238     syncer_state;
239 
240 /*
241  * Number of vnodes we want to exist at any one time.  This is mostly used
242  * to size hash tables in vnode-related code.  It is normally not used in
243  * getnewvnode(), as wantfreevnodes is normally nonzero.
244  *
245  * XXX desiredvnodes is historical cruft and should not exist.
246  */
247 int desiredvnodes;
248 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
249     &desiredvnodes, 0, "Maximum number of vnodes");
250 static int minvnodes;
251 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
252     &minvnodes, 0, "Minimum number of vnodes");
253 static int vnlru_nowhere;
254 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
255     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
256 
257 /* Hook for calling soft updates. */
258 int (*softdep_process_worklist_hook)(struct mount *);
259 
260 /*
261  * Initialize the vnode management data structures.
262  */
263 #ifndef	MAXVNODES_MAX
264 #define	MAXVNODES_MAX	100000
265 #endif
266 static void
267 vntblinit(void *dummy __unused)
268 {
269 
270 	/*
271 	 * Desiredvnodes is a function of the physical memory size and
272 	 * the kernel's heap size.  Specifically, desiredvnodes scales
273 	 * in proportion to the physical memory size until two fifths
274 	 * of the kernel's heap size is consumed by vnodes and vm
275 	 * objects.
276 	 */
277 	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
278 	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
279 	if (desiredvnodes > MAXVNODES_MAX) {
280 		if (bootverbose)
281 			printf("Reducing kern.maxvnodes %d -> %d\n",
282 			    desiredvnodes, MAXVNODES_MAX);
283 		desiredvnodes = MAXVNODES_MAX;
284 	}
285 	minvnodes = desiredvnodes / 4;
286 	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
287 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
288 	TAILQ_INIT(&vnode_free_list);
289 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
290 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
291 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
292 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
293 	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
294 	/*
295 	 * Initialize the filesystem syncer.
296 	 */
297 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
298 		&syncer_mask);
299 	syncer_maxdelay = syncer_mask + 1;
300 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
301 }
302 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
303 
304 
305 /*
306  * Mark a mount point as busy. Used to synchronize access and to delay
307  * unmounting. Interlock is not released on failure.
308  */
309 int
310 vfs_busy(mp, flags, interlkp, td)
311 	struct mount *mp;
312 	int flags;
313 	struct mtx *interlkp;
314 	struct thread *td;
315 {
316 	int lkflags;
317 
318 	MNT_ILOCK(mp);
319 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
320 		if (flags & LK_NOWAIT) {
321 			MNT_IUNLOCK(mp);
322 			return (ENOENT);
323 		}
324 		if (interlkp)
325 			mtx_unlock(interlkp);
326 		mp->mnt_kern_flag |= MNTK_MWAIT;
327 		/*
328 		 * Since all busy locks are shared except the exclusive
329 		 * lock granted when unmounting, the only place that a
330 		 * wakeup needs to be done is at the release of the
331 		 * exclusive lock at the end of dounmount.
332 		 */
333 		msleep(mp, MNT_MTX(mp), PVFS|PDROP, "vfs_busy", 0);
334 		if (interlkp)
335 			mtx_lock(interlkp);
336 		return (ENOENT);
337 	}
338 	if (interlkp)
339 		mtx_unlock(interlkp);
340 	lkflags = LK_SHARED | LK_NOPAUSE | LK_INTERLOCK;
341 	if (lockmgr(&mp->mnt_lock, lkflags, MNT_MTX(mp), td))
342 		panic("vfs_busy: unexpected lock failure");
343 	return (0);
344 }
345 
346 /*
347  * Free a busy filesystem.
348  */
349 void
350 vfs_unbusy(mp, td)
351 	struct mount *mp;
352 	struct thread *td;
353 {
354 
355 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
356 }
357 
358 /*
359  * Lookup a mount point by filesystem identifier.
360  */
361 struct mount *
362 vfs_getvfs(fsid)
363 	fsid_t *fsid;
364 {
365 	struct mount *mp;
366 
367 	mtx_lock(&mountlist_mtx);
368 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
369 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
370 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
371 			mtx_unlock(&mountlist_mtx);
372 			return (mp);
373 		}
374 	}
375 	mtx_unlock(&mountlist_mtx);
376 	return ((struct mount *) 0);
377 }
378 
379 /*
380  * Check if a user can access privileged mount options.
381  */
382 int
383 vfs_suser(struct mount *mp, struct thread *td)
384 {
385 	int error;
386 
387 	if ((mp->mnt_flag & MNT_USER) == 0 ||
388 	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
389 		if ((error = suser(td)) != 0)
390 			return (error);
391 	}
392 	return (0);
393 }
394 
395 /*
396  * Get a new unique fsid.  Try to make its val[0] unique, since this value
397  * will be used to create fake device numbers for stat().  Also try (but
398  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
399  * support 16-bit device numbers.  We end up with unique val[0]'s for the
400  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
401  *
402  * Keep in mind that several mounts may be running in parallel.  Starting
403  * the search one past where the previous search terminated is both a
404  * micro-optimization and a defense against returning the same fsid to
405  * different mounts.
406  */
407 void
408 vfs_getnewfsid(mp)
409 	struct mount *mp;
410 {
411 	static u_int16_t mntid_base;
412 	fsid_t tfsid;
413 	int mtype;
414 
415 	mtx_lock(&mntid_mtx);
416 	mtype = mp->mnt_vfc->vfc_typenum;
417 	tfsid.val[1] = mtype;
418 	mtype = (mtype & 0xFF) << 24;
419 	for (;;) {
420 		tfsid.val[0] = makedev(255,
421 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
422 		mntid_base++;
423 		if (vfs_getvfs(&tfsid) == NULL)
424 			break;
425 	}
426 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
427 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
428 	mtx_unlock(&mntid_mtx);
429 }
430 
431 /*
432  * Knob to control the precision of file timestamps:
433  *
434  *   0 = seconds only; nanoseconds zeroed.
435  *   1 = seconds and nanoseconds, accurate within 1/HZ.
436  *   2 = seconds and nanoseconds, truncated to microseconds.
437  * >=3 = seconds and nanoseconds, maximum precision.
438  */
439 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
440 
441 static int timestamp_precision = TSP_SEC;
442 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
443     &timestamp_precision, 0, "");
444 
445 /*
446  * Get a current timestamp.
447  */
448 void
449 vfs_timestamp(tsp)
450 	struct timespec *tsp;
451 {
452 	struct timeval tv;
453 
454 	switch (timestamp_precision) {
455 	case TSP_SEC:
456 		tsp->tv_sec = time_second;
457 		tsp->tv_nsec = 0;
458 		break;
459 	case TSP_HZ:
460 		getnanotime(tsp);
461 		break;
462 	case TSP_USEC:
463 		microtime(&tv);
464 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
465 		break;
466 	case TSP_NSEC:
467 	default:
468 		nanotime(tsp);
469 		break;
470 	}
471 }
472 
473 /*
474  * Set vnode attributes to VNOVAL
475  */
476 void
477 vattr_null(vap)
478 	struct vattr *vap;
479 {
480 
481 	vap->va_type = VNON;
482 	vap->va_size = VNOVAL;
483 	vap->va_bytes = VNOVAL;
484 	vap->va_mode = VNOVAL;
485 	vap->va_nlink = VNOVAL;
486 	vap->va_uid = VNOVAL;
487 	vap->va_gid = VNOVAL;
488 	vap->va_fsid = VNOVAL;
489 	vap->va_fileid = VNOVAL;
490 	vap->va_blocksize = VNOVAL;
491 	vap->va_rdev = VNOVAL;
492 	vap->va_atime.tv_sec = VNOVAL;
493 	vap->va_atime.tv_nsec = VNOVAL;
494 	vap->va_mtime.tv_sec = VNOVAL;
495 	vap->va_mtime.tv_nsec = VNOVAL;
496 	vap->va_ctime.tv_sec = VNOVAL;
497 	vap->va_ctime.tv_nsec = VNOVAL;
498 	vap->va_birthtime.tv_sec = VNOVAL;
499 	vap->va_birthtime.tv_nsec = VNOVAL;
500 	vap->va_flags = VNOVAL;
501 	vap->va_gen = VNOVAL;
502 	vap->va_vaflags = 0;
503 }
504 
505 /*
506  * This routine is called when we have too many vnodes.  It attempts
507  * to free <count> vnodes and will potentially free vnodes that still
508  * have VM backing store (VM backing store is typically the cause
509  * of a vnode blowout so we want to do this).  Therefore, this operation
510  * is not considered cheap.
511  *
512  * A number of conditions may prevent a vnode from being reclaimed:
513  * the buffer cache may have references on the vnode, a directory
514  * vnode may still have references due to the namei cache representing
515  * underlying files, or the vnode may be in active use.  It is not
516  * desirable to reuse such vnodes.  These conditions may cause the
517  * number of vnodes to reach some minimum value regardless of what
518  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
519  */
520 static int
521 vlrureclaim(struct mount *mp)
522 {
523 	struct vnode *vp;
524 	int done;
525 	int trigger;
526 	int usevnodes;
527 	int count;
528 
529 	/*
530 	 * Calculate the trigger point, don't allow user
531 	 * screwups to blow us up.   This prevents us from
532 	 * recycling vnodes with lots of resident pages.  We
533 	 * aren't trying to free memory, we are trying to
534 	 * free vnodes.
535 	 */
536 	usevnodes = desiredvnodes;
537 	if (usevnodes <= 0)
538 		usevnodes = 1;
539 	trigger = cnt.v_page_count * 2 / usevnodes;
540 
541 	done = 0;
542 	MNT_ILOCK(mp);
543 	count = mp->mnt_nvnodelistsize / 10 + 1;
544 	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
545 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
546 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
547 
548 		if (vp->v_type != VNON &&
549 		    vp->v_type != VBAD &&
550 		    VI_TRYLOCK(vp)) {
551 			if (VMIGHTFREE(vp) &&           /* critical path opt */
552 			    (vp->v_object == NULL ||
553 			    vp->v_object->resident_page_count < trigger)) {
554 				MNT_IUNLOCK(mp);
555 				vgonel(vp, curthread);
556 				done++;
557 				MNT_ILOCK(mp);
558 			} else
559 				VI_UNLOCK(vp);
560 		}
561 		--count;
562 	}
563 	MNT_IUNLOCK(mp);
564 	return done;
565 }
566 
567 /*
568  * Attempt to recycle vnodes in a context that is always safe to block.
569  * Calling vlrureclaim() from the bowels of filesystem code has some
570  * interesting deadlock problems.
571  */
572 static struct proc *vnlruproc;
573 static int vnlruproc_sig;
574 
575 static void
576 vnlru_proc(void)
577 {
578 	struct mount *mp, *nmp;
579 	int done;
580 	struct proc *p = vnlruproc;
581 	struct thread *td = FIRST_THREAD_IN_PROC(p);
582 
583 	mtx_lock(&Giant);
584 
585 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
586 	    SHUTDOWN_PRI_FIRST);
587 
588 	for (;;) {
589 		kthread_suspend_check(p);
590 		mtx_lock(&vnode_free_list_mtx);
591 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
592 			vnlruproc_sig = 0;
593 			wakeup(&vnlruproc_sig);
594 			msleep(vnlruproc, &vnode_free_list_mtx,
595 			    PVFS|PDROP, "vlruwt", hz);
596 			continue;
597 		}
598 		mtx_unlock(&vnode_free_list_mtx);
599 		done = 0;
600 		mtx_lock(&mountlist_mtx);
601 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
602 			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
603 				nmp = TAILQ_NEXT(mp, mnt_list);
604 				continue;
605 			}
606 			done += vlrureclaim(mp);
607 			mtx_lock(&mountlist_mtx);
608 			nmp = TAILQ_NEXT(mp, mnt_list);
609 			vfs_unbusy(mp, td);
610 		}
611 		mtx_unlock(&mountlist_mtx);
612 		if (done == 0) {
613 #if 0
614 			/* These messages are temporary debugging aids */
615 			if (vnlru_nowhere < 5)
616 				printf("vnlru process getting nowhere..\n");
617 			else if (vnlru_nowhere == 5)
618 				printf("vnlru process messages stopped.\n");
619 #endif
620 			vnlru_nowhere++;
621 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
622 		}
623 	}
624 }
625 
626 static struct kproc_desc vnlru_kp = {
627 	"vnlru",
628 	vnlru_proc,
629 	&vnlruproc
630 };
631 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
632 
633 
634 /*
635  * Routines having to do with the management of the vnode table.
636  */
637 
638 /*
639  * Check to see if a free vnode can be recycled.  If it can, recycle
640  * it and return 0; otherwise return an error.
641  */
642 static int
643 vtryrecycle(struct vnode *vp)
644 {
645 	struct thread *td = curthread;
646 	vm_object_t object;
647 	struct mount *vnmp;
648 	int error;
649 
650 	/* Don't recycle if we can't get the interlock */
651 	if (!VI_TRYLOCK(vp))
652 		return (EWOULDBLOCK);
653 	if (!VCANRECYCLE(vp)) {
654 		VI_UNLOCK(vp);
655 		return (EBUSY);
656 	}
657 	/*
658 	 * This vnode may be found and locked via some other list; if so, we
659 	 * can't recycle it yet.
660 	 */
661 	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
662 		return (EWOULDBLOCK);
663 	/*
664 	 * Don't recycle if its filesystem is being suspended.
665 	 */
666 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
667 		VOP_UNLOCK(vp, 0, td);
668 		return (EBUSY);
669 	}
670 
671 	/*
672 	 * Don't recycle if we still have cached pages.
673 	 */
674 	object = vp->v_object;
675 	if (object != NULL) {
676 		VM_OBJECT_LOCK(object);
677 		if (object->resident_page_count ||
678 		    object->ref_count) {
679 			VM_OBJECT_UNLOCK(object);
680 			error = EBUSY;
681 			goto done;
682 		}
683 		VM_OBJECT_UNLOCK(object);
684 	}
685 	if (LIST_FIRST(&vp->v_cache_src)) {
686 		/*
687 		 * note: nameileafonly sysctl is temporary,
688 		 * for debugging only, and will eventually be
689 		 * removed.
690 		 */
691 		if (nameileafonly > 0) {
692 			/*
693 			 * Do not reuse namei-cached directory
694 			 * vnodes that have cached
695 			 * subdirectories.
696 			 */
697 			if (cache_leaf_test(vp) < 0) {
698 				error = EISDIR;
699 				goto done;
700 			}
701 		} else if (nameileafonly < 0 ||
702 			    vmiodirenable == 0) {
703 			/*
704 			 * Do not reuse namei-cached directory
705 			 * vnodes if nameileafonly is -1 or
706 			 * if VMIO backing for directories is
707 			 * turned off (otherwise we reuse them
708 			 * too quickly).
709 			 */
710 			error = EBUSY;
711 			goto done;
712 		}
713 	}
714 	/*
715 	 * If we got this far, we need to acquire the interlock and see if
716 	 * anyone picked up this vnode from another list.  If not, we will
717 	 * mark it with XLOCK via vgonel() so that anyone who does find it
718 	 * will skip over it.
719 	 */
720 	VI_LOCK(vp);
721 	if (!VCANRECYCLE(vp)) {
722 		VI_UNLOCK(vp);
723 		error = EBUSY;
724 		goto done;
725 	}
726 	mtx_lock(&vnode_free_list_mtx);
727 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
728 	vp->v_iflag &= ~VI_FREE;
729 	mtx_unlock(&vnode_free_list_mtx);
730 	vp->v_iflag |= VI_DOOMED;
731 	if ((vp->v_type != VBAD) || (vp->v_data != NULL)) {
732 		VOP_UNLOCK(vp, 0, td);
733 		vgonel(vp, td);
734 	} else
735 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
736 	vn_finished_write(vnmp);
737 	return (0);
738 done:
739 	VOP_UNLOCK(vp, 0, td);
740 	vn_finished_write(vnmp);
741 	return (error);
742 }
743 
744 /*
745  * Return the next vnode from the free list.
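 *
 * A filesystem typically obtains a vnode with a call along the lines of
 *	error = getnewvnode("tag", mp, &some_vnodeops, &vp);
 * and then hangs its private data off vp->v_data (the tag string and vop
 * vector above are illustrative only).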
746  */
747 int
748 getnewvnode(tag, mp, vops, vpp)
749 	const char *tag;
750 	struct mount *mp;
751 	struct vop_vector *vops;
752 	struct vnode **vpp;
753 {
754 	struct vnode *vp = NULL;
755 	struct vpollinfo *pollinfo = NULL;
756 	struct bufobj *bo;
757 
758 	mtx_lock(&vnode_free_list_mtx);
759 
760 	/*
761 	 * Try to reuse vnodes if we hit the max.  This situation only
762 	 * occurs in certain large-memory (2G+) situations.  We cannot
763 	 * attempt to directly reclaim vnodes due to nasty recursion
764 	 * problems.
765 	 */
766 	while (numvnodes - freevnodes > desiredvnodes) {
767 		if (vnlruproc_sig == 0) {
768 			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
769 			wakeup(vnlruproc);
770 		}
771 		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
772 		    "vlruwk", hz);
773 	}
774 
775 	/*
776 	 * Attempt to reuse a vnode already on the free list, allocating
777 	 * a new vnode if we can't find one or if we have not yet reached
778 	 * the minimum needed for good LRU performance.
779 	 */
780 
781 	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
782 		int error;
783 		int count;
784 
785 		for (count = 0; count < freevnodes; count++) {
786 			vp = TAILQ_FIRST(&vnode_free_list);
787 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
788 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
789 			mtx_unlock(&vnode_free_list_mtx);
790 			error = vtryrecycle(vp);
791 			mtx_lock(&vnode_free_list_mtx);
792 			if (error == 0)
793 				break;
794 			vp = NULL;
795 		}
796 	}
797 	if (vp) {
798 		freevnodes--;
799 		bo = &vp->v_bufobj;
800 		mtx_unlock(&vnode_free_list_mtx);
801 
802 #ifdef INVARIANTS
803 		{
804 			if (vp->v_data)
805 				printf("cleaned vnode isn't, "
806 				       "address %p, inode %p\n",
807 				       vp, vp->v_data);
808 			if (bo->bo_numoutput)
809 				panic("%p: Clean vnode has pending I/O's", vp);
810 			if (vp->v_usecount != 0)
811 				panic("%p: Non-zero use count", vp);
812 			if (vp->v_writecount != 0)
813 				panic("%p: Non-zero write count", vp);
814 		}
815 #endif
816 		if ((pollinfo = vp->v_pollinfo) != NULL) {
817 			/*
818 			 * To avoid lock order reversals, the call to
819 			 * uma_zfree() must be delayed until the vnode
820 			 * interlock is released.
821 			 */
822 			vp->v_pollinfo = NULL;
823 		}
824 #ifdef MAC
825 		mac_destroy_vnode(vp);
826 #endif
827 		vp->v_iflag = 0;
828 		vp->v_vflag = 0;
829 		vp->v_lastw = 0;
830 		vp->v_lasta = 0;
831 		vp->v_cstart = 0;
832 		vp->v_clen = 0;
833 		vp->v_socket = 0;
834 		lockdestroy(vp->v_vnlock);
835 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
836 		KASSERT(bo->bo_clean.bv_cnt == 0, ("cleanbufcnt not 0"));
837 		KASSERT(bo->bo_clean.bv_root == NULL, ("cleanblkroot not NULL"));
838 		KASSERT(bo->bo_dirty.bv_cnt == 0, ("dirtybufcnt not 0"));
839 		KASSERT(bo->bo_dirty.bv_root == NULL, ("dirtyblkroot not NULL"));
840 	} else {
841 		numvnodes++;
842 		mtx_unlock(&vnode_free_list_mtx);
843 
844 		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
845 		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
846 		vp->v_dd = vp;
847 		bo = &vp->v_bufobj;
848 		bo->__bo_vnode = vp;
849 		bo->bo_mtx = &vp->v_interlock;
850 		vp->v_vnlock = &vp->v_lock;
851 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
852 		cache_purge(vp);		/* Sets up v_id. */
853 		LIST_INIT(&vp->v_cache_src);
854 		TAILQ_INIT(&vp->v_cache_dst);
855 	}
856 
857 	TAILQ_INIT(&bo->bo_clean.bv_hd);
858 	TAILQ_INIT(&bo->bo_dirty.bv_hd);
859 	bo->bo_ops = &buf_ops_bio;
860 	bo->bo_private = vp;
861 	vp->v_type = VNON;
862 	vp->v_tag = tag;
863 	vp->v_op = vops;
864 	*vpp = vp;
865 	vp->v_usecount = 1;
866 	vp->v_data = 0;
867 	if (pollinfo != NULL) {
868 		knlist_destroy(&pollinfo->vpi_selinfo.si_note);
869 		mtx_destroy(&pollinfo->vpi_lock);
870 		uma_zfree(vnodepoll_zone, pollinfo);
871 	}
872 #ifdef MAC
873 	mac_init_vnode(vp);
874 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
875 		mac_associate_vnode_singlelabel(mp, vp);
876 	else if (mp == NULL)
877 		printf("NULL mp in getnewvnode()\n");
878 #endif
879 	delmntque(vp);
880 	if (mp != NULL) {
881 		insmntque(vp, mp);
882 		bo->bo_bsize = mp->mnt_stat.f_iosize;
883 	}
884 
885 	return (0);
886 }
887 
888 /*
889  * Delete from old mount point vnode list, if on one.
890  */
891 static void
892 delmntque(struct vnode *vp)
893 {
894 	struct mount *mp;
895 
896 	if (vp->v_mount == NULL)
897 		return;
898 	mp = vp->v_mount;
899 	MNT_ILOCK(mp);
900 	vp->v_mount = NULL;
901 	KASSERT(mp->mnt_nvnodelistsize > 0,
902 		("bad mount point vnode list size"));
903 	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
904 	mp->mnt_nvnodelistsize--;
905 	MNT_IUNLOCK(mp);
906 }
907 
908 /*
909  * Insert into list of vnodes for the new mount point, if available.
910  */
911 static void
912 insmntque(struct vnode *vp, struct mount *mp)
913 {
914 
915 	vp->v_mount = mp;
916 	KASSERT(mp != NULL, ("Don't call insmntque(foo, NULL)"));
917 	MNT_ILOCK(vp->v_mount);
918 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
919 	mp->mnt_nvnodelistsize++;
920 	MNT_IUNLOCK(vp->v_mount);
921 }
922 
923 /*
924  * Flush out and invalidate all buffers associated with a vnode.
925  * Called with the underlying object locked.
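 *
 * A typical caller that wants dirty data written out first would use
 * something like vinvalbuf(vp, V_SAVE, td, 0, 0) with the vnode locked.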
926  */
927 int
928 vinvalbuf(vp, flags, td, slpflag, slptimeo)
929 	struct vnode *vp;
930 	int flags;
931 	struct thread *td;
932 	int slpflag, slptimeo;
933 {
934 	int error;
935 	struct bufobj *bo;
936 
937 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
938 
939 	bo = &vp->v_bufobj;
940 	BO_LOCK(bo);
941 	if (flags & V_SAVE) {
942 		error = bufobj_wwait(bo, slpflag, slptimeo);
943 		if (error) {
944 			BO_UNLOCK(bo);
945 			return (error);
946 		}
947 		if (bo->bo_dirty.bv_cnt > 0) {
948 			BO_UNLOCK(bo);
949 			if ((error = BO_SYNC(bo, MNT_WAIT, td)) != 0)
950 				return (error);
951 			/*
952 			 * XXX We could save a lock/unlock if this was only
953 			 * enabled under INVARIANTS
954 			 */
955 			BO_LOCK(bo);
956 			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
957 				panic("vinvalbuf: dirty bufs");
958 		}
959 	}
960 	/*
961 	 * If you alter this loop please notice that interlock is dropped and
962 	 * reacquired in flushbuflist.  Special care is needed to ensure that
963 	 * no race conditions occur from this.
964 	 */
965 	do {
966 		error = flushbuflist(&bo->bo_clean,
967 		    flags, vp, slpflag, slptimeo);
968 		if (error == 0)
969 			error = flushbuflist(&bo->bo_dirty,
970 			    flags, vp, slpflag, slptimeo);
971 		if (error != 0 && error != EAGAIN) {
972 			BO_UNLOCK(bo);
973 			return (error);
974 		}
975 	} while (error != 0);
976 
977 	/*
978 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
979 	 * have write I/O in-progress but if there is a VM object then the
980 	 * VM object can also have read-I/O in-progress.
981 	 */
982 	do {
983 		bufobj_wwait(bo, 0, 0);
984 		BO_UNLOCK(bo);
985 		if (bo->bo_object != NULL) {
986 			VM_OBJECT_LOCK(bo->bo_object);
987 			vm_object_pip_wait(bo->bo_object, "vnvlbx");
988 			VM_OBJECT_UNLOCK(bo->bo_object);
989 		}
990 		BO_LOCK(bo);
991 	} while (bo->bo_numoutput > 0);
992 	BO_UNLOCK(bo);
993 
994 	/*
995 	 * Destroy the copy in the VM cache, too.
996 	 */
997 	if (bo->bo_object != NULL) {
998 		VM_OBJECT_LOCK(bo->bo_object);
999 		vm_object_page_remove(bo->bo_object, 0, 0,
1000 			(flags & V_SAVE) ? TRUE : FALSE);
1001 		VM_OBJECT_UNLOCK(bo->bo_object);
1002 	}
1003 
1004 #ifdef INVARIANTS
1005 	BO_LOCK(bo);
1006 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1007 	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
1008 		panic("vinvalbuf: flush failed");
1009 	BO_UNLOCK(bo);
1010 #endif
1011 	return (0);
1012 }
1013 
1014 /*
1015  * Flush out buffers on the specified list.
1016  *
1017  */
1018 static int
1019 flushbuflist(bufv, flags, vp, slpflag, slptimeo)
1020 	struct bufv *bufv;
1021 	int flags;
1022 	struct vnode *vp;
1023 	int slpflag, slptimeo;
1024 {
1025 	struct buf *bp, *nbp;
1026 	int retval, error;
1027 	struct bufobj *bo;
1028 
1029 	bo = &vp->v_bufobj;
1030 	ASSERT_BO_LOCKED(bo);
1031 
1032 	retval = 0;
1033 	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1034 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1035 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1036 			continue;
1037 		}
1038 		retval = EAGAIN;
1039 		error = BUF_TIMELOCK(bp,
1040 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
1041 		    "flushbuf", slpflag, slptimeo);
1042 		if (error) {
1043 			BO_LOCK(bo);
1044 			return (error != ENOLCK ? error : EAGAIN);
1045 		}
1046 		/*
1047 		 * XXX Since there are no node locks for NFS, I
1048 		 * believe there is a slight chance that a delayed
1049 		 * write will occur while sleeping just above, so
1050 		 * check for it.  Note that vfs_bio_awrite expects
1051 		 * buffers to reside on a queue, while bwrite and
1052 		 * brelse do not.
1053 		 */
1054 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1055 			(flags & V_SAVE)) {
1056 
1057 			if (bp->b_vp == vp) {
1058 				if (bp->b_flags & B_CLUSTEROK) {
1059 					vfs_bio_awrite(bp);
1060 				} else {
1061 					bremfree(bp);
1062 					bp->b_flags |= B_ASYNC;
1063 					bwrite(bp);
1064 				}
1065 			} else {
1066 				bremfree(bp);
1067 				(void) bwrite(bp);
1068 			}
1069 			BO_LOCK(bo);
1070 			return (EAGAIN);
1071 		}
1072 		bremfree(bp);
1073 		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1074 		bp->b_flags &= ~B_ASYNC;
1075 		brelse(bp);
1076 		BO_LOCK(bo);
1077 	}
1078 	return (retval);
1079 }
1080 
1081 /*
1082  * Truncate a file's buffer and pages to a specified length.  This
1083  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1084  * sync activity.
1085  */
1086 int
1087 vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td, off_t length, int blksize)
1088 {
1089 	struct buf *bp, *nbp;
1090 	int anyfreed;
1091 	int trunclbn;
1092 	struct bufobj *bo;
1093 
1094 	/*
1095 	 * Round up to the *next* lbn.
1096 	 */
1097 	trunclbn = (length + blksize - 1) / blksize;
1098 
1099 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1100 restart:
1101 	VI_LOCK(vp);
1102 	bo = &vp->v_bufobj;
1103 	anyfreed = 1;
1104 	for (;anyfreed;) {
1105 		anyfreed = 0;
1106 		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1107 			if (bp->b_lblkno < trunclbn)
1108 				continue;
1109 			if (BUF_LOCK(bp,
1110 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1111 			    VI_MTX(vp)) == ENOLCK)
1112 				goto restart;
1113 
1114 			bremfree(bp);
1115 			bp->b_flags |= (B_INVAL | B_RELBUF);
1116 			bp->b_flags &= ~B_ASYNC;
1117 			brelse(bp);
1118 			anyfreed = 1;
1119 
1120 			if (nbp != NULL &&
1121 			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1122 			    (nbp->b_vp != vp) ||
1123 			    (nbp->b_flags & B_DELWRI))) {
1124 				goto restart;
1125 			}
1126 			VI_LOCK(vp);
1127 		}
1128 
1129 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1130 			if (bp->b_lblkno < trunclbn)
1131 				continue;
1132 			if (BUF_LOCK(bp,
1133 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1134 			    VI_MTX(vp)) == ENOLCK)
1135 				goto restart;
1136 			bremfree(bp);
1137 			bp->b_flags |= (B_INVAL | B_RELBUF);
1138 			bp->b_flags &= ~B_ASYNC;
1139 			brelse(bp);
1140 			anyfreed = 1;
1141 			if (nbp != NULL &&
1142 			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1143 			    (nbp->b_vp != vp) ||
1144 			    (nbp->b_flags & B_DELWRI) == 0)) {
1145 				goto restart;
1146 			}
1147 			VI_LOCK(vp);
1148 		}
1149 	}
1150 
1151 	if (length > 0) {
1152 restartsync:
1153 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1154 			if (bp->b_lblkno > 0)
1155 				continue;
1156 			/*
1157 			 * Since we hold the vnode lock this should only
1158 			 * fail if we're racing with the buf daemon.
1159 			 */
1160 			if (BUF_LOCK(bp,
1161 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1162 			    VI_MTX(vp)) == ENOLCK) {
1163 				goto restart;
1164 			}
1165 			KASSERT((bp->b_flags & B_DELWRI),
1166 			    ("buf(%p) on dirty queue without DELWRI", bp));
1167 
1168 			bremfree(bp);
1169 			bawrite(bp);
1170 			VI_LOCK(vp);
1171 			goto restartsync;
1172 		}
1173 	}
1174 
1175 	bufobj_wwait(bo, 0, 0);
1176 	VI_UNLOCK(vp);
1177 	vnode_pager_setsize(vp, length);
1178 
1179 	return (0);
1180 }
1181 
1182 /*
1183  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1184  * 		 a vnode.
1185  *
1186  *	NOTE: We have to deal with the special case of a background bitmap
1187  *	buffer, a situation where two buffers will have the same logical
1188  *	block offset.  We want (1) only the foreground buffer to be accessed
1189  *	in a lookup and (2) must differentiate between the foreground and
1190  *	background buffer in the splay tree algorithm because the splay
1191  *	tree cannot normally handle multiple entities with the same 'index'.
1192  *	We accomplish this by adding differentiating flags to the splay tree's
1193  *	numerical domain.
1194  */
1195 static
1196 struct buf *
1197 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1198 {
1199 	struct buf dummy;
1200 	struct buf *lefttreemax, *righttreemin, *y;
1201 
1202 	if (root == NULL)
1203 		return (NULL);
1204 	lefttreemax = righttreemin = &dummy;
1205 	for (;;) {
1206 		if (lblkno < root->b_lblkno ||
1207 		    (lblkno == root->b_lblkno &&
1208 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1209 			if ((y = root->b_left) == NULL)
1210 				break;
1211 			if (lblkno < y->b_lblkno) {
1212 				/* Rotate right. */
1213 				root->b_left = y->b_right;
1214 				y->b_right = root;
1215 				root = y;
1216 				if ((y = root->b_left) == NULL)
1217 					break;
1218 			}
1219 			/* Link into the new root's right tree. */
1220 			righttreemin->b_left = root;
1221 			righttreemin = root;
1222 		} else if (lblkno > root->b_lblkno ||
1223 		    (lblkno == root->b_lblkno &&
1224 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1225 			if ((y = root->b_right) == NULL)
1226 				break;
1227 			if (lblkno > y->b_lblkno) {
1228 				/* Rotate left. */
1229 				root->b_right = y->b_left;
1230 				y->b_left = root;
1231 				root = y;
1232 				if ((y = root->b_right) == NULL)
1233 					break;
1234 			}
1235 			/* Link into the new root's left tree. */
1236 			lefttreemax->b_right = root;
1237 			lefttreemax = root;
1238 		} else {
1239 			break;
1240 		}
1241 		root = y;
1242 	}
1243 	/* Assemble the new root. */
1244 	lefttreemax->b_right = root->b_left;
1245 	righttreemin->b_left = root->b_right;
1246 	root->b_left = dummy.b_right;
1247 	root->b_right = dummy.b_left;
1248 	return (root);
1249 }
1250 
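/*
 * Remove a buffer from its bufobj's clean or dirty list, splaying the
 * buffer to the root of the corresponding tree first.
 */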
1251 static void
1252 buf_vlist_remove(struct buf *bp)
1253 {
1254 	struct buf *root;
1255 	struct bufv *bv;
1256 
1257 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1258 	ASSERT_BO_LOCKED(bp->b_bufobj);
1259 	if (bp->b_xflags & BX_VNDIRTY)
1260 		bv = &bp->b_bufobj->bo_dirty;
1261 	else
1262 		bv = &bp->b_bufobj->bo_clean;
1263 	if (bp != bv->bv_root) {
1264 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1265 		KASSERT(root == bp, ("splay lookup failed in remove"));
1266 	}
1267 	if (bp->b_left == NULL) {
1268 		root = bp->b_right;
1269 	} else {
1270 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1271 		root->b_right = bp->b_right;
1272 	}
1273 	bv->bv_root = root;
1274 	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1275 	bv->bv_cnt--;
1276 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1277 }
1278 
1279 /*
1280  * Add the buffer to the sorted clean or dirty block list using a
1281  * splay tree algorithm.
1282  *
1283  * NOTE: xflags is passed as a constant, optimizing this inline function!
1284  */
1285 static void
1286 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1287 {
1288 	struct buf *root;
1289 	struct bufv *bv;
1290 
1291 	ASSERT_BO_LOCKED(bo);
1292 	bp->b_xflags |= xflags;
1293 	if (xflags & BX_VNDIRTY)
1294 		bv = &bo->bo_dirty;
1295 	else
1296 		bv = &bo->bo_clean;
1297 
1298 	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1299 	if (root == NULL) {
1300 		bp->b_left = NULL;
1301 		bp->b_right = NULL;
1302 		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1303 	} else if (bp->b_lblkno < root->b_lblkno ||
1304 	    (bp->b_lblkno == root->b_lblkno &&
1305 	    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1306 		bp->b_left = root->b_left;
1307 		bp->b_right = root;
1308 		root->b_left = NULL;
1309 		TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
1310 	} else {
1311 		bp->b_right = root->b_right;
1312 		bp->b_left = root;
1313 		root->b_right = NULL;
1314 		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
1315 	}
1316 	bv->bv_cnt++;
1317 	bv->bv_root = bp;
1318 }
1319 
1320 /*
1321  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1322  * shadow buffers used in background bitmap writes.
1323  *
1324  * This code isn't quite as efficient as it could be because we are maintaining
1325  * two sorted lists and do not know which list the block resides in.
1326  *
1327  * During a "make buildworld" the desired buffer is found at one of
1328  * the roots more than 60% of the time.  Thus, checking both roots
1329  * before performing either splay eliminates unnecessary splays on the
1330  * first tree splayed.
1331  */
1332 struct buf *
1333 gbincore(struct bufobj *bo, daddr_t lblkno)
1334 {
1335 	struct buf *bp;
1336 
1337 	ASSERT_BO_LOCKED(bo);
1338 	if ((bp = bo->bo_clean.bv_root) != NULL &&
1339 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1340 		return (bp);
1341 	if ((bp = bo->bo_dirty.bv_root) != NULL &&
1342 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1343 		return (bp);
1344 	if ((bp = bo->bo_clean.bv_root) != NULL) {
1345 		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
1346 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1347 			return (bp);
1348 	}
1349 	if ((bp = bo->bo_dirty.bv_root) != NULL) {
1350 		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
1351 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1352 			return (bp);
1353 	}
1354 	return (NULL);
1355 }
1356 
1357 /*
1358  * Associate a buffer with a vnode.
1359  */
1360 void
1361 bgetvp(struct vnode *vp, struct buf *bp)
1362 {
1363 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1364 
1365 	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
1366 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1367 	    ("bgetvp: bp already attached! %p", bp));
1368 
1369 	ASSERT_VI_LOCKED(vp, "bgetvp");
1370 	vholdl(vp);
1371 	bp->b_vp = vp;
1372 	bp->b_bufobj = &vp->v_bufobj;
1373 	/*
1374 	 * Insert onto list for new vnode.
1375 	 */
1376 	buf_vlist_add(bp, &vp->v_bufobj, BX_VNCLEAN);
1377 }
1378 
1379 /*
1380  * Disassociate a buffer from a vnode.
1381  */
1382 void
1383 brelvp(struct buf *bp)
1384 {
1385 	struct bufobj *bo;
1386 	struct vnode *vp;
1387 
1388 	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1389 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1390 
1391 	/*
1392 	 * Delete from old vnode list, if on one.
1393 	 */
1394 	vp = bp->b_vp;		/* XXX */
1395 	bo = bp->b_bufobj;
1396 	BO_LOCK(bo);
1397 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1398 		buf_vlist_remove(bp);
1399 	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1400 		bo->bo_flag &= ~BO_ONWORKLST;
1401 		mtx_lock(&sync_mtx);
1402 		LIST_REMOVE(bo, bo_synclist);
1403  		syncer_worklist_len--;
1404 		mtx_unlock(&sync_mtx);
1405 	}
1406 	vdropl(vp);
1407 	bp->b_vp = NULL;
1408 	bp->b_bufobj = NULL;
1409 	BO_UNLOCK(bo);
1410 }
1411 
1412 /*
1413  * Add an item to the syncer work queue.
1414  */
1415 static void
1416 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
1417 {
1418 	int slot;
1419 
1420 	ASSERT_BO_LOCKED(bo);
1421 
1422 	mtx_lock(&sync_mtx);
1423 	if (bo->bo_flag & BO_ONWORKLST)
1424 		LIST_REMOVE(bo, bo_synclist);
1425 	else {
1426 		bo->bo_flag |= BO_ONWORKLST;
1427  		syncer_worklist_len++;
1428 	}
1429 
1430 	if (delay > syncer_maxdelay - 2)
1431 		delay = syncer_maxdelay - 2;
1432 	slot = (syncer_delayno + delay) & syncer_mask;
1433 
1434 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
1435 	mtx_unlock(&sync_mtx);
1436 }
1437 
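/*
 * Sysctl handler reporting the length of the syncer worklist, not counting
 * the syncer vnodes themselves.
 */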
1438 static int
1439 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1440 {
1441 	int error, len;
1442 
1443 	mtx_lock(&sync_mtx);
1444 	len = syncer_worklist_len - sync_vnode_count;
1445 	mtx_unlock(&sync_mtx);
1446 	error = SYSCTL_OUT(req, &len, sizeof(len));
1447 	return (error);
1448 }
1449 
1450 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1451     sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
1452 
1453 struct  proc *updateproc;
1454 static void sched_sync(void);
1455 static struct kproc_desc up_kp = {
1456 	"syncer",
1457 	sched_sync,
1458 	&updateproc
1459 };
1460 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1461 
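/*
 * Sync a single vnode from the syncer worklist.  Called and returns with
 * sync_mtx held; returns nonzero if the vnode could not be synced, so the
 * caller can move it to a later worklist slot.
 */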
1462 static int
1463 sync_vnode(struct bufobj *bo, struct thread *td)
1464 {
1465 	struct vnode *vp;
1466 	struct mount *mp;
1467 
1468 	vp = bo->__bo_vnode; 	/* XXX */
1469 	if (VOP_ISLOCKED(vp, NULL) != 0)
1470 		return (1);
1471 	if (VI_TRYLOCK(vp) == 0)
1472 		return (1);
1473 	/*
1474 	 * We use vhold in case the vnode does not
1475 	 * successfully sync.  vhold prevents the vnode from
1476 	 * going away when we unlock the sync_mtx so that
1477 	 * we can acquire the vnode interlock.
1478 	 */
1479 	vholdl(vp);
1480 	mtx_unlock(&sync_mtx);
1481 	VI_UNLOCK(vp);
1482 	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1483 		vdrop(vp);
1484 		mtx_lock(&sync_mtx);
1485 		return (1);
1486 	}
1487 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1488 	(void) VOP_FSYNC(vp, MNT_LAZY, td);
1489 	VOP_UNLOCK(vp, 0, td);
1490 	vn_finished_write(mp);
1491 	VI_LOCK(vp);
1492 	if ((bo->bo_flag & BO_ONWORKLST) != 0) {
1493 		/*
1494 		 * Put us back on the worklist.  The worklist
1495 		 * routine will remove us from our current
1496 		 * position and then add us back in at a later
1497 		 * position.
1498 		 */
1499 		vn_syncer_add_to_worklist(bo, syncdelay);
1500 	}
1501 	vdropl(vp);
1502 	VI_UNLOCK(vp);
1503 	mtx_lock(&sync_mtx);
1504 	return (0);
1505 }
1506 
1507 /*
1508  * System filesystem synchronizer daemon.
1509  */
1510 static void
1511 sched_sync(void)
1512 {
1513 	struct synclist *next;
1514 	struct synclist *slp;
1515 	struct bufobj *bo;
1516 	long starttime;
1517 	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);
1518 	static int dummychan;
1519 	int last_work_seen;
1520 	int net_worklist_len;
1521 	int syncer_final_iter;
1522 	int first_printf;
1523 	int error;
1524 
1525 	mtx_lock(&Giant);
1526 	last_work_seen = 0;
1527 	syncer_final_iter = 0;
1528 	first_printf = 1;
1529 	syncer_state = SYNCER_RUNNING;
1530 	starttime = time_second;
1531 
1532 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1533 	    SHUTDOWN_PRI_LAST);
1534 
1535 	for (;;) {
1536 		mtx_lock(&sync_mtx);
1537 		if (syncer_state == SYNCER_FINAL_DELAY &&
1538 		    syncer_final_iter == 0) {
1539 			mtx_unlock(&sync_mtx);
1540 			kthread_suspend_check(td->td_proc);
1541 			mtx_lock(&sync_mtx);
1542 		}
1543 		net_worklist_len = syncer_worklist_len - sync_vnode_count;
1544 		if (syncer_state != SYNCER_RUNNING &&
1545 		    starttime != time_second) {
1546 			if (first_printf) {
1547 				printf("\nSyncing disks, vnodes remaining...");
1548 				first_printf = 0;
1549 			}
1550 			printf("%d ", net_worklist_len);
1551 		}
1552 		starttime = time_second;
1553 
1554 		/*
1555 		 * Push files whose dirty time has expired.  Be careful
1556 		 * of interrupt race on slp queue.
1557 		 *
1558 		 * Skip over empty worklist slots when shutting down.
1559 		 */
1560 		do {
1561 			slp = &syncer_workitem_pending[syncer_delayno];
1562 			syncer_delayno += 1;
1563 			if (syncer_delayno == syncer_maxdelay)
1564 				syncer_delayno = 0;
1565 			next = &syncer_workitem_pending[syncer_delayno];
1566 			/*
1567 			 * If the worklist has wrapped since it
1568 			 * was emptied of all but syncer vnodes,
1569 			 * switch to the FINAL_DELAY state and run
1570 			 * for one more second.
1571 			 */
1572 			if (syncer_state == SYNCER_SHUTTING_DOWN &&
1573 			    net_worklist_len == 0 &&
1574 			    last_work_seen == syncer_delayno) {
1575 				syncer_state = SYNCER_FINAL_DELAY;
1576 				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1577 			}
1578 		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1579 		    syncer_worklist_len > 0);
1580 
1581 		/*
1582 		 * Keep track of the last time there was anything
1583 		 * on the worklist other than syncer vnodes.
1584 		 * Return to the SHUTTING_DOWN state if any
1585 		 * new work appears.
1586 		 */
1587 		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1588 			last_work_seen = syncer_delayno;
1589 		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1590 			syncer_state = SYNCER_SHUTTING_DOWN;
1591 		while ((bo = LIST_FIRST(slp)) != NULL) {
1592 			error = sync_vnode(bo, td);
1593 			if (error == 1) {
1594 				LIST_REMOVE(bo, bo_synclist);
1595 				LIST_INSERT_HEAD(next, bo, bo_synclist);
1596 				continue;
1597 			}
1598 		}
1599 		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1600 			syncer_final_iter--;
1601 		mtx_unlock(&sync_mtx);
1602 
1603 		/*
1604 		 * Do soft update processing.
1605 		 */
1606 		if (softdep_process_worklist_hook != NULL)
1607 			(*softdep_process_worklist_hook)(NULL);
1608 
1609 		/*
1610 		 * The variable rushjob allows the kernel to speed up the
1611 		 * processing of the filesystem syncer process. A rushjob
1612 		 * value of N tells the filesystem syncer to process the next
1613 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1614 		 * is used by the soft update code to speed up the filesystem
1615 		 * syncer process when the incore state is getting so far
1616 		 * ahead of the disk that the kernel memory pool is being
1617 		 * threatened with exhaustion.
1618 		 */
1619 		mtx_lock(&sync_mtx);
1620 		if (rushjob > 0) {
1621 			rushjob -= 1;
1622 			mtx_unlock(&sync_mtx);
1623 			continue;
1624 		}
1625 		mtx_unlock(&sync_mtx);
1626 		/*
1627 		 * Just sleep for a short period of time between
1628 		 * iterations when shutting down to allow some I/O
1629 		 * to happen.
1630 		 *
1631 		 * If it has taken us less than a second to process the
1632 		 * current work, then wait. Otherwise start right over
1633 		 * again. We can still lose time if any single round
1634 		 * takes more than two seconds, but it does not really
1635 		 * matter as we are just trying to generally pace the
1636 		 * filesystem activity.
1637 		 */
1638 		if (syncer_state != SYNCER_RUNNING)
1639 			tsleep(&dummychan, PPAUSE, "syncfnl",
1640 			    hz / SYNCER_SHUTDOWN_SPEEDUP);
1641 		else if (time_second == starttime)
1642 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1643 	}
1644 }
1645 
1646 /*
1647  * Request the syncer daemon to speed up its work.
1648  * We never push it to speed up more than half of its
1649  * normal turn time, otherwise it could take over the cpu.
1650  */
1651 int
1652 speedup_syncer()
1653 {
1654 	struct thread *td;
1655 	int ret = 0;
1656 
1657 	td = FIRST_THREAD_IN_PROC(updateproc);
1658 	sleepq_remove(td, &lbolt);
1659 	mtx_lock(&sync_mtx);
1660 	if (rushjob < syncdelay / 2) {
1661 		rushjob += 1;
1662 		stat_rush_requests += 1;
1663 		ret = 1;
1664 	}
1665 	mtx_unlock(&sync_mtx);
1666 	return (ret);
1667 }
1668 
1669 /*
1670  * Tell the syncer to speed up its work and run through its work
1671  * list several times, then tell it to shut down.
1672  */
1673 static void
1674 syncer_shutdown(void *arg, int howto)
1675 {
1676 	struct thread *td;
1677 
1678 	if (howto & RB_NOSYNC)
1679 		return;
1680 	td = FIRST_THREAD_IN_PROC(updateproc);
1681 	sleepq_remove(td, &lbolt);
1682 	mtx_lock(&sync_mtx);
1683 	syncer_state = SYNCER_SHUTTING_DOWN;
1684 	rushjob = 0;
1685 	mtx_unlock(&sync_mtx);
1686 	kproc_shutdown(arg, howto);
1687 }
1688 
1689 /*
1690  * Reassign a buffer from one vnode to another.
1691  * Used to assign file specific control information
1692  * (indirect blocks) to the vnode to which they belong.
1693  */
1694 void
1695 reassignbuf(struct buf *bp)
1696 {
1697 	struct vnode *vp;
1698 	struct bufobj *bo;
1699 	int delay;
1700 
1701 	vp = bp->b_vp;
1702 	bo = bp->b_bufobj;
1703 	++reassignbufcalls;
1704 
1705 	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
1706 	    bp, bp->b_vp, bp->b_flags);
1707 	/*
1708 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1709 	 * is not fully linked in.
1710 	 */
1711 	if (bp->b_flags & B_PAGING)
1712 		panic("cannot reassign paging buffer");
1713 
1714 	/*
1715 	 * Delete from old vnode list, if on one.
1716 	 */
1717 	VI_LOCK(vp);
1718 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1719 		buf_vlist_remove(bp);
1720 	/*
1721 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1722 	 * of clean buffers.
1723 	 */
1724 	if (bp->b_flags & B_DELWRI) {
1725 		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
1726 			switch (vp->v_type) {
1727 			case VDIR:
1728 				delay = dirdelay;
1729 				break;
1730 			case VCHR:
1731 				delay = metadelay;
1732 				break;
1733 			default:
1734 				delay = filedelay;
1735 			}
1736 			vn_syncer_add_to_worklist(bo, delay);
1737 		}
1738 		buf_vlist_add(bp, bo, BX_VNDIRTY);
1739 	} else {
1740 		buf_vlist_add(bp, bo, BX_VNCLEAN);
1741 
1742 		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1743 			mtx_lock(&sync_mtx);
1744 			LIST_REMOVE(bo, bo_synclist);
1745  			syncer_worklist_len--;
1746 			mtx_unlock(&sync_mtx);
1747 			bo->bo_flag &= ~BO_ONWORKLST;
1748 		}
1749 	}
1750 	VI_UNLOCK(vp);
1751 }
1752 
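/*
 * Adjust a vnode's use count by the given delta; for character devices the
 * per-device use count is adjusted as well.  The vnode interlock is
 * expected to be held by the caller.
 */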
1753 static void
1754 v_incr_usecount(struct vnode *vp, int delta)
1755 {
1756 
1757 	vp->v_usecount += delta;
1758 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1759 		dev_lock();
1760 		vp->v_rdev->si_usecount += delta;
1761 		dev_unlock();
1762 	}
1763 }
1764 
1765 /*
1766  * Grab a particular vnode from the free list, increment its
1767  * reference count and lock it. The vnode lock bit is set if the
1768  * vnode is being eliminated in vgone. The process is awakened
1769  * when the transition is completed, and an error returned to
1770  * indicate that the vnode is no longer usable (possibly having
1771  * been changed to a new filesystem type).
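 *
 * Typical usage is vget(vp, LK_EXCLUSIVE, td), adding LK_INTERLOCK when
 * the caller already holds the vnode interlock; a nonzero return means
 * the vnode could not be obtained.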
1772  */
1773 int
1774 vget(vp, flags, td)
1775 	struct vnode *vp;
1776 	int flags;
1777 	struct thread *td;
1778 {
1779 	int error;
1780 
1781 	/*
1782 	 * If the vnode is in the process of being cleaned out for
1783 	 * another use, we wait for the cleaning to finish and then
1784 	 * return failure. Cleaning is determined by checking that
1785 	 * the VI_XLOCK flag is set.
1786 	 */
1787 	if ((flags & LK_INTERLOCK) == 0)
1788 		VI_LOCK(vp);
1789 	if (vp->v_iflag & VI_XLOCK && vp->v_vxthread != curthread) {
1790 		if ((flags & LK_NOWAIT) == 0) {
1791 			vp->v_iflag |= VI_XWANT;
1792 			msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
1793 			return (ENOENT);
1794 		}
1795 		VI_UNLOCK(vp);
1796 		return (EBUSY);
1797 	}
1798 
1799 	v_incr_usecount(vp, 1);
1800 
1801 	if (VSHOULDBUSY(vp))
1802 		vbusy(vp);
1803 	if (flags & LK_TYPE_MASK) {
1804 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
1805 			/*
1806 			 * must expand vrele here because we do not want
1807 			 * to call VOP_INACTIVE if the reference count
1808 			 * drops back to zero since it was never really
1809 			 * active. We must remove it from the free list
1810 			 * before sleeping so that multiple processes do
1811 			 * not try to recycle it.
1812 			 */
1813 			VI_LOCK(vp);
1814 			v_incr_usecount(vp, -1);
1815 			if (VSHOULDFREE(vp))
1816 				vfree(vp);
1817 			else
1818 				vlruvp(vp);
1819 			VI_UNLOCK(vp);
1820 		}
1821 		return (error);
1822 	}
1823 	VI_UNLOCK(vp);
1824 	return (0);
1825 }
1826 
1827 /*
1828  * Increase the reference count of a vnode.
1829  */
1830 void
1831 vref(struct vnode *vp)
1832 {
1833 
1834 	VI_LOCK(vp);
1835 	v_incr_usecount(vp, 1);
1836 	VI_UNLOCK(vp);
1837 }
1838 
1839 /*
1840  * Return reference count of a vnode.
1841  *
1842  * The results of this call are only guaranteed when some mechanism other
1843  * than the VI lock is used to stop other processes from gaining references
1844  * to the vnode.  This may be the case if the caller holds the only reference.
1845  * This is also useful when stale data is acceptable as race conditions may
1846  * be accounted for by some other means.
1847  */
1848 int
1849 vrefcnt(struct vnode *vp)
1850 {
1851 	int usecnt;
1852 
1853 	VI_LOCK(vp);
1854 	usecnt = vp->v_usecount;
1855 	VI_UNLOCK(vp);
1856 
1857 	return (usecnt);
1858 }
1859 
1860 
1861 /*
1862  * Vnode put/release.
1863  * If count drops to zero, call inactive routine and return to freelist.
1864  */
1865 void
1866 vrele(vp)
1867 	struct vnode *vp;
1868 {
1869 	struct thread *td = curthread;	/* XXX */
1870 
1871 	KASSERT(vp != NULL, ("vrele: null vp"));
1872 
1873 	VI_LOCK(vp);
1874 
1875 	/* Skip this v_writecount check if we're going to panic below. */
1876 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
1877 	    ("vrele: missed vn_close"));
1878 
1879 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
1880 	    vp->v_usecount == 1)) {
1881 		v_incr_usecount(vp, -1);
1882 		VI_UNLOCK(vp);
1883 
1884 		return;
1885 	}
1886 
1887 	if (vp->v_usecount == 1) {
1888 		v_incr_usecount(vp, -1);
1889 		/*
1890 		 * We must call VOP_INACTIVE with the node locked. Mark
1891 		 * as VI_DOINGINACT to avoid recursion.
1892 		 */
1893 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
1894 			VI_LOCK(vp);
1895 			KASSERT((vp->v_iflag & VI_DOINGINACT) == 0,
1896 			    ("vrele: recursed on VI_DOINGINACT"));
1897 			vp->v_iflag |= VI_DOINGINACT;
1898 			VI_UNLOCK(vp);
1899 			VOP_INACTIVE(vp, td);
1900 			VI_LOCK(vp);
1901 			KASSERT(vp->v_iflag & VI_DOINGINACT,
1902 			    ("vrele: lost VI_DOINGINACT"));
1903 			vp->v_iflag &= ~VI_DOINGINACT;
1904 		} else
1905 			VI_LOCK(vp);
1906 		if (VSHOULDFREE(vp))
1907 			vfree(vp);
1908 		else
1909 			vlruvp(vp);
1910 		VI_UNLOCK(vp);
1911 
1912 	} else {
1913 #ifdef DIAGNOSTIC
1914 		vprint("vrele: negative ref count", vp);
1915 #endif
1916 		VI_UNLOCK(vp);
1917 		panic("vrele: negative ref cnt");
1918 	}
1919 }
1920 
1921 /*
1922  * Release an already locked vnode.  This gives the same effect as
1923  * unlock+vrele(), but takes less time and avoids releasing and
1924  * re-acquiring the lock (as vrele() acquires the lock internally.)
1925  */
1926 void
1927 vput(vp)
1928 	struct vnode *vp;
1929 {
1930 	struct thread *td = curthread;	/* XXX */
1931 
1932 	KASSERT(vp != NULL, ("vput: null vp"));
1933 	VI_LOCK(vp);
1934 	/* Skip this v_writecount check if we're going to panic below. */
1935 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
1936 	    ("vput: missed vn_close"));
1937 
1938 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
1939 	    vp->v_usecount == 1)) {
1940 		v_incr_usecount(vp, -1);
1941 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
1942 		return;
1943 	}
1944 
1945 	if (vp->v_usecount == 1) {
1946 		v_incr_usecount(vp, -1);
1947 		/*
1948 		 * We must call VOP_INACTIVE with the node locked, so
1949 		 * we just need to release the vnode mutex. Mark
1950 		 * as VI_DOINGINACT to avoid recursion.
1951 		 */
1952 		KASSERT((vp->v_iflag & VI_DOINGINACT) == 0,
1953 		    ("vput: recursed on VI_DOINGINACT"));
1954 		vp->v_iflag |= VI_DOINGINACT;
1955 		VI_UNLOCK(vp);
1956 		VOP_INACTIVE(vp, td);
1957 		VI_LOCK(vp);
1958 		KASSERT(vp->v_iflag & VI_DOINGINACT,
1959 		    ("vput: lost VI_DOINGINACT"));
1960 		vp->v_iflag &= ~VI_DOINGINACT;
1961 		if (VSHOULDFREE(vp))
1962 			vfree(vp);
1963 		else
1964 			vlruvp(vp);
1965 		VI_UNLOCK(vp);
1966 
1967 	} else {
1968 #ifdef DIAGNOSTIC
1969 		vprint("vput: negative ref count", vp);
1970 #endif
1971 		panic("vput: negative ref cnt");
1972 	}
1973 }
1974 
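/*
 * Illustrative sketch (not in the original source): the two equivalent
 * ways to drop a locked, referenced vnode.  vput() is preferred because
 * it avoids re-taking the interlock that a separate vrele() would
 * acquire on its own.
 */
#if 0
	vput(vp);			/* one call: unlock + release */

	VOP_UNLOCK(vp, 0, td);		/* equivalent two-step form */
	vrele(vp);
#endif
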
1975 /*
1976  * Somebody doesn't want the vnode recycled.
1977  */
1978 void
1979 vhold(struct vnode *vp)
1980 {
1981 
1982 	VI_LOCK(vp);
1983 	vholdl(vp);
1984 	VI_UNLOCK(vp);
1985 }
1986 
1987 void
1988 vholdl(struct vnode *vp)
1989 {
1990 
1991 	vp->v_holdcnt++;
1992 	if (VSHOULDBUSY(vp))
1993 		vbusy(vp);
1994 }
1995 
1996 /*
1997  * Note that there is one less who cares about this vnode.  vdrop() is the
1998  * opposite of vhold().
1999  */
2000 void
2001 vdrop(struct vnode *vp)
2002 {
2003 
2004 	VI_LOCK(vp);
2005 	vdropl(vp);
2006 	VI_UNLOCK(vp);
2007 }
2008 
2009 void
2010 vdropl(vp)
2011 	struct vnode *vp;
2012 {
2013 
2014 	if (vp->v_holdcnt <= 0)
2015 		panic("vdrop: holdcnt");
2016 	vp->v_holdcnt--;
2017 	if (VSHOULDFREE(vp))
2018 		vfree(vp);
2019 	else
2020 		vlruvp(vp);
2021 }
2022 
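/*
 * Illustrative sketch (not in the original source): vhold()/vdrop() keep
 * a vnode from being recycled without taking a use reference, e.g. while
 * some other structure still points at it across a sleep.
 */
#if 0
	vhold(vp);
	/* ... the vnode cannot be recycled until the hold is dropped ... */
	vdrop(vp);
#endif
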
2023 /*
2024  * Remove any vnodes in the vnode table belonging to mount point mp.
2025  *
2026  * If FORCECLOSE is not specified, there should not be any active ones,
2027  * return error if any are found (nb: this is a user error, not a
2028  * system error). If FORCECLOSE is specified, detach any active vnodes
2029  * that are found.
2030  *
2031  * If WRITECLOSE is set, only flush out regular file vnodes open for
2032  * writing.
2033  *
2034  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2035  *
2036  * `rootrefs' specifies the base reference count for the root vnode
2037  * of this filesystem. The root vnode is considered busy if its
2038  * v_usecount exceeds this value. On a successful return, vflush()
2039  * will call vrele() on the root vnode exactly rootrefs times.
2040  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2041  * be zero.
2042  */
2043 #ifdef DIAGNOSTIC
2044 static int busyprt = 0;		/* print out busy vnodes */
2045 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2046 #endif
2047 
2048 int
2049 vflush(mp, rootrefs, flags, td)
2050 	struct mount *mp;
2051 	int rootrefs;
2052 	int flags;
2053 	struct thread *td;
2054 {
2055 	struct vnode *vp, *nvp, *rootvp = NULL;
2056 	struct vattr vattr;
2057 	int busy = 0, error;
2058 
2059 	if (rootrefs > 0) {
2060 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2061 		    ("vflush: bad args"));
2062 		/*
2063 		 * Get the filesystem root vnode. We can vput() it
2064 		 * immediately, since with rootrefs > 0, it won't go away.
2065 		 */
2066 		if ((error = VFS_ROOT(mp, &rootvp, td)) != 0)
2067 			return (error);
2068 		vput(rootvp);
2069 
2070 	}
2071 	MNT_ILOCK(mp);
2072 loop:
2073 	MNT_VNODE_FOREACH(vp, mp, nvp) {
2074 
2075 		VI_LOCK(vp);
2076 		MNT_IUNLOCK(mp);
2077 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td);
2078 		if (error) {
2079 			MNT_ILOCK(mp);
2080 			goto loop;
2081 		}
2082 		/*
2083 		 * Skip over vnodes marked VV_SYSTEM.
2084 		 */
2085 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2086 			VOP_UNLOCK(vp, 0, td);
2087 			MNT_ILOCK(mp);
2088 			continue;
2089 		}
2090 		/*
2091 		 * If WRITECLOSE is set, flush out unlinked but still open
2092 		 * files (even if open only for reading) and regular file
2093 		 * vnodes open for writing.
2094 		 */
2095 		if (flags & WRITECLOSE) {
2096 			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2097 			VI_LOCK(vp);
2098 
2099 			if ((vp->v_type == VNON ||
2100 			    (error == 0 && vattr.va_nlink > 0)) &&
2101 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2102 				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2103 				MNT_ILOCK(mp);
2104 				continue;
2105 			}
2106 		} else
2107 			VI_LOCK(vp);
2108 
2109 		VOP_UNLOCK(vp, 0, td);
2110 
2111 		/*
2112 		 * With v_usecount == 0, all we need to do is clear out the
2113 		 * vnode data structures and we are done.
2114 		 */
2115 		if (vp->v_usecount == 0) {
2116 			vgonel(vp, td);
2117 			MNT_ILOCK(mp);
2118 			continue;
2119 		}
2120 
2121 		/*
2122 		 * If FORCECLOSE is set, forcibly close the vnode. For block
2123 		 * or character devices, revert to an anonymous device. For
2124 		 * all other files, just kill them.
2125 		 */
2126 		if (flags & FORCECLOSE) {
2127 			KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
2128 			    ("device VNODE %p is FORCECLOSED", vp));
2129 			vgonel(vp, td);
2130 			MNT_ILOCK(mp);
2131 			continue;
2132 		}
2133 #ifdef DIAGNOSTIC
2134 		if (busyprt)
2135 			vprint("vflush: busy vnode", vp);
2136 #endif
2137 		VI_UNLOCK(vp);
2138 		MNT_ILOCK(mp);
2139 		busy++;
2140 	}
2141 	MNT_IUNLOCK(mp);
2142 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2143 		/*
2144 		 * If just the root vnode is busy, and if its refcount
2145 		 * is equal to `rootrefs', then go ahead and kill it.
2146 		 */
2147 		VI_LOCK(rootvp);
2148 		KASSERT(busy > 0, ("vflush: not busy"));
2149 		KASSERT(rootvp->v_usecount >= rootrefs,
2150 		    ("vflush: usecount %d < rootrefs %d",
2151 		     rootvp->v_usecount, rootrefs));
2152 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2153 			vgonel(rootvp, td);
2154 			busy = 0;
2155 		} else
2156 			VI_UNLOCK(rootvp);
2157 	}
2158 	if (busy)
2159 		return (EBUSY);
2160 	for (; rootrefs > 0; rootrefs--)
2161 		vrele(rootvp);
2162 	return (0);
2163 }
2164 
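/*
 * Illustrative sketch (not in the original source): how a filesystem's
 * unmount path typically calls vflush().  The rootrefs value of 1 and
 * the helper name example_unmount() are assumptions for the example;
 * real filesystems pass however many extra root references they hold.
 */
#if 0
static int
example_unmount(struct mount *mp, int mntflags, struct thread *td)
{
	int flags, error;

	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
	error = vflush(mp, 1, flags, td);	/* 1 == our cached root vnode */
	if (error != 0)
		return (error);			/* EBUSY: active vnodes remain */
	/* ... free filesystem-private data and finish the unmount ... */
	return (0);
}
#endif
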
2165 /*
2166  * This moves a now (likely recyclable) vnode to the end of its mount
2167  * point's vnode list.  XXX However, it is temporarily disabled until we
2168  * can clean up ffs_sync() and friends, which have loop restart
2169  * conditions which this code causes to operate O(N^2).
2170  */
2171 static void
2172 vlruvp(struct vnode *vp)
2173 {
2174 #if 0
2175 	struct mount *mp;
2176 
2177 	if ((mp = vp->v_mount) != NULL) {
2178 		MNT_ILOCK(mp);
2179 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2180 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2181 		MNT_IUNLOCK(mp);
2182 	}
2183 #endif
2184 }
2185 
2186 static void
2187 vx_lock(struct vnode *vp)
2188 {
2189 
2190 	ASSERT_VI_LOCKED(vp, "vx_lock");
2191 
2192 	/*
2193 	 * Prevent the vnode from being recycled or brought into use while we
2194 	 * clean it out.
2195 	 */
2196 	if (vp->v_iflag & VI_XLOCK)
2197 		panic("vx_lock: deadlock");
2198 	vp->v_iflag |= VI_XLOCK;
2199 	vp->v_vxthread = curthread;
2200 }
2201 
2202 static void
2203 vx_unlock(struct vnode *vp)
2204 {
2205 	ASSERT_VI_LOCKED(vp, "vx_unlock");
2206 	vp->v_iflag &= ~VI_XLOCK;
2207 	vp->v_vxthread = NULL;
2208 	if (vp->v_iflag & VI_XWANT) {
2209 		vp->v_iflag &= ~VI_XWANT;
2210 		wakeup(vp);
2211 	}
2212 }
2213 /*
2214  * Recycle an unused vnode to the front of the free list.
2215  * The vnode interlock is acquired and released internally.
2216  */
2217 int
2218 vrecycle(struct vnode *vp, struct thread *td)
2219 {
2220 
2221 	VI_LOCK(vp);
2222 	if (vp->v_usecount == 0) {
2223 		vgonel(vp, td);
2224 		return (1);
2225 	}
2226 	VI_UNLOCK(vp);
2227 	return (0);
2228 }
2229 
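/*
 * Illustrative sketch (not in the original source): a filesystem's
 * VOP_INACTIVE commonly calls vrecycle() once the underlying object has
 * been removed.  The field name ip->i_nlink is only an example of such a
 * condition, not a reference to any particular filesystem here.
 */
#if 0
	if (ip->i_nlink <= 0)
		(void) vrecycle(vp, td);	/* vgonel() if usecount is 0 */
#endif
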
2230 /*
2231  * Eliminate all activity associated with a vnode
2232  * in preparation for reuse.
2233  */
2234 void
2235 vgone(struct vnode *vp)
2236 {
2237 	struct thread *td = curthread;	/* XXX */
2238 
2239 	VI_LOCK(vp);
2240 	vgonel(vp, td);
2241 }
2242 
2243 /*
2244  * vgone, with the vp interlock held.
2245  */
2246 void
2247 vgonel(struct vnode *vp, struct thread *td)
2248 {
2249 	int active;
2250 
2251 	/*
2252 	 * If a vgone (or vclean) is already in progress,
2253 	 * wait until it is done and return.
2254 	 */
2255 	ASSERT_VI_LOCKED(vp, "vgonel");
2256 	if (vp->v_iflag & VI_XLOCK) {
2257 		vp->v_iflag |= VI_XWANT;
2258 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2259 		return;
2260 	}
2261 	vx_lock(vp);
2262 
2263 	/*
2264 	 * Check to see if the vnode is in use. If so we have to reference it
2265 	 * before we clean it out so that its count cannot fall to zero and
2266 	 * generate a race against ourselves to recycle it.
2267 	 */
2268 	if ((active = vp->v_usecount))
2269 		v_incr_usecount(vp, 1);
2270 
2271 	/*
2272 	 * Even if the count is zero, the VOP_INACTIVE routine may still
2273 	 * have the object locked while it cleans it out. The VOP_LOCK
2274 	 * ensures that the VOP_INACTIVE routine is done with its work.
2275 	 * For active vnodes, it ensures that no other activity can
2276 	 * occur while the underlying object is being cleaned out.
2277 	 */
2278 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2279 
2280 	/*
2281 	 * Clean out any buffers associated with the vnode.
2282 	 * If the flush fails, just toss the buffers.
2283 	 */
2284 	if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
2285 		(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2286 	if (vinvalbuf(vp, V_SAVE, td, 0, 0) != 0)
2287 		vinvalbuf(vp, 0, td, 0, 0);
2288 
2289 	/*
2290 	 * Any other processes trying to obtain this lock must first
2291 	 * wait for VXLOCK to clear, then call the new lock operation.
2292 	 */
2293 	VOP_UNLOCK(vp, 0, td);
2294 
2295 	/*
2296 	 * If purging an active vnode, it must be closed and
2297 	 * deactivated before being reclaimed. Note that the
2298 	 * VOP_INACTIVE will unlock the vnode.
2299 	 */
2300 	if (active) {
2301 		VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2302 		VI_LOCK(vp);
2303 		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2304 			KASSERT((vp->v_iflag & VI_DOINGINACT) == 0,
2305 			    ("vclean: recursed on VI_DOINGINACT"));
2306 			vp->v_iflag |= VI_DOINGINACT;
2307 			VI_UNLOCK(vp);
2308 			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2309 				panic("vclean: cannot relock.");
2310 			VOP_INACTIVE(vp, td);
2311 			VI_LOCK(vp);
2312 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2313 			    ("vclean: lost VI_DOINGINACT"));
2314 			vp->v_iflag &= ~VI_DOINGINACT;
2315 		}
2316 		VI_UNLOCK(vp);
2317 	}
2318 	/*
2319 	 * Reclaim the vnode.
2320 	 */
2321 	if (VOP_RECLAIM(vp, td))
2322 		panic("vclean: cannot reclaim");
2323 
2324 	KASSERT(vp->v_object == NULL,
2325 	    ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
2326 
2327 	if (active) {
2328 		/*
2329 		 * Inline copy of vrele() since VOP_INACTIVE
2330 		 * has already been called.
2331 		 */
2332 		VI_LOCK(vp);
2333 		v_incr_usecount(vp, -1);
2334 		if (vp->v_usecount <= 0) {
2335 #ifdef INVARIANTS
2336 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2337 				vprint("vclean: bad ref count", vp);
2338 				panic("vclean: ref cnt");
2339 			}
2340 #endif
2341 			if (VSHOULDFREE(vp))
2342 				vfree(vp);
2343 		}
2344 		VI_UNLOCK(vp);
2345 	}
2346 	/*
2347 	 * Delete from old mount point vnode list.
2348 	 */
2349 	delmntque(vp);
2350 	cache_purge(vp);
2351 	VI_LOCK(vp);
2352 	if (VSHOULDFREE(vp))
2353 		vfree(vp);
2354 
2355 	/*
2356 	 * Done with purge, reset to the standard lock and
2357 	 * notify sleepers of the grim news.
2358 	 */
2359 	vp->v_vnlock = &vp->v_lock;
2360 	vp->v_op = &dead_vnodeops;
2361 	if (vp->v_pollinfo != NULL)
2362 		vn_pollgone(vp);
2363 	vp->v_tag = "none";
2364 
2365 	VI_UNLOCK(vp);
2366 
2367 	/*
2368 	 * If special device, remove it from special device alias list
2369 	 * if it is on one.
2370 	 */
2371 	VI_LOCK(vp);
2372 	if (vp->v_type == VCHR && vp->v_rdev != NULL)
2373 		dev_rel(vp);
2374 
2375 	/*
2376 	 * If it is on the freelist and not already at the head,
2377 	 * move it to the head of the list. The test of the
2378 	 * VDOOMED flag and the reference count of zero is because
2379 	 * it will be removed from the free list by getnewvnode,
2380 	 * but will not have its reference count incremented until
2381 	 * after calling vgone. If the reference count were
2382 	 * incremented first, vgone would (incorrectly) try to
2383 	 * close the previous instance of the underlying object.
2384 	 */
2385 	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2386 		mtx_lock(&vnode_free_list_mtx);
2387 		if (vp->v_iflag & VI_FREE) {
2388 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2389 		} else {
2390 			vp->v_iflag |= VI_FREE;
2391 			freevnodes++;
2392 		}
2393 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2394 		mtx_unlock(&vnode_free_list_mtx);
2395 	}
2396 
2397 	vp->v_type = VBAD;
2398 	vx_unlock(vp);
2399 	VI_UNLOCK(vp);
2400 }
2401 
2402 /*
2403  * Lookup a vnode by device number.
2404  */
2405 int
2406 vfinddev(dev, vpp)
2407 	struct cdev *dev;
2408 	struct vnode **vpp;
2409 {
2410 	struct vnode *vp;
2411 
2412 	dev_lock();
2413 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2414 		*vpp = vp;
2415 		dev_unlock();
2416 		return (1);
2417 	}
2418 	dev_unlock();
2419 	return (0);
2420 }
2421 
2422 /*
2423  * Calculate the total number of references to a special device.
2424  */
2425 int
2426 vcount(vp)
2427 	struct vnode *vp;
2428 {
2429 	int count;
2430 
2431 	dev_lock();
2432 	count = vp->v_rdev->si_usecount;
2433 	dev_unlock();
2434 	return (count);
2435 }
2436 
2437 /*
2438  * Same as above, but using the struct cdev * as the argument.
2439  */
2440 int
2441 count_dev(dev)
2442 	struct cdev *dev;
2443 {
2444 	int count;
2445 
2446 	dev_lock();
2447 	count = dev->si_usecount;
2448 	dev_unlock();
2449 	return(count);
2450 }
2451 
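/*
 * Illustrative sketch (not in the original source): a driver open routine
 * might use count_dev() to refuse an exclusive open while other vnodes
 * still reference the device.  The threshold of 1 is an assumption for
 * the example.
 */
#if 0
	if (count_dev(dev) > 1)
		return (EBUSY);
#endif
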
2452 /*
2453  * Print out a description of a vnode.
2454  */
2455 static char *typename[] =
2456 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2457 
2458 void
2459 vn_printf(struct vnode *vp, const char *fmt, ...)
2460 {
2461 	va_list ap;
2462 	char buf[96];
2463 
2464 	va_start(ap, fmt);
2465 	vprintf(fmt, ap);
2466 	va_end(ap);
2467 	printf("%p: ", (void *)vp);
2468 	printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
2469 	printf("    usecount %d, writecount %d, refcount %d mountedhere %p\n",
2470 	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
2471 	buf[0] = '\0';
2472 	buf[1] = '\0';
2473 	if (vp->v_vflag & VV_ROOT)
2474 		strcat(buf, "|VV_ROOT");
2475 	if (vp->v_vflag & VV_TEXT)
2476 		strcat(buf, "|VV_TEXT");
2477 	if (vp->v_vflag & VV_SYSTEM)
2478 		strcat(buf, "|VV_SYSTEM");
2479 	if (vp->v_iflag & VI_XLOCK)
2480 		strcat(buf, "|VI_XLOCK");
2481 	if (vp->v_iflag & VI_XWANT)
2482 		strcat(buf, "|VI_XWANT");
2483 	if (vp->v_iflag & VI_DOOMED)
2484 		strcat(buf, "|VI_DOOMED");
2485 	if (vp->v_iflag & VI_FREE)
2486 		strcat(buf, "|VI_FREE");
2487 	printf("    flags (%s)\n", buf + 1);
2488 	if (mtx_owned(VI_MTX(vp)))
2489 		printf(" VI_LOCKed");
2490 	if (vp->v_object != NULL)
2491 		printf("    v_object %p\n", vp->v_object);
2492 	printf("    ");
2493 	lockmgr_printinfo(vp->v_vnlock);
2494 	printf("\n");
2495 	if (vp->v_data != NULL)
2496 		VOP_PRINT(vp);
2497 }
2498 
2499 #ifdef DDB
2500 #include <ddb/ddb.h>
2501 /*
2502  * List all of the locked vnodes in the system.
2503  * Called when debugging the kernel.
2504  */
2505 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2506 {
2507 	struct mount *mp, *nmp;
2508 	struct vnode *vp;
2509 
2510 	/*
2511 	 * Note: because this is DDB, we can't obey the locking semantics
2512 	 * for these structures, which means we could catch an inconsistent
2513 	 * state and dereference a nasty pointer.  Not much to be done
2514 	 * about that.
2515 	 */
2516 	printf("Locked vnodes\n");
2517 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2518 		nmp = TAILQ_NEXT(mp, mnt_list);
2519 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2520 			if (VOP_ISLOCKED(vp, NULL))
2521 				vprint(NULL, vp);
2522 		}
2523 		nmp = TAILQ_NEXT(mp, mnt_list);
2524 	}
2525 }
2526 #endif
2527 
2528 /*
2529  * Fill in a struct xvfsconf based on a struct vfsconf.
2530  */
2531 static void
2532 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2533 {
2534 
2535 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2536 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2537 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2538 	xvfsp->vfc_flags = vfsp->vfc_flags;
2539 	/*
2540 	 * These are unused in userland, we keep them
2541 	 * to not break binary compatibility.
2542 	 */
2543 	xvfsp->vfc_vfsops = NULL;
2544 	xvfsp->vfc_next = NULL;
2545 }
2546 
2547 /*
2548  * Top level filesystem related information gathering.
2549  */
2550 static int
2551 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2552 {
2553 	struct vfsconf *vfsp;
2554 	struct xvfsconf xvfsp;
2555 	int error;
2556 
2557 	error = 0;
2558 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2559 		vfsconf2x(vfsp, &xvfsp);
2560 		error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2561 		if (error)
2562 			break;
2563 	}
2564 	return (error);
2565 }
2566 
2567 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2568     "S,xvfsconf", "List of all configured filesystems");
2569 
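/*
 * Illustrative userland sketch (not part of this file): one way to
 * consume the vfs.conflist handler above from a small program using
 * sysctlbyname(3) and the struct xvfsconf layout exported here.
 */
#if 0
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct xvfsconf *xvfsp, *p;
	size_t len;

	if (sysctlbyname("vfs.conflist", NULL, &len, NULL, 0) == -1)
		err(1, "sysctlbyname");
	if ((xvfsp = malloc(len)) == NULL)
		err(1, "malloc");
	if (sysctlbyname("vfs.conflist", xvfsp, &len, NULL, 0) == -1)
		err(1, "sysctlbyname");
	for (p = xvfsp; p < xvfsp + len / sizeof(*p); p++)
		printf("%s: typenum %d, refcount %d\n", p->vfc_name,
		    p->vfc_typenum, p->vfc_refcount);
	free(xvfsp);
	return (0);
}
#endif
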
2570 #ifndef BURN_BRIDGES
2571 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2572 
2573 static int
2574 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2575 {
2576 	int *name = (int *)arg1 - 1;	/* XXX */
2577 	u_int namelen = arg2 + 1;	/* XXX */
2578 	struct vfsconf *vfsp;
2579 	struct xvfsconf xvfsp;
2580 
2581 	printf("WARNING: userland calling deprecated sysctl, "
2582 	    "please rebuild world\n");
2583 
2584 #if 1 || defined(COMPAT_PRELITE2)
2585 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2586 	if (namelen == 1)
2587 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2588 #endif
2589 
2590 	switch (name[1]) {
2591 	case VFS_MAXTYPENUM:
2592 		if (namelen != 2)
2593 			return (ENOTDIR);
2594 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2595 	case VFS_CONF:
2596 		if (namelen != 3)
2597 			return (ENOTDIR);	/* overloaded */
2598 		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
2599 			if (vfsp->vfc_typenum == name[2])
2600 				break;
2601 		if (vfsp == NULL)
2602 			return (EOPNOTSUPP);
2603 		vfsconf2x(vfsp, &xvfsp);
2604 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2605 	}
2606 	return (EOPNOTSUPP);
2607 }
2608 
2609 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
2610 	"Generic filesystem");
2611 
2612 #if 1 || defined(COMPAT_PRELITE2)
2613 
2614 static int
2615 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2616 {
2617 	int error;
2618 	struct vfsconf *vfsp;
2619 	struct ovfsconf ovfs;
2620 
2621 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2622 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2623 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2624 		ovfs.vfc_index = vfsp->vfc_typenum;
2625 		ovfs.vfc_refcount = vfsp->vfc_refcount;
2626 		ovfs.vfc_flags = vfsp->vfc_flags;
2627 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2628 		if (error)
2629 			return error;
2630 	}
2631 	return 0;
2632 }
2633 
2634 #endif /* 1 || COMPAT_PRELITE2 */
2635 #endif /* !BURN_BRIDGES */
2636 
2637 #define KINFO_VNODESLOP		10
2638 #ifdef notyet
2639 /*
2640  * Dump vnode list (via sysctl).
2641  */
2642 /* ARGSUSED */
2643 static int
2644 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2645 {
2646 	struct xvnode *xvn;
2647 	struct thread *td = req->td;
2648 	struct mount *mp;
2649 	struct vnode *vp;
2650 	int error, len, n;
2651 
2652 	/*
2653 	 * Stale numvnodes access is not fatal here.
2654 	 */
2655 	req->lock = 0;
2656 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
2657 	if (!req->oldptr)
2658 		/* Make an estimate */
2659 		return (SYSCTL_OUT(req, 0, len));
2660 
2661 	error = sysctl_wire_old_buffer(req, 0);
2662 	if (error != 0)
2663 		return (error);
2664 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
2665 	n = 0;
2666 	mtx_lock(&mountlist_mtx);
2667 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2668 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
2669 			continue;
2670 		MNT_ILOCK(mp);
2671 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2672 			if (n == len)
2673 				break;
2674 			vref(vp);
2675 			xvn[n].xv_size = sizeof *xvn;
2676 			xvn[n].xv_vnode = vp;
2677 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
2678 			XV_COPY(usecount);
2679 			XV_COPY(writecount);
2680 			XV_COPY(holdcnt);
2681 			XV_COPY(id);
2682 			XV_COPY(mount);
2683 			XV_COPY(numoutput);
2684 			XV_COPY(type);
2685 #undef XV_COPY
2686 			xvn[n].xv_flag = vp->v_vflag;
2687 
2688 			switch (vp->v_type) {
2689 			case VREG:
2690 			case VDIR:
2691 			case VLNK:
2692 				break;
2693 			case VBLK:
2694 			case VCHR:
2695 				if (vp->v_rdev == NULL) {
2696 					vrele(vp);
2697 					continue;
2698 				}
2699 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
2700 				break;
2701 			case VSOCK:
2702 				xvn[n].xv_socket = vp->v_socket;
2703 				break;
2704 			case VFIFO:
2705 				xvn[n].xv_fifo = vp->v_fifoinfo;
2706 				break;
2707 			case VNON:
2708 			case VBAD:
2709 			default:
2710 				/* shouldn't happen? */
2711 				vrele(vp);
2712 				continue;
2713 			}
2714 			vrele(vp);
2715 			++n;
2716 		}
2717 		MNT_IUNLOCK(mp);
2718 		mtx_lock(&mountlist_mtx);
2719 		vfs_unbusy(mp, td);
2720 		if (n == len)
2721 			break;
2722 	}
2723 	mtx_unlock(&mountlist_mtx);
2724 
2725 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
2726 	free(xvn, M_TEMP);
2727 	return (error);
2728 }
2729 
2730 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
2731 	0, 0, sysctl_vnode, "S,xvnode", "");
2732 #endif
2733 
2734 /*
2735  * Unmount all filesystems. The list is traversed in reverse order
2736  * of mounting to avoid dependencies.
2737  */
2738 void
2739 vfs_unmountall()
2740 {
2741 	struct mount *mp;
2742 	struct thread *td;
2743 	int error;
2744 
2745 	if (curthread != NULL)
2746 		td = curthread;
2747 	else
2748 		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
2749 	/*
2750 	 * Since this only runs when rebooting, it is not interlocked.
2751 	 */
2752 	while (!TAILQ_EMPTY(&mountlist)) {
2753 		mp = TAILQ_LAST(&mountlist, mntlist);
2754 		error = dounmount(mp, MNT_FORCE, td);
2755 		if (error) {
2756 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
2757 			printf("unmount of %s failed (",
2758 			    mp->mnt_stat.f_mntonname);
2759 			if (error == EBUSY)
2760 				printf("BUSY)\n");
2761 			else
2762 				printf("%d)\n", error);
2763 		} else {
2764 			/* The unmount has removed mp from the mountlist */
2765 		}
2766 	}
2767 }
2768 
2769 /*
2770  * Perform msync on all vnodes under a mount point.
2771  * The mount point must be locked.
2772  */
2773 void
2774 vfs_msync(struct mount *mp, int flags)
2775 {
2776 	struct vnode *vp, *nvp;
2777 	struct vm_object *obj;
2778 	int tries;
2779 
2780 	tries = 5;
2781 	MNT_ILOCK(mp);
2782 loop:
2783 	TAILQ_FOREACH_SAFE(vp, &mp->mnt_nvnodelist, v_nmntvnodes, nvp) {
2784 		if (vp->v_mount != mp) {
2785 			if (--tries > 0)
2786 				goto loop;
2787 			break;
2788 		}
2789 
2790 		VI_LOCK(vp);
2791 		if (vp->v_iflag & VI_XLOCK) {
2792 			VI_UNLOCK(vp);
2793 			continue;
2794 		}
2795 
2796 		if ((vp->v_iflag & VI_OBJDIRTY) &&
2797 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
2798 			MNT_IUNLOCK(mp);
2799 			if (!vget(vp,
2800 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
2801 			    curthread)) {
2802 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
2803 					vput(vp);
2804 					MNT_ILOCK(mp);
2805 					continue;
2806 				}
2807 
2808 				obj = vp->v_object;
2809 				if (obj != NULL) {
2810 					VM_OBJECT_LOCK(obj);
2811 					vm_object_page_clean(obj, 0, 0,
2812 					    flags == MNT_WAIT ?
2813 					    OBJPC_SYNC : OBJPC_NOSYNC);
2814 					VM_OBJECT_UNLOCK(obj);
2815 				}
2816 				vput(vp);
2817 			}
2818 			MNT_ILOCK(mp);
2819 			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
2820 				if (--tries > 0)
2821 					goto loop;
2822 				break;
2823 			}
2824 		} else
2825 			VI_UNLOCK(vp);
2826 	}
2827 	MNT_IUNLOCK(mp);
2828 }
2829 
2830 /*
2831  * Mark a vnode as free, putting it up for recycling.
2832  */
2833 void
2834 vfree(struct vnode *vp)
2835 {
2836 
2837 	ASSERT_VI_LOCKED(vp, "vfree");
2838 	mtx_lock(&vnode_free_list_mtx);
2839 	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
2840 	if (vp->v_iflag & VI_AGE) {
2841 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2842 	} else {
2843 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2844 	}
2845 	freevnodes++;
2846 	mtx_unlock(&vnode_free_list_mtx);
2847 	vp->v_iflag &= ~VI_AGE;
2848 	vp->v_iflag |= VI_FREE;
2849 }
2850 
2851 /*
2852  * Opposite of vfree() - mark a vnode as in use.
2853  */
2854 void
2855 vbusy(struct vnode *vp)
2856 {
2857 
2858 	ASSERT_VI_LOCKED(vp, "vbusy");
2859 	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
2860 
2861 	mtx_lock(&vnode_free_list_mtx);
2862 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2863 	freevnodes--;
2864 	mtx_unlock(&vnode_free_list_mtx);
2865 
2866 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
2867 }
2868 
2869 /*
2870  * Initialize per-vnode helper structure to hold poll-related state.
2871  */
2872 void
2873 v_addpollinfo(struct vnode *vp)
2874 {
2875 	struct vpollinfo *vi;
2876 
2877 	vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
2878 	if (vp->v_pollinfo != NULL) {
2879 		uma_zfree(vnodepoll_zone, vi);
2880 		return;
2881 	}
2882 	vp->v_pollinfo = vi;
2883 	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
2884 	knlist_init(&vp->v_pollinfo->vpi_selinfo.si_note,
2885 	    &vp->v_pollinfo->vpi_lock);
2886 }
2887 
2888 /*
2889  * Record a process's interest in events which might happen to
2890  * a vnode.  Because poll uses the historic select-style interface
2891  * internally, this routine serves as both the ``check for any
2892  * pending events'' and the ``record my interest in future events''
2893  * functions.  (These are done together, while the lock is held,
2894  * to avoid race conditions.)
2895  */
2896 int
2897 vn_pollrecord(vp, td, events)
2898 	struct vnode *vp;
2899 	struct thread *td;
2900 	short events;
2901 {
2902 
2903 	if (vp->v_pollinfo == NULL)
2904 		v_addpollinfo(vp);
2905 	mtx_lock(&vp->v_pollinfo->vpi_lock);
2906 	if (vp->v_pollinfo->vpi_revents & events) {
2907 		/*
2908 		 * This leaves events we are not interested
2909 		 * in available for the other process
2910 		 * which presumably had requested them
2911 		 * (otherwise they would never have been
2912 		 * recorded).
2913 		 */
2914 		events &= vp->v_pollinfo->vpi_revents;
2915 		vp->v_pollinfo->vpi_revents &= ~events;
2916 
2917 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
2918 		return events;
2919 	}
2920 	vp->v_pollinfo->vpi_events |= events;
2921 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
2922 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
2923 	return 0;
2924 }
2925 
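/*
 * Illustrative sketch (not in the original source): a filesystem's
 * VOP_POLL implementation can simply defer to vn_pollrecord() for the
 * check-and-record behaviour described above.  example_vop_poll() is a
 * made-up name.
 */
#if 0
static int
example_vop_poll(struct vop_poll_args *ap)
{

	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
}
#endif
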
2926 /*
2927  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
2928  * it is possible for us to miss an event due to race conditions, but
2929  * that condition is expected to be rare, so for the moment it is the
2930  * preferred interface.
2931  */
2932 void
2933 vn_pollevent(vp, events)
2934 	struct vnode *vp;
2935 	short events;
2936 {
2937 
2938 	if (vp->v_pollinfo == NULL)
2939 		v_addpollinfo(vp);
2940 	mtx_lock(&vp->v_pollinfo->vpi_lock);
2941 	if (vp->v_pollinfo->vpi_events & events) {
2942 		/*
2943 		 * We clear vpi_events so that we don't
2944 		 * call selwakeup() twice if two events are
2945 		 * posted before the polling process(es) is
2946 		 * awakened.  This also ensures that we take at
2947 		 * most one selwakeup() if the polling process
2948 		 * is no longer interested.  However, it does
2949 		 * mean that only one event can be noticed at
2950 		 * a time.  (Perhaps we should only clear those
2951 		 * event bits which we note?) XXX
2952 		 */
2953 		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
2954 		vp->v_pollinfo->vpi_revents |= events;
2955 		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
2956 	}
2957 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
2958 }
2959 
2960 /*
2961  * Wake up anyone polling on vp because it is being revoked.
2962  * This depends on dead_poll() returning POLLHUP for correct
2963  * behavior.
2964  */
2965 void
2966 vn_pollgone(vp)
2967 	struct vnode *vp;
2968 {
2969 
2970 	mtx_lock(&vp->v_pollinfo->vpi_lock);
2971 	VN_KNOTE_LOCKED(vp, NOTE_REVOKE);
2972 	if (vp->v_pollinfo->vpi_events) {
2973 		vp->v_pollinfo->vpi_events = 0;
2974 		selwakeuppri(&vp->v_pollinfo->vpi_selinfo, PRIBIO);
2975 	}
2976 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
2977 }
2978 
2979 
2980 
2981 /*
2982  * Routine to create and manage a filesystem syncer vnode.
2983  */
2984 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
2985 static int	sync_fsync(struct  vop_fsync_args *);
2986 static int	sync_inactive(struct  vop_inactive_args *);
2987 static int	sync_reclaim(struct  vop_reclaim_args *);
2988 
2989 static struct vop_vector sync_vnodeops = {
2990 	.vop_bypass =	VOP_EOPNOTSUPP,
2991 	.vop_close =	sync_close,		/* close */
2992 	.vop_fsync =	sync_fsync,		/* fsync */
2993 	.vop_inactive =	sync_inactive,	/* inactive */
2994 	.vop_reclaim =	sync_reclaim,	/* reclaim */
2995 	.vop_lock =	vop_stdlock,	/* lock */
2996 	.vop_unlock =	vop_stdunlock,	/* unlock */
2997 	.vop_islocked =	vop_stdislocked,	/* islocked */
2998 };
2999 
3000 /*
3001  * Create a new filesystem syncer vnode for the specified mount point.
3002  */
3003 int
3004 vfs_allocate_syncvnode(mp)
3005 	struct mount *mp;
3006 {
3007 	struct vnode *vp;
3008 	static long start, incr, next;
3009 	int error;
3010 
3011 	/* Allocate a new vnode */
3012 	if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) {
3013 		mp->mnt_syncer = NULL;
3014 		return (error);
3015 	}
3016 	vp->v_type = VNON;
3017 	/*
3018 	 * Place the vnode onto the syncer worklist. We attempt to
3019 	 * scatter them about on the list so that they will go off
3020 	 * at evenly distributed times even if all the filesystems
3021 	 * are mounted at once.
3022 	 */
3023 	next += incr;
3024 	if (next == 0 || next > syncer_maxdelay) {
3025 		start /= 2;
3026 		incr /= 2;
3027 		if (start == 0) {
3028 			start = syncer_maxdelay / 2;
3029 			incr = syncer_maxdelay;
3030 		}
3031 		next = start;
3032 	}
3033 	VI_LOCK(vp);
3034 	vn_syncer_add_to_worklist(&vp->v_bufobj,
3035 	    syncdelay > 0 ? next % syncdelay : 0);
3036 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3037 	mtx_lock(&sync_mtx);
3038 	sync_vnode_count++;
3039 	mtx_unlock(&sync_mtx);
3040 	VI_UNLOCK(vp);
3041 	mp->mnt_syncer = vp;
3042 	return (0);
3043 }
3044 
3045 /*
3046  * Do a lazy sync of the filesystem.
3047  */
3048 static int
3049 sync_fsync(ap)
3050 	struct vop_fsync_args /* {
3051 		struct vnode *a_vp;
3052 		struct ucred *a_cred;
3053 		int a_waitfor;
3054 		struct thread *a_td;
3055 	} */ *ap;
3056 {
3057 	struct vnode *syncvp = ap->a_vp;
3058 	struct mount *mp = syncvp->v_mount;
3059 	struct thread *td = ap->a_td;
3060 	int error, asyncflag;
3061 	struct bufobj *bo;
3062 
3063 	/*
3064 	 * We only need to do something if this is a lazy evaluation.
3065 	 */
3066 	if (ap->a_waitfor != MNT_LAZY)
3067 		return (0);
3068 
3069 	/*
3070 	 * Move ourselves to the back of the sync list.
3071 	 */
3072 	bo = &syncvp->v_bufobj;
3073 	BO_LOCK(bo);
3074 	vn_syncer_add_to_worklist(bo, syncdelay);
3075 	BO_UNLOCK(bo);
3076 
3077 	/*
3078 	 * Walk the list of vnodes pushing all that are dirty and
3079 	 * not already on the sync list.
3080 	 */
3081 	mtx_lock(&mountlist_mtx);
3082 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3083 		mtx_unlock(&mountlist_mtx);
3084 		return (0);
3085 	}
3086 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3087 		vfs_unbusy(mp, td);
3088 		return (0);
3089 	}
3090 	asyncflag = mp->mnt_flag & MNT_ASYNC;
3091 	mp->mnt_flag &= ~MNT_ASYNC;
3092 	vfs_msync(mp, MNT_NOWAIT);
3093 	error = VFS_SYNC(mp, MNT_LAZY, td);
3094 	if (asyncflag)
3095 		mp->mnt_flag |= MNT_ASYNC;
3096 	vn_finished_write(mp);
3097 	vfs_unbusy(mp, td);
3098 	return (error);
3099 }
3100 
3101 /*
3102  * The syncer vnode is no longer referenced.
3103  */
3104 static int
3105 sync_inactive(ap)
3106 	struct vop_inactive_args /* {
3107 		struct vnode *a_vp;
3108 		struct thread *a_td;
3109 	} */ *ap;
3110 {
3111 
3112 	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3113 	vgone(ap->a_vp);
3114 	return (0);
3115 }
3116 
3117 /*
3118  * The syncer vnode is no longer needed and is being decommissioned.
3119  *
3120  * Modifications to the worklist must be protected by sync_mtx.
3121  */
3122 static int
3123 sync_reclaim(ap)
3124 	struct vop_reclaim_args /* {
3125 		struct vnode *a_vp;
3126 	} */ *ap;
3127 {
3128 	struct vnode *vp = ap->a_vp;
3129 	struct bufobj *bo;
3130 
3131 	VI_LOCK(vp);
3132 	bo = &vp->v_bufobj;
3133 	vp->v_mount->mnt_syncer = NULL;
3134 	if (bo->bo_flag & BO_ONWORKLST) {
3135 		mtx_lock(&sync_mtx);
3136 		LIST_REMOVE(bo, bo_synclist);
3137  		syncer_worklist_len--;
3138 		sync_vnode_count--;
3139 		mtx_unlock(&sync_mtx);
3140 		bo->bo_flag &= ~BO_ONWORKLST;
3141 	}
3142 	VI_UNLOCK(vp);
3143 
3144 	return (0);
3145 }
3146 
3147 /*
3148  * Check if vnode represents a disk device
3149  */
3150 int
3151 vn_isdisk(vp, errp)
3152 	struct vnode *vp;
3153 	int *errp;
3154 {
3155 	int error;
3156 
3157 	error = 0;
3158 	dev_lock();
3159 	if (vp->v_type != VCHR)
3160 		error = ENOTBLK;
3161 	else if (vp->v_rdev == NULL)
3162 		error = ENXIO;
3163 	else if (vp->v_rdev->si_devsw == NULL)
3164 		error = ENXIO;
3165 	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
3166 		error = ENOTBLK;
3167 	dev_unlock();
3168 	if (errp != NULL)
3169 		*errp = error;
3170 	return (error == 0);
3171 }
3172 
3173 /*
3174  * Free data allocated by namei(); see namei(9) for details.
3175  */
3176 void
3177 NDFREE(ndp, flags)
3178      struct nameidata *ndp;
3179      const u_int flags;
3180 {
3181 
3182 	if (!(flags & NDF_NO_FREE_PNBUF) &&
3183 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3184 		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3185 		ndp->ni_cnd.cn_flags &= ~HASBUF;
3186 	}
3187 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3188 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3189 	    ndp->ni_dvp != ndp->ni_vp)
3190 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3191 	if (!(flags & NDF_NO_DVP_RELE) &&
3192 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3193 		vrele(ndp->ni_dvp);
3194 		ndp->ni_dvp = NULL;
3195 	}
3196 	if (!(flags & NDF_NO_VP_UNLOCK) &&
3197 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3198 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3199 	if (!(flags & NDF_NO_VP_RELE) &&
3200 	    ndp->ni_vp) {
3201 		vrele(ndp->ni_vp);
3202 		ndp->ni_vp = NULL;
3203 	}
3204 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3205 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3206 		vrele(ndp->ni_startdir);
3207 		ndp->ni_startdir = NULL;
3208 	}
3209 }
3210 
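/*
 * Illustrative sketch (not in the original source): a typical namei()
 * caller with the matching NDFREE() cleanup.  The helper name
 * example_lookup() is made up; NDF_ONLY_PNBUF frees just the pathname
 * buffer and leaves the vnode reference and lock to the caller.
 */
#if 0
static int
example_lookup(const char *path, struct thread *td)
{
	struct nameidata nd;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);
	/* ... use nd.ni_vp, which is locked and referenced ... */
	vput(nd.ni_vp);
	return (0);
}
#endif
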
3211 /*
3212  * Common filesystem object access control check routine.  Accepts a
3213  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3214  * and optional call-by-reference privused argument allowing vaccess()
3215  * to indicate to the caller whether privilege was used to satisfy the
3216  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3217  */
3218 int
3219 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3220 	enum vtype type;
3221 	mode_t file_mode;
3222 	uid_t file_uid;
3223 	gid_t file_gid;
3224 	mode_t acc_mode;
3225 	struct ucred *cred;
3226 	int *privused;
3227 {
3228 	mode_t dac_granted;
3229 #ifdef CAPABILITIES
3230 	mode_t cap_granted;
3231 #endif
3232 
3233 	/*
3234 	 * Look for a normal, non-privileged way to access the file/directory
3235 	 * as requested.  If it exists, go with that.
3236 	 */
3237 
3238 	if (privused != NULL)
3239 		*privused = 0;
3240 
3241 	dac_granted = 0;
3242 
3243 	/* Check the owner. */
3244 	if (cred->cr_uid == file_uid) {
3245 		dac_granted |= VADMIN;
3246 		if (file_mode & S_IXUSR)
3247 			dac_granted |= VEXEC;
3248 		if (file_mode & S_IRUSR)
3249 			dac_granted |= VREAD;
3250 		if (file_mode & S_IWUSR)
3251 			dac_granted |= (VWRITE | VAPPEND);
3252 
3253 		if ((acc_mode & dac_granted) == acc_mode)
3254 			return (0);
3255 
3256 		goto privcheck;
3257 	}
3258 
3259 	/* Otherwise, check the groups (first match) */
3260 	if (groupmember(file_gid, cred)) {
3261 		if (file_mode & S_IXGRP)
3262 			dac_granted |= VEXEC;
3263 		if (file_mode & S_IRGRP)
3264 			dac_granted |= VREAD;
3265 		if (file_mode & S_IWGRP)
3266 			dac_granted |= (VWRITE | VAPPEND);
3267 
3268 		if ((acc_mode & dac_granted) == acc_mode)
3269 			return (0);
3270 
3271 		goto privcheck;
3272 	}
3273 
3274 	/* Otherwise, check everyone else. */
3275 	if (file_mode & S_IXOTH)
3276 		dac_granted |= VEXEC;
3277 	if (file_mode & S_IROTH)
3278 		dac_granted |= VREAD;
3279 	if (file_mode & S_IWOTH)
3280 		dac_granted |= (VWRITE | VAPPEND);
3281 	if ((acc_mode & dac_granted) == acc_mode)
3282 		return (0);
3283 
3284 privcheck:
3285 	if (!suser_cred(cred, SUSER_ALLOWJAIL)) {
3286 		/* XXX audit: privilege used */
3287 		if (privused != NULL)
3288 			*privused = 1;
3289 		return (0);
3290 	}
3291 
3292 #ifdef CAPABILITIES
3293 	/*
3294 	 * Build a capability mask to determine if the set of capabilities
3295 	 * satisfies the requirements when combined with the granted mask
3296 	 * from above.
3297 	 * For each capability, if the capability is required, bitwise
3298 	 * or the request type onto the cap_granted mask.
3299 	 */
3300 	cap_granted = 0;
3301 
3302 	if (type == VDIR) {
3303 		/*
3304 		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3305 		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3306 		 */
3307 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3308 		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3309 			cap_granted |= VEXEC;
3310 	} else {
3311 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3312 		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, SUSER_ALLOWJAIL))
3313 			cap_granted |= VEXEC;
3314 	}
3315 
3316 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3317 	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, SUSER_ALLOWJAIL))
3318 		cap_granted |= VREAD;
3319 
3320 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3321 	    !cap_check(cred, NULL, CAP_DAC_WRITE, SUSER_ALLOWJAIL))
3322 		cap_granted |= (VWRITE | VAPPEND);
3323 
3324 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3325 	    !cap_check(cred, NULL, CAP_FOWNER, SUSER_ALLOWJAIL))
3326 		cap_granted |= VADMIN;
3327 
3328 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3329 		/* XXX audit: privilege used */
3330 		if (privused != NULL)
3331 			*privused = 1;
3332 		return (0);
3333 	}
3334 #endif
3335 
3336 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3337 }
3338 
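/*
 * Illustrative sketch (not in the original source): a worked vaccess()
 * case.  For a mode 0644 regular file owned by the caller, the owner
 * branch grants VADMIN|VREAD|VWRITE|VAPPEND, so this request succeeds
 * without consulting privilege; a VEXEC request would instead fall
 * through to the privcheck path.
 */
#if 0
	error = vaccess(VREG, 0644, file_uid, file_gid, VREAD | VWRITE,
	    cred, NULL);
#endif
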
3339 /*
3340  * Credential check based on process requesting service, and per-attribute
3341  * permissions.
3342  */
3343 int
3344 extattr_check_cred(struct vnode *vp, int attrnamespace,
3345     struct ucred *cred, struct thread *td, int access)
3346 {
3347 
3348 	/*
3349 	 * Kernel-invoked always succeeds.
3350 	 */
3351 	if (cred == NOCRED)
3352 		return (0);
3353 
3354 	/*
3355 	 * Do not allow privileged processes in jail to directly
3356 	 * manipulate system attributes.
3357 	 *
3358 	 * XXX What capability should apply here?
3359 	 * Probably CAP_SYS_SETFFLAG.
3360 	 */
3361 	switch (attrnamespace) {
3362 	case EXTATTR_NAMESPACE_SYSTEM:
3363 		/* Potentially should be: return (EPERM); */
3364 		return (suser_cred(cred, 0));
3365 	case EXTATTR_NAMESPACE_USER:
3366 		return (VOP_ACCESS(vp, access, cred, td));
3367 	default:
3368 		return (EPERM);
3369 	}
3370 }
3371 
3372 #ifdef DEBUG_VFS_LOCKS
3373 /*
3374  * This only exists to suppress warnings from unlocked specfs accesses.  It is
3375  * no longer ok to have an unlocked VFS.
3376  */
3377 #define	IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
3378 
3379 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3380 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3381 
3382 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3383 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3384 
3385 int vfs_badlock_print = 1;	/* Print lock violations. */
3386 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3387 
3388 #ifdef KDB
3389 int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
3390 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3391 #endif
3392 
3393 static void
3394 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3395 {
3396 
3397 #ifdef KDB
3398 	if (vfs_badlock_backtrace)
3399 		kdb_backtrace();
3400 #endif
3401 	if (vfs_badlock_print)
3402 		printf("%s: %p %s\n", str, (void *)vp, msg);
3403 	if (vfs_badlock_ddb)
3404 		kdb_enter("lock violation");
3405 }
3406 
3407 void
3408 assert_vi_locked(struct vnode *vp, const char *str)
3409 {
3410 
3411 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3412 		vfs_badlock("interlock is not locked but should be", str, vp);
3413 }
3414 
3415 void
3416 assert_vi_unlocked(struct vnode *vp, const char *str)
3417 {
3418 
3419 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3420 		vfs_badlock("interlock is locked but should not be", str, vp);
3421 }
3422 
3423 void
3424 assert_vop_locked(struct vnode *vp, const char *str)
3425 {
3426 
3427 	if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp, NULL) == 0)
3428 		vfs_badlock("is not locked but should be", str, vp);
3429 }
3430 
3431 void
3432 assert_vop_unlocked(struct vnode *vp, const char *str)
3433 {
3434 
3435 	if (vp && !IGNORE_LOCK(vp) &&
3436 	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
3437 		vfs_badlock("is locked but should not be", str, vp);
3438 }
3439 
3440 #if 0
3441 void
3442 assert_vop_elocked(struct vnode *vp, const char *str)
3443 {
3444 
3445 	if (vp && !IGNORE_LOCK(vp) &&
3446 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
3447 		vfs_badlock("is not exclusive locked but should be", str, vp);
3448 }
3449 
3450 void
3451 assert_vop_elocked_other(struct vnode *vp, const char *str)
3452 {
3453 
3454 	if (vp && !IGNORE_LOCK(vp) &&
3455 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
3456 		vfs_badlock("is not exclusive locked by another thread",
3457 		    str, vp);
3458 }
3459 
3460 void
3461 assert_vop_slocked(struct vnode *vp, const char *str)
3462 {
3463 
3464 	if (vp && !IGNORE_LOCK(vp) &&
3465 	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
3466 		vfs_badlock("is not locked shared but should be", str, vp);
3467 }
3468 #endif /* 0 */
3469 
3470 void
3471 vop_rename_pre(void *ap)
3472 {
3473 	struct vop_rename_args *a = ap;
3474 
3475 	if (a->a_tvp)
3476 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3477 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3478 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3479 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3480 
3481 	/* Check the source (from). */
3482 	if (a->a_tdvp != a->a_fdvp)
3483 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3484 	if (a->a_tvp != a->a_fvp)
3485 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3486 
3487 	/* Check the target. */
3488 	if (a->a_tvp)
3489 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3490 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3491 }
3492 
3493 void
3494 vop_strategy_pre(void *ap)
3495 {
3496 	struct vop_strategy_args *a;
3497 	struct buf *bp;
3498 
3499 	a = ap;
3500 	bp = a->a_bp;
3501 
3502 	/*
3503 	 * Cluster ops lock their component buffers but not the IO container.
3504 	 */
3505 	if ((bp->b_flags & B_CLUSTER) != 0)
3506 		return;
3507 
3508 	if (BUF_REFCNT(bp) < 1) {
3509 		if (vfs_badlock_print)
3510 			printf(
3511 			    "VOP_STRATEGY: bp is not locked but should be\n");
3512 		if (vfs_badlock_ddb)
3513 			kdb_enter("lock violation");
3514 	}
3515 }
3516 
3517 void
3518 vop_lookup_pre(void *ap)
3519 {
3520 	struct vop_lookup_args *a;
3521 	struct vnode *dvp;
3522 
3523 	a = ap;
3524 	dvp = a->a_dvp;
3525 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3526 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3527 }
3528 
3529 void
3530 vop_lookup_post(void *ap, int rc)
3531 {
3532 	struct vop_lookup_args *a;
3533 	struct componentname *cnp;
3534 	struct vnode *dvp;
3535 	struct vnode *vp;
3536 	int flags;
3537 
3538 	a = ap;
3539 	dvp = a->a_dvp;
3540 	cnp = a->a_cnp;
3541 	vp = *(a->a_vpp);
3542 	flags = cnp->cn_flags;
3543 
3544 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3545 
3546 	/*
3547 	 * If this is the last path component for this lookup and LOCKPARENT
3548 	 * is set, or if there is an error, the directory has to be locked.
3549 	 */
3550 	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
3551 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
3552 	else if (rc != 0)
3553 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
3554 	else if (dvp != vp)
3555 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
3556 	if (flags & PDIRUNLOCK)
3557 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
3558 }
3559 
3560 void
3561 vop_lock_pre(void *ap)
3562 {
3563 	struct vop_lock_args *a = ap;
3564 
3565 	if ((a->a_flags & LK_INTERLOCK) == 0)
3566 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3567 	else
3568 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3569 }
3570 
3571 void
3572 vop_lock_post(void *ap, int rc)
3573 {
3574 	struct vop_lock_args *a = ap;
3575 
3576 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3577 	if (rc == 0)
3578 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3579 }
3580 
3581 void
3582 vop_unlock_pre(void *ap)
3583 {
3584 	struct vop_unlock_args *a = ap;
3585 
3586 	if (a->a_flags & LK_INTERLOCK)
3587 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3588 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3589 }
3590 
3591 void
3592 vop_unlock_post(void *ap, int rc)
3593 {
3594 	struct vop_unlock_args *a = ap;
3595 
3596 	if (a->a_flags & LK_INTERLOCK)
3597 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3598 }
3599 #endif /* DEBUG_VFS_LOCKS */
3600 
3601 static struct knlist fs_knlist;
3602 
3603 static void
3604 vfs_event_init(void *arg)
3605 {
3606 	knlist_init(&fs_knlist, NULL);
3607 }
3608 /* XXX - correct order? */
3609 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
3610 
3611 void
3612 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
3613 {
3614 
3615 	KNOTE_UNLOCKED(&fs_knlist, event);
3616 }
3617 
3618 static int	filt_fsattach(struct knote *kn);
3619 static void	filt_fsdetach(struct knote *kn);
3620 static int	filt_fsevent(struct knote *kn, long hint);
3621 
3622 struct filterops fs_filtops =
3623 	{ 0, filt_fsattach, filt_fsdetach, filt_fsevent };
3624 
3625 static int
3626 filt_fsattach(struct knote *kn)
3627 {
3628 
3629 	kn->kn_flags |= EV_CLEAR;
3630 	knlist_add(&fs_knlist, kn, 0);
3631 	return (0);
3632 }
3633 
3634 static void
3635 filt_fsdetach(struct knote *kn)
3636 {
3637 
3638 	knlist_remove(&fs_knlist, kn, 0);
3639 }
3640 
3641 static int
3642 filt_fsevent(struct knote *kn, long hint)
3643 {
3644 
3645 	kn->kn_fflags |= hint;
3646 	return (kn->kn_fflags != 0);
3647 }
3648 
3649 static int
3650 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
3651 {
3652 	struct vfsidctl vc;
3653 	int error;
3654 	struct mount *mp;
3655 
3656 	error = SYSCTL_IN(req, &vc, sizeof(vc));
3657 	if (error)
3658 		return (error);
3659 	if (vc.vc_vers != VFS_CTL_VERS1)
3660 		return (EINVAL);
3661 	mp = vfs_getvfs(&vc.vc_fsid);
3662 	if (mp == NULL)
3663 		return (ENOENT);
3664 	/* ensure that a specific sysctl goes to the right filesystem. */
3665 	if (strcmp(vc.vc_fstypename, "*") != 0 &&
3666 	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
3667 		return (EINVAL);
3668 	}
3669 	VCTLTOREQ(&vc, req);
3670 	return (VFS_SYSCTL(mp, vc.vc_op, req));
3671 }
3672 
3673 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR,
3674         NULL, 0, sysctl_vfs_ctl, "", "Sysctl by fsid");
3675 
3676 /*
3677  * Function to initialize a va_filerev field sensibly.
3678  * XXX: Wouldn't a random number make a lot more sense ??
3679  */
3680 u_quad_t
3681 init_va_filerev(void)
3682 {
3683 	struct bintime bt;
3684 
3685 	getbinuptime(&bt);
3686 	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
3687 }
3688