/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */

/*
 * External virtual filesystem routines
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/extattr.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/uma.h>

static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");

static void	addalias(struct vnode *vp, dev_t nvp_rdev);
static void	insmntque(struct vnode *vp, struct mount *mp);
static void	vclean(struct vnode *vp, int flags, struct thread *td);
static void	vlruvp(struct vnode *vp);
static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
		    int slpflag, int slptimeo, int *errorp);
static int	vcanrecycle(struct vnode *vp, struct mount **vnmpp);


/*
 * Number of vnodes in existence.  Increased whenever getnewvnode()
 * allocates a new vnode, never decreased.
 */
static unsigned long	numvnodes;

SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");

/*
 * Conversion tables for conversion from vnode types to inode formats
 * and back.
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
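
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * the tables above translate between the vnode type enumeration and the
 * S_IF* mode bits.  The IFTOVT()/VTTOIF() macros in sys/vnode.h are the
 * usual accessors; the functions below just spell out the underlying
 * indexing, assuming the standard layout where the file-type bits
 * occupy mode bits 12-15.
 */
#if 0
static enum vtype
example_mode_to_vtype(mode_t mode)
{
	/* S_IFDIR is 0040000, so (S_IFDIR & S_IFMT) >> 12 == 4 -> VDIR. */
	return (iftovt_tab[(mode & S_IFMT) >> 12]);
}

static int
example_vtype_to_mode_bits(enum vtype type)
{
	/* vttoif_tab[VREG] == S_IFREG, vttoif_tab[VDIR] == S_IFDIR, ... */
	return (vttoif_tab[(int)type]);
}
#endif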

/*
 * List of vnodes that are ready for recycling.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;

/*
 * Minimum number of free vnodes.  If there are fewer free vnodes than
 * this, getnewvnode() will return a newly allocated vnode instead.
 */
static u_long wantfreevnodes = 25;
SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
/* Number of vnodes in the free list. */
static u_long freevnodes;
SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");

/*
 * Various variables used for debugging the new implementation of
 * reassignbuf().
 * XXX these are probably of (very) limited utility now.
 */
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
static int nameileafonly;
SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");

/*
 * Cache for the mount type id assigned to NFS.  This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
 */
int	nfs_mount_type = -1;

/* To keep more than one thread at a time from running vfs_getnewfsid */
static struct mtx mntid_mtx;

/*
 * Lock for any access to the following:
 *	vnode_free_list
 *	numvnodes
 *	freevnodes
 */
static struct mtx vnode_free_list_mtx;

/*
 * For any iteration/modification of dev->si_hlist (linked through
 * v_specnext)
 */
static struct mtx spechash_mtx;

/* Publicly exported FS */
struct nfs_public nfs_pub;

/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static uma_zone_t vnode_zone;
static uma_zone_t vnodepoll_zone;

/* Set to 1 to print out reclaim of active vnodes */
int	prtactive;

/*
 * The workitem queue.
 *
 * It is useful to delay writes of file data and filesystem metadata
 * for tens of seconds so that quickly created and deleted files need
 * not waste disk bandwidth being created and removed.  To realize this,
 * we append vnodes to a "workitem" queue.  When running with a soft
 * updates implementation, most pending metadata dependencies should
 * not wait for more than a few seconds.  Thus, metadata updates on
 * mounted block devices are delayed only about half the time that file
 * data is delayed.  Similarly, directory updates are more critical, so
 * they are only delayed about a third of the time that file data is
 * delayed.  Thus, there are SYNCER_MAXDELAY queues that are processed
 * round-robin at a rate of one each second (driven off the filesystem
 * syncer process).  The syncer_delayno variable indicates the next
 * queue that is to be processed.  Items that need to be processed soon
 * are placed in this queue:
 *
 *	syncer_workitem_pending[syncer_delayno]
 *
 * A delay of fifteen seconds is done by placing the request fifteen
 * entries later in the queue:
 *
 *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
 *
 */
static int syncer_delayno;
static long syncer_mask;
LIST_HEAD(synclist, vnode);
static struct synclist *syncer_workitem_pending;
/*
 * The sync_mtx protects:
 *	vp->v_synclist
 *	syncer_delayno
 *	syncer_workitem_pending
 *	rushjob
 */
static struct mtx sync_mtx;

#define SYNCER_MAXDELAY		32
static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
static int syncdelay = 30;		/* max time to delay syncing data */
static int filedelay = 30;		/* time to delay syncing files */
SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
static int dirdelay = 29;		/* time to delay syncing directories */
SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
static int metadelay = 28;		/* time to delay syncing metadata */
SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
static int rushjob;		/* number of slots to run ASAP */
static int stat_rush_requests;	/* number of times I/O speeded up */
SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
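
/*
 * Worked example (editor's note, not part of the original source): with
 * the default SYNCER_MAXDELAY of 32, hashinit() below produces a
 * power-of-two table, so syncer_mask == 31.  If syncer_delayno is
 * currently 20 and a vnode is scheduled with a delay of 15 seconds, it
 * lands in slot (20 + 15) & 31 == 3, which the syncer reaches 15 ticks
 * of its once-per-second loop later.
 */
#if 0
static int
example_syncer_slot(int delayno, int delay, long mask)
{
	/* Same computation as vn_syncer_add_to_worklist() below. */
	return ((delayno + delay) & mask);
}
#endif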

/*
 * Number of vnodes we want to exist at any one time.  This is mostly used
 * to size hash tables in vnode-related code.  It is normally not used in
 * getnewvnode(), as wantfreevnodes is normally nonzero.
 *
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &minvnodes, 0, "Minimum number of vnodes");
static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
    "Number of times the vnlru process ran without success");

/* Hook for calling soft updates */
int (*softdep_process_worklist_hook)(struct mount *);

/*
 * This only exists to suppress warnings from unlocked specfs accesses.  It is
 * no longer ok to have an unlocked VFS.
 */
#define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)

/* Print lock violations */
int vfs_badlock_print = 1;

/* Panic on violation */
int vfs_badlock_panic = 1;

/* Check for interlock across VOPs */
int vfs_badlock_mutex = 1;

static void
vfs_badlock(char *msg, char *str, struct vnode *vp)
{
	if (vfs_badlock_print)
		printf("%s: %p %s\n", str, vp, msg);
	if (vfs_badlock_panic)
		Debugger("Lock violation.\n");
}

void
assert_vi_unlocked(struct vnode *vp, char *str)
{
	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
		vfs_badlock("interlock is locked but should not be", str, vp);
}

void
assert_vi_locked(struct vnode *vp, char *str)
{
	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
		vfs_badlock("interlock is not locked but should be", str, vp);
}

void
assert_vop_locked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
		vfs_badlock("is not locked but should be", str, vp);
}

void
assert_vop_unlocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
		vfs_badlock("is locked but should not be", str, vp);
}

void
assert_vop_elocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
		vfs_badlock("is not exclusive locked but should be", str, vp);
}

void
assert_vop_elocked_other(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
		vfs_badlock("is not exclusive locked by another thread",
		    str, vp);
}

void
assert_vop_slocked(struct vnode *vp, char *str)
{
	if (vp && !IGNORE_LOCK(vp) &&
	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
		vfs_badlock("is not locked shared but should be", str, vp);
}

void
vop_rename_pre(void *ap)
{
	struct vop_rename_args *a = ap;

	if (a->a_tvp)
		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");

	/* Check the source (from) */
	if (a->a_tdvp != a->a_fdvp)
		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
	if (a->a_tvp != a->a_fvp)
		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");

	/* Check the target */
	if (a->a_tvp)
		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");

	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
}

void
vop_strategy_pre(void *ap)
{
	struct vop_strategy_args *a = ap;
	struct buf *bp;

	bp = a->a_bp;

	/*
	 * Cluster ops lock their component buffers but not the IO container.
	 */
	if ((bp->b_flags & B_CLUSTER) != 0)
		return;

	if (BUF_REFCNT(bp) < 1) {
		if (vfs_badlock_print)
			printf("VOP_STRATEGY: bp is not locked but should be.\n");
		if (vfs_badlock_panic)
			Debugger("Lock violation.\n");
	}
}

void
vop_lookup_pre(void *ap)
{
	struct vop_lookup_args *a = ap;
	struct vnode *dvp;

	dvp = a->a_dvp;

	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
}

void
vop_lookup_post(void *ap, int rc)
{
	struct vop_lookup_args *a = ap;
	struct componentname *cnp;
	struct vnode *dvp;
	struct vnode *vp;
	int flags;

	dvp = a->a_dvp;
	cnp = a->a_cnp;
	vp = *(a->a_vpp);
	flags = cnp->cn_flags;

	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
	/*
	 * If this is the last path component for this lookup and LOCKPARENT
	 * is set, or if there is an error, the directory has to be locked.
	 */
	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
	else if (rc != 0)
		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
	else if (dvp != vp)
		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");

	if (flags & PDIRUNLOCK)
		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
}

void
vop_unlock_pre(void *ap)
{
	struct vop_unlock_args *a = ap;

	if (a->a_flags & LK_INTERLOCK)
		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");

	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
}

void
vop_unlock_post(void *ap, int rc)
{
	struct vop_unlock_args *a = ap;

	if (a->a_flags & LK_INTERLOCK)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
}

void
vop_lock_pre(void *ap)
{
	struct vop_lock_args *a = ap;

	if ((a->a_flags & LK_INTERLOCK) == 0)
		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	else
		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
}

void
vop_lock_post(void *ap, int rc)
{
	struct vop_lock_args *a;

	a = ap;

	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
	if (rc == 0)
		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
}

void
v_addpollinfo(struct vnode *vp)
{
	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
}

/*
 * Initialize the vnode management data structures.
 */
static void
vntblinit(void *dummy __unused)
{

	/*
	 * Desiredvnodes is a function of the physical memory size and
	 * the kernel's heap size.  Specifically, desiredvnodes scales
	 * in proportion to the physical memory size until two fifths
	 * of the kernel's heap size is consumed by vnodes and vm
	 * objects.
	 */
	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
	minvnodes = desiredvnodes / 4;
	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
	mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
	TAILQ_INIT(&vnode_free_list);
	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	/*
	 * Initialize the filesystem syncer.
	 */
	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
		&syncer_mask);
	syncer_maxdelay = syncer_mask + 1;
	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
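
/*
 * Worked example (editor's note, not part of the original source): on a
 * hypothetical machine with maxproc == 1000, cnt.v_page_count == 262144
 * (1 GB of 4 KB pages) and vm_kmem_size == 128 MB, the first term of the
 * min() above is 1000 + 262144 / 4 == 66536, while the heap-size cap is
 * 2 * 134217728 / (5 * (sizeof(struct vm_object) + sizeof(struct vnode))).
 * Assuming a combined object+vnode size of roughly 400 bytes, the cap is
 * about 134217, so desiredvnodes == 66536: physical memory, not the
 * kernel heap, is the limiting factor in this configuration.
 */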


/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting. Interlock is not released on failure.
 */
int
vfs_busy(mp, flags, interlkp, td)
	struct mount *mp;
	int flags;
	struct mtx *interlkp;
	struct thread *td;
{
	int lkflags;

	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT)
			return (ENOENT);
		mp->mnt_kern_flag |= MNTK_MWAIT;
		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
		return (ENOENT);
	}
	lkflags = LK_SHARED | LK_NOPAUSE;
	if (interlkp)
		lkflags |= LK_INTERLOCK;
	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
		panic("vfs_busy: unexpected lock failure");
	return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mp, td)
	struct mount *mp;
	struct thread *td;
{

	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	mtx_lock(&mountlist_mtx);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			mtx_unlock(&mountlist_mtx);
			return (mp);
		}
	}
	mtx_unlock(&mountlist_mtx);
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(mp)
	struct mount *mp;
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	mtx_lock(&mntid_mtx);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	mtx_unlock(&mntid_mtx);
}
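
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * the minor number handed to makeudev() above packs the low byte of the
 * filesystem type into bits 24-31 and splits the 16-bit mntid_base so
 * that its high byte lands in bits 16-23 and its low byte in bits 0-7.
 * For example, with vfc_typenum == 0x05 and mntid_base == 0x0102 the
 * minor is 0x05000000 | 0x00010000 | 0x02 == 0x05010002.
 */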

/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 * >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
    &timestamp_precision, 0, "");

/*
 * Get a current timestamp.
 */
void
vfs_timestamp(tsp)
	struct timespec *tsp;
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
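
/*
 * Usage note (editor's note, not part of the original source): a
 * filesystem typically calls vfs_timestamp() when stamping an inode,
 * along these lines (the inode field names are hypothetical):
 *
 *	struct timespec ts;
 *
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts.tv_sec;
 *	ip->i_mtimensec = ts.tv_nsec;
 *
 * With the default vfs.timestamp_precision of 0 (TSP_SEC), tv_nsec is
 * always 0; setting the sysctl to 3 switches to full nanotime()
 * resolution at somewhat higher cost per call.
 */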

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rdev = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_birthtime.tv_sec = VNOVAL;
	vap->va_birthtime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed:
 * the buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
static int
vlrureclaim(struct mount *mp)
{
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;

	/*
	 * Calculate the trigger point; don't allow user
	 * screwups to blow us up.  This prevents us from
	 * recycling vnodes with lots of resident pages.  We
	 * aren't trying to free memory, we are trying to
	 * free vnodes.
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = cnt.v_page_count * 2 / usevnodes;

	done = 0;
	mtx_lock(&mntvnode_mtx);
	count = mp->mnt_nvnodelistsize / 10 + 1;
	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);

		if (vp->v_type != VNON &&
		    vp->v_type != VBAD &&
		    VI_TRYLOCK(vp)) {
			if (VMIGHTFREE(vp) &&           /* critical path opt */
			    (vp->v_object == NULL ||
			    vp->v_object->resident_page_count < trigger)) {
				mtx_unlock(&mntvnode_mtx);
				vgonel(vp, curthread);
				done++;
				mtx_lock(&mntvnode_mtx);
			} else
				VI_UNLOCK(vp);
		}
		--count;
	}
	mtx_unlock(&mntvnode_mtx);
	return (done);
}
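
/*
 * Worked example (editor's note, not part of the original source): with
 * cnt.v_page_count == 262144 and desiredvnodes == 66536, the trigger is
 * 262144 * 2 / 66536 == 7 resident pages.  A vnode whose VM object still
 * caches more pages than that is skipped, since recycling it would throw
 * away a disproportionate amount of cached data for a single vnode slot.
 */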

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of filesystem code has some
 * interesting deadlock problems.
 */
static struct proc *vnlruproc;
static int vnlruproc_sig;

static void
vnlru_proc(void)
{
	struct mount *mp, *nmp;
	int s;
	int done;
	struct proc *p = vnlruproc;
	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
	    SHUTDOWN_PRI_FIRST);

	s = splbio();
	for (;;) {
		kthread_suspend_check(p);
		mtx_lock(&vnode_free_list_mtx);
		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
			mtx_unlock(&vnode_free_list_mtx);
			vnlruproc_sig = 0;
			wakeup(&vnlruproc_sig);
			tsleep(vnlruproc, PVFS, "vlruwt", hz);
			continue;
		}
		mtx_unlock(&vnode_free_list_mtx);
		done = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
				nmp = TAILQ_NEXT(mp, mnt_list);
				continue;
			}
			done += vlrureclaim(mp);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		mtx_unlock(&mountlist_mtx);
		if (done == 0) {
#if 0
			/* These messages are temporary debugging aids */
			if (vnlru_nowhere < 5)
				printf("vnlru process getting nowhere..\n");
			else if (vnlru_nowhere == 5)
				printf("vnlru process messages stopped.\n");
#endif
			vnlru_nowhere++;
			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
		}
	}
	splx(s);
}

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruproc
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)


/*
 * Routines having to do with the management of the vnode table.
 */

/*
 * Check to see if a free vnode can be recycled.  If it can,
 * return it locked with the vn lock, but not the interlock.  Also
 * get the vn_start_write lock.  Otherwise indicate the error.
 */
static int
vcanrecycle(struct vnode *vp, struct mount **vnmpp)
{
	struct thread *td = curthread;
	vm_object_t object;
	int error;

	/* Don't recycle if we can't get the interlock */
	if (!VI_TRYLOCK(vp))
		return (EWOULDBLOCK);

	/* We should be able to immediately acquire this */
	/* XXX This looks like it should panic if it fails */
	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0) {
		if (VOP_ISLOCKED(vp, td))
			panic("vcanrecycle: locked vnode");
		return (EWOULDBLOCK);
	}

	/*
	 * Don't recycle if its filesystem is being suspended.
	 */
	if (vn_start_write(vp, vnmpp, V_NOWAIT) != 0) {
		error = EBUSY;
		goto done;
	}

	/*
	 * Don't recycle if we still have cached pages.
	 */
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		VM_OBJECT_LOCK(object);
		if (object->resident_page_count ||
		    object->ref_count) {
			VM_OBJECT_UNLOCK(object);
			error = EBUSY;
			goto done;
		}
		VM_OBJECT_UNLOCK(object);
	}
	if (LIST_FIRST(&vp->v_cache_src)) {
		/*
		 * note: nameileafonly sysctl is temporary,
		 * for debugging only, and will eventually be
		 * removed.
		 */
		if (nameileafonly > 0) {
			/*
			 * Do not reuse namei-cached directory
			 * vnodes that have cached
			 * subdirectories.
			 */
			if (cache_leaf_test(vp) < 0) {
				error = EISDIR;
				goto done;
			}
		} else if (nameileafonly < 0 ||
			    vmiodirenable == 0) {
			/*
			 * Do not reuse namei-cached directory
			 * vnodes if nameileafonly is -1 or
			 * if VMIO backing for directories is
			 * turned off (otherwise we reuse them
			 * too quickly).
			 */
			error = EBUSY;
			goto done;
		}
	}
	return (0);
done:
	VOP_UNLOCK(vp, 0, td);
	return (error);
}

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	const char *tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *vp = NULL;
	struct vpollinfo *pollinfo = NULL;
	struct mount *vnmp;

	mtx_lock(&vnode_free_list_mtx);

	/*
	 * Try to reuse vnodes if we hit the max.  This case only
	 * arises in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes) {
		if (vnlruproc_sig == 0) {
			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
			wakeup(vnlruproc);
		}
		mtx_unlock(&vnode_free_list_mtx);
		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
		mtx_lock(&vnode_free_list_mtx);
	}

	/*
	 * Attempt to reuse a vnode already on the free list, allocating
	 * a new vnode if we can't find one or if we have not yet reached
	 * the minimum needed for good LRU performance.
	 */

	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
		int error;
		int count;

		for (count = 0; count < freevnodes; count++) {
			vp = TAILQ_FIRST(&vnode_free_list);

			KASSERT(vp->v_usecount == 0 &&
			    (vp->v_iflag & VI_DOINGINACT) == 0,
			    ("getnewvnode: free vnode isn't"));

			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			/*
			 * We have to drop the free list mtx to avoid lock
			 * order reversals with interlock.
			 */
			mtx_unlock(&vnode_free_list_mtx);
			error = vcanrecycle(vp, &vnmp);
			mtx_lock(&vnode_free_list_mtx);
			if (error == 0)
				break;
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			vp = NULL;
		}
	}
	if (vp) {
		freevnodes--;
		mtx_unlock(&vnode_free_list_mtx);

		cache_purge(vp);
		VI_LOCK(vp);
		vp->v_iflag |= VI_DOOMED;
		vp->v_iflag &= ~VI_FREE;
		if (vp->v_type != VBAD) {
			VOP_UNLOCK(vp, 0, td);
			vgonel(vp, td);
			VI_LOCK(vp);
		} else {
			VOP_UNLOCK(vp, 0, td);
		}
		vn_finished_write(vnmp);

#ifdef INVARIANTS
		{
			if (vp->v_data)
				panic("cleaned vnode isn't");
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			if (vp->v_writecount != 0)
				panic("Non-zero write count");
		}
#endif
		if ((pollinfo = vp->v_pollinfo) != NULL) {
			/*
			 * To avoid lock order reversals, the call to
			 * uma_zfree() must be delayed until the vnode
			 * interlock is released.
			 */
			vp->v_pollinfo = NULL;
		}
#ifdef MAC
		mac_destroy_vnode(vp);
#endif
		vp->v_iflag = 0;
		vp->v_vflag = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		lockdestroy(vp->v_vnlock);
		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
		KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
		KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
	} else {
		numvnodes++;
		mtx_unlock(&vnode_free_list_mtx);

		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
		VI_LOCK(vp);
		vp->v_dd = vp;
		vp->v_vnlock = &vp->v_lock;
		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
		cache_purge(vp);
		LIST_INIT(&vp->v_cache_src);
		TAILQ_INIT(&vp->v_cache_dst);
	}

	TAILQ_INIT(&vp->v_cleanblkhd);
	TAILQ_INIT(&vp->v_dirtyblkhd);
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	vp->v_cachedid = -1;
	VI_UNLOCK(vp);
	if (pollinfo != NULL) {
		mtx_destroy(&pollinfo->vpi_lock);
		uma_zfree(vnodepoll_zone, pollinfo);
	}
#ifdef MAC
	mac_init_vnode(vp);
	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
		mac_associate_vnode_singlelabel(mp, vp);
#endif
	insmntque(vp, mp);

	return (0);
}
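
/*
 * Usage sketch (editor's note, not part of the original source): a
 * filesystem's inode-allocation path typically obtains a fresh vnode
 * like this; the per-fs names here are hypothetical, but the call shape
 * matches getnewvnode()'s signature above.
 */
#if 0
static int
example_fs_newvnode(struct mount *mp, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	/* example_fs_vnodeop_p would be this filesystem's vop vector. */
	error = getnewvnode("example_fs", mp, example_fs_vnodeop_p, &vp);
	if (error)
		return (error);
	vp->v_type = VREG;	/* caller fills in type and v_data */
	*vpp = vp;
	return (0);
}
#endif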

/*
 * Move a vnode from one mount queue to another.
 */
static void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	mtx_lock(&mntvnode_mtx);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
		vp->v_mount->mnt_nvnodelistsize--;
	}
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL) {
		mtx_unlock(&mntvnode_mtx);
		return;
	}
	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	mp->mnt_nvnodelistsize++;
	mtx_unlock(&mntvnode_mtx);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		VI_LOCK(vp);
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
			vp->v_iflag &= ~VI_BWAIT;
			wakeup(&vp->v_numoutput);
		}
		VI_UNLOCK(vp);
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct thread *td;
	int slpflag, slptimeo;
{
	struct buf *blist;
	int s, error;
	vm_object_t object;

	GIANT_REQUIRED;

	ASSERT_VOP_LOCKED(vp, "vinvalbuf");

	VI_LOCK(vp);
	if (flags & V_SAVE) {
		s = splbio();
		while (vp->v_numoutput) {
			vp->v_iflag |= VI_BWAIT;
			error = msleep(&vp->v_numoutput, VI_MTX(vp),
			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
			if (error) {
				VI_UNLOCK(vp);
				splx(s);
				return (error);
			}
		}
		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
			splx(s);
			VI_UNLOCK(vp);
			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
				return (error);
			/*
			 * XXX We could save a lock/unlock if this was only
			 * enabled under INVARIANTS
			 */
			VI_LOCK(vp);
			s = splbio();
			if (vp->v_numoutput > 0 ||
			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
				panic("vinvalbuf: dirty bufs");
		}
		splx(s);
	}
	s = splbio();
	/*
	 * If you alter this loop please notice that interlock is dropped and
	 * reacquired in flushbuflist.  Special care is needed to ensure that
	 * no race conditions occur from this.
	 */
	for (error = 0;;) {
		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
			if (error)
				break;
			continue;
		}
		break;
	}
	if (error) {
		splx(s);
		VI_UNLOCK(vp);
		return (error);
	}

	/*
	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
	 * have write I/O in-progress but if there is a VM object then the
	 * VM object can also have read-I/O in-progress.
	 */
	do {
		while (vp->v_numoutput > 0) {
			vp->v_iflag |= VI_BWAIT;
			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
		}
		VI_UNLOCK(vp);
		if (VOP_GETVOBJECT(vp, &object) == 0) {
			VM_OBJECT_LOCK(object);
			vm_object_pip_wait(object, "vnvlbx");
			VM_OBJECT_UNLOCK(object);
		}
		VI_LOCK(vp);
	} while (vp->v_numoutput > 0);
	VI_UNLOCK(vp);

	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if (VOP_GETVOBJECT(vp, &object) == 0) {
		VM_OBJECT_LOCK(object);
		vm_object_page_remove(object, 0, 0,
			(flags & V_SAVE) ? TRUE : FALSE);
		VM_OBJECT_UNLOCK(object);
	}

#ifdef INVARIANTS
	VI_LOCK(vp);
	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
		panic("vinvalbuf: flush failed");
	VI_UNLOCK(vp);
#endif
	return (0);
}

/*
 * Flush out buffers on the specified list.
 */
static int
flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
	struct buf *blist;
	int flags;
	struct vnode *vp;
	int slpflag, slptimeo;
	int *errorp;
{
	struct buf *bp, *nbp;
	int found, error;

	ASSERT_VI_LOCKED(vp, "flushbuflist");

	for (found = 0, bp = blist; bp; bp = nbp) {
		nbp = TAILQ_NEXT(bp, b_vnbufs);
		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
			continue;
		}
		found += 1;
		error = BUF_TIMELOCK(bp,
		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
		    "flushbuf", slpflag, slptimeo);
		if (error) {
			if (error != ENOLCK)
				*errorp = error;
			goto done;
		}
		/*
		 * XXX Since there are no node locks for NFS, I
		 * believe there is a slight chance that a delayed
		 * write will occur while sleeping just above, so
		 * check for it.  Note that vfs_bio_awrite expects
		 * buffers to reside on a queue, while BUF_WRITE and
		 * brelse do not.
		 */
		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
			(flags & V_SAVE)) {

			if (bp->b_vp == vp) {
				if (bp->b_flags & B_CLUSTEROK) {
					vfs_bio_awrite(bp);
				} else {
					bremfree(bp);
					bp->b_flags |= B_ASYNC;
					BUF_WRITE(bp);
				}
			} else {
				bremfree(bp);
				(void) BUF_WRITE(bp);
			}
			goto done;
		}
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		bp->b_flags &= ~B_ASYNC;
		brelse(bp);
		VI_LOCK(vp);
	}
	return (found);
done:
	VI_LOCK(vp);
	return (found);
}

/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 */
int
vtruncbuf(vp, cred, td, length, blksize)
	register struct vnode *vp;
	struct ucred *cred;
	struct thread *td;
	off_t length;
	int blksize;
{
	register struct buf *bp;
	struct buf *nbp;
	int s, anyfreed;
	int trunclbn;

	/*
	 * Round up to the *next* lbn.
	 */
	trunclbn = (length + blksize - 1) / blksize;

	s = splbio();
	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
restart:
	VI_LOCK(vp);
	anyfreed = 1;
	for (;anyfreed;) {
		anyfreed = 0;
		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
				    VI_MTX(vp)) == ENOLCK)
					goto restart;

				bremfree(bp);
				bp->b_flags |= (B_INVAL | B_RELBUF);
				bp->b_flags &= ~B_ASYNC;
				brelse(bp);
				anyfreed = 1;

				if (nbp &&
				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI))) {
					goto restart;
				}
				VI_LOCK(vp);
			}
		}

		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno >= trunclbn) {
				if (BUF_LOCK(bp,
				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
				    VI_MTX(vp)) == ENOLCK)
					goto restart;
				bremfree(bp);
				bp->b_flags |= (B_INVAL | B_RELBUF);
				bp->b_flags &= ~B_ASYNC;
				brelse(bp);
				anyfreed = 1;
				if (nbp &&
				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
				    (nbp->b_vp != vp) ||
				    (nbp->b_flags & B_DELWRI) == 0)) {
					goto restart;
				}
				VI_LOCK(vp);
			}
		}
	}

	if (length > 0) {
restartsync:
		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = TAILQ_NEXT(bp, b_vnbufs);
			if (bp->b_lblkno > 0)
				continue;
			/*
			 * Since we hold the vnode lock this should only
			 * fail if we're racing with the buf daemon.
			 */
			if (BUF_LOCK(bp,
			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
			    VI_MTX(vp)) == ENOLCK) {
				goto restart;
			}
			KASSERT((bp->b_flags & B_DELWRI),
			    ("buf(%p) on dirty queue without DELWRI.", bp));

			bremfree(bp);
			bawrite(bp);
			VI_LOCK(vp);
			goto restartsync;
		}
	}

	while (vp->v_numoutput > 0) {
		vp->v_iflag |= VI_BWAIT;
		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
	}
	VI_UNLOCK(vp);
	splx(s);

	vnode_pager_setsize(vp, length);

	return (0);
}
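
/*
 * Worked example (editor's note, not part of the original source): the
 * rounding above keeps any block that still holds bytes below "length".
 * With blksize == 4096, truncating to length 10000 gives
 * trunclbn = (10000 + 4095) / 4096 == 3, so logical blocks 0-2 (bytes
 * 0-12287) survive and blocks 3 and up are invalidated.
 */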

/*
 * buf_splay() - splay tree core for the clean/dirty list of buffers in
 * 		 a vnode.
 *
 *	NOTE: We have to deal with the special case of a background bitmap
 *	buffer, a situation where two buffers will have the same logical
 *	block offset.  We want (1) only the foreground buffer to be accessed
 *	in a lookup and (2) must differentiate between the foreground and
 *	background buffer in the splay tree algorithm because the splay
 *	tree cannot normally handle multiple entities with the same 'index'.
 *	We accomplish this by adding differentiating flags to the splay tree's
 *	numerical domain.
 */
static
struct buf *
buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
{
	struct buf dummy;
	struct buf *lefttreemax, *righttreemin, *y;

	if (root == NULL)
		return (NULL);
	lefttreemax = righttreemin = &dummy;
	for (;;) {
		if (lblkno < root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_left) == NULL)
				break;
			if (lblkno < y->b_lblkno) {
				/* Rotate right. */
				root->b_left = y->b_right;
				y->b_right = root;
				root = y;
				if ((y = root->b_left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->b_left = root;
			righttreemin = root;
		} else if (lblkno > root->b_lblkno ||
		    (lblkno == root->b_lblkno &&
		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
			if ((y = root->b_right) == NULL)
				break;
			if (lblkno > y->b_lblkno) {
				/* Rotate left. */
				root->b_right = y->b_left;
				y->b_left = root;
				root = y;
				if ((y = root->b_right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->b_right = root;
			lefttreemax = root;
		} else {
			break;
		}
		root = y;
	}
	/* Assemble the new root. */
	lefttreemax->b_right = root->b_left;
	righttreemin->b_left = root->b_right;
	root->b_left = dummy.b_right;
	root->b_right = dummy.b_left;
	return (root);
}
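
/*
 * Illustrative note (editor's note, not part of the original source):
 * the BX_BKGRDMARKER comparisons above are what let a foreground buffer
 * and its background-write shadow share one b_lblkno: the marker bit
 * acts as a secondary sort key, so for equal block numbers the unmarked
 * (foreground) buffer always sorts before the marked shadow, and a
 * lookup performed with xflags == 0 (as gbincore() below does) lands on
 * the foreground buffer.
 */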

static
void
buf_vlist_remove(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	struct buf *root;

	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
	if (bp->b_xflags & BX_VNDIRTY) {
		if (bp != vp->v_dirtyblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
			KASSERT(root == bp, ("splay lookup failed during dirty remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_dirtyblkroot = root;
		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
		vp->v_dirtybufcnt--;
	} else {
		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
		if (bp != vp->v_cleanblkroot) {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
			KASSERT(root == bp, ("splay lookup failed during clean remove"));
		}
		if (bp->b_left == NULL) {
			root = bp->b_right;
		} else {
			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
			root->b_right = bp->b_right;
		}
		vp->v_cleanblkroot = root;
		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
		vp->v_cleanbufcnt--;
	}
	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
}

/*
 * Add the buffer to the sorted clean or dirty block list using a
 * splay tree algorithm.
 *
 * NOTE: xflags is passed as a constant, optimizing this inline function!
 */
static
void
buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
{
	struct buf *root;

	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
	bp->b_xflags |= xflags;
	if (xflags & BX_VNDIRTY) {
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_dirtybufcnt++;
		vp->v_dirtyblkroot = bp;
	} else {
		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
		if (root == NULL) {
			bp->b_left = NULL;
			bp->b_right = NULL;
			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
		} else if (bp->b_lblkno < root->b_lblkno ||
		    (bp->b_lblkno == root->b_lblkno &&
		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
			bp->b_left = root->b_left;
			bp->b_right = root;
			root->b_left = NULL;
			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
		} else {
			bp->b_right = root->b_right;
			bp->b_left = root;
			root->b_right = NULL;
			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
			    root, bp, b_vnbufs);
		}
		vp->v_cleanbufcnt++;
		vp->v_cleanblkroot = bp;
	}
}

/*
 * Lookup a buffer using the splay tree.  Note that we specifically avoid
 * shadow buffers used in background bitmap writes.
 *
 * This code isn't quite as efficient as it could be because we are
 * maintaining two sorted lists and do not know which list the block
 * resides in.
 *
 * During a "make buildworld" the desired buffer is found at one of
 * the roots more than 60% of the time.  Thus, checking both roots
 * before performing either splay eliminates unnecessary splays on the
 * first tree splayed.
 */
struct buf *
gbincore(struct vnode *vp, daddr_t lblkno)
{
	struct buf *bp;

	GIANT_REQUIRED;

	ASSERT_VI_LOCKED(vp, "gbincore");
	if ((bp = vp->v_cleanblkroot) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = vp->v_dirtyblkroot) != NULL &&
	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
		return (bp);
	if ((bp = vp->v_cleanblkroot) != NULL) {
		vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	if ((bp = vp->v_dirtyblkroot) != NULL) {
		vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
			return (bp);
	}
	return (NULL);
}

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));

	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
	    ("bgetvp: bp already attached! %p", bp));

	ASSERT_VI_LOCKED(vp, "bgetvp");
	vholdl(vp);
	bp->b_vp = vp;
	bp->b_dev = vn_todev(vp);
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	buf_vlist_add(bp, vp, BX_VNCLEAN);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	s = splbio();
	VI_LOCK(vp);
	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
		buf_vlist_remove(bp);
	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
		vp->v_iflag &= ~VI_ONWORKLST;
		mtx_lock(&sync_mtx);
		LIST_REMOVE(vp, v_synclist);
		mtx_unlock(&sync_mtx);
	}
	vdropl(vp);
	VI_UNLOCK(vp);
	bp->b_vp = (struct vnode *) 0;
	if (bp->b_object)
		bp->b_object = NULL;
	splx(s);
}

/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add_to_worklist(struct vnode *vp, int delay)
{
	int s, slot;

	s = splbio();
	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");

	mtx_lock(&sync_mtx);
	if (vp->v_iflag & VI_ONWORKLST)
		LIST_REMOVE(vp, v_synclist);
	else
		vp->v_iflag |= VI_ONWORKLST;

	if (delay > syncer_maxdelay - 2)
		delay = syncer_maxdelay - 2;
	slot = (syncer_delayno + delay) & syncer_mask;

	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
	mtx_unlock(&sync_mtx);

	splx(s);
}

struct  proc *updateproc;
static void sched_sync(void);
static struct kproc_desc up_kp = {
	"syncer",
	sched_sync,
	&updateproc
};
SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

/*
 * System filesystem synchronizer daemon.
 */
static void
sched_sync(void)
{
	struct synclist *slp;
	struct vnode *vp;
	struct mount *mp;
	long starttime;
	int s;
	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);  /* XXXKSE */

	mtx_lock(&Giant);

	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
	    SHUTDOWN_PRI_LAST);

	for (;;) {
		kthread_suspend_check(td->td_proc);

		starttime = time_second;

		/*
		 * Push files whose dirty time has expired.  Be careful
		 * of interrupt race on slp queue.
		 */
		s = splbio();
		mtx_lock(&sync_mtx);
		slp = &syncer_workitem_pending[syncer_delayno];
		syncer_delayno += 1;
		if (syncer_delayno == syncer_maxdelay)
			syncer_delayno = 0;
		splx(s);

		while ((vp = LIST_FIRST(slp)) != NULL) {
			mtx_unlock(&sync_mtx);
			if (VOP_ISLOCKED(vp, NULL) == 0 &&
			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
				(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
				VOP_UNLOCK(vp, 0, td);
				vn_finished_write(mp);
			}
			s = splbio();
			mtx_lock(&sync_mtx);
			if (LIST_FIRST(slp) == vp) {
				mtx_unlock(&sync_mtx);
				/*
				 * Note: VFS vnodes can remain on the
				 * worklist too with no dirty blocks, but
				 * since sync_fsync() moves it to a different
				 * slot we are safe.
				 */
				VI_LOCK(vp);
				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
				    !vn_isdisk(vp, NULL)) {
					panic("sched_sync: fsync failed "
					      "vp %p tag %s", vp, vp->v_tag);
				}
				/*
				 * Put us back on the worklist.  The worklist
				 * routine will remove us from our current
				 * position and then add us back in at a later
				 * position.
				 */
				vn_syncer_add_to_worklist(vp, syncdelay);
				VI_UNLOCK(vp);
				mtx_lock(&sync_mtx);
			}
			splx(s);
		}
		mtx_unlock(&sync_mtx);

		/*
		 * Do soft update processing.
		 */
		if (softdep_process_worklist_hook != NULL)
			(*softdep_process_worklist_hook)(NULL);

		/*
		 * The variable rushjob allows the kernel to speed up the
		 * processing of the filesystem syncer process. A rushjob
		 * value of N tells the filesystem syncer to process the next
		 * N seconds worth of work on its queue ASAP. Currently rushjob
		 * is used by the soft update code to speed up the filesystem
		 * syncer process when the incore state is getting so far
		 * ahead of the disk that the kernel memory pool is being
		 * threatened with exhaustion.
		 */
		mtx_lock(&sync_mtx);
		if (rushjob > 0) {
			rushjob -= 1;
			mtx_unlock(&sync_mtx);
			continue;
		}
		mtx_unlock(&sync_mtx);
		/*
		 * If it has taken us less than a second to process the
		 * current work, then wait. Otherwise start right over
		 * again. We can still lose time if any single round
		 * takes more than two seconds, but it does not really
		 * matter as we are just trying to generally pace the
		 * filesystem activity.
		 */
		if (time_second == starttime)
			tsleep(&lbolt, PPAUSE, "syncer", 0);
	}
}

/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 * XXXKSE  only one update?
 */
int
speedup_syncer()
{
	struct thread *td;
	int ret = 0;

	td = FIRST_THREAD_IN_PROC(updateproc);
	mtx_lock_spin(&sched_lock);
	if (td->td_wchan == &lbolt) {
		unsleep(td);
		TD_CLR_SLEEPING(td);
		setrunnable(td);
	}
	mtx_unlock_spin(&sched_lock);
	mtx_lock(&sync_mtx);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		ret = 1;
	}
	mtx_unlock(&sync_mtx);
	return (ret);
}

/*
 * Associate a p-buffer with a vnode.
 *
 * Also sets B_PAGING flag to indicate that vnode is not fully associated
 * with the buffer.  i.e. the bp has not been linked into the vnode or
 * ref-counted.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{

	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));

	bp->b_vp = vp;
	bp->b_flags |= B_PAGING;
	bp->b_dev = vn_todev(vp);
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{

	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));

	/* XXX REMOVE ME */
	VI_LOCK(bp->b_vp);
	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
		panic(
		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
		    bp,
		    (int)bp->b_flags
		);
	}
	VI_UNLOCK(bp->b_vp);
	bp->b_vp = (struct vnode *) 0;
	bp->b_flags &= ~B_PAGING;
}
1891 
1892 /*
1893  * Reassign a buffer from one vnode to another.
1894  * Used to assign file specific control information
1895  * (indirect blocks) to the vnode to which they belong.
1896  */
1897 void
1898 reassignbuf(bp, newvp)
1899 	register struct buf *bp;
1900 	register struct vnode *newvp;
1901 {
1902 	int delay;
1903 	int s;
1904 
1905 	if (newvp == NULL) {
1906 		printf("reassignbuf: NULL\n");
1907 		return;
1908 	}
1909 	++reassignbufcalls;
1910 
1911 	/*
1912 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1913 	 * is not fully linked in.
1914 	 */
1915 	if (bp->b_flags & B_PAGING)
1916 		panic("cannot reassign paging buffer");
1917 
1918 	s = splbio();
1919 	/*
1920 	 * Delete from old vnode list, if on one.
1921 	 */
1922 	VI_LOCK(bp->b_vp);
1923 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1924 		buf_vlist_remove(bp);
1925 		if (bp->b_vp != newvp) {
1926 			vdropl(bp->b_vp);
1927 			bp->b_vp = NULL;	/* for clarification */
1928 		}
1929 	}
1930 	VI_UNLOCK(bp->b_vp);
1931 	/*
1932 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1933 	 * of clean buffers.
1934 	 */
1935 	VI_LOCK(newvp);
1936 	if (bp->b_flags & B_DELWRI) {
1937 		if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
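			/*
			 * Choose a syncer delay by vnode type: directories
			 * (dirdelay) and device metadata (metadelay) get
			 * shorter delays than regular file data (filedelay),
			 * so dependent metadata tends to be flushed slightly
			 * ahead of the data that refers to it.
			 */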
1938 			switch (newvp->v_type) {
1939 			case VDIR:
1940 				delay = dirdelay;
1941 				break;
1942 			case VCHR:
1943 				if (newvp->v_rdev->si_mountpoint != NULL) {
1944 					delay = metadelay;
1945 					break;
1946 				}
1947 				/* FALLTHROUGH */
1948 			default:
1949 				delay = filedelay;
1950 			}
1951 			vn_syncer_add_to_worklist(newvp, delay);
1952 		}
1953 		buf_vlist_add(bp, newvp, BX_VNDIRTY);
1954 	} else {
1955 		buf_vlist_add(bp, newvp, BX_VNCLEAN);
1956 
1957 		if ((newvp->v_iflag & VI_ONWORKLST) &&
1958 		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1959 			mtx_lock(&sync_mtx);
1960 			LIST_REMOVE(newvp, v_synclist);
1961 			mtx_unlock(&sync_mtx);
1962 			newvp->v_iflag &= ~VI_ONWORKLST;
1963 		}
1964 	}
1965 	if (bp->b_vp != newvp) {
1966 		bp->b_vp = newvp;
1967 		vholdl(bp->b_vp);
1968 	}
1969 	VI_UNLOCK(newvp);
1970 	splx(s);
1971 }
1972 
1973 /*
1974  * Create a vnode for a device.
1975  * Used for mounting the root filesystem.
1976  */
1977 int
1978 bdevvp(dev, vpp)
1979 	dev_t dev;
1980 	struct vnode **vpp;
1981 {
1982 	register struct vnode *vp;
1983 	struct vnode *nvp;
1984 	int error;
1985 
1986 	if (dev == NODEV) {
1987 		*vpp = NULLVP;
1988 		return (ENXIO);
1989 	}
1990 	if (vfinddev(dev, VCHR, vpp))
1991 		return (0);
1992 	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1993 	if (error) {
1994 		*vpp = NULLVP;
1995 		return (error);
1996 	}
1997 	vp = nvp;
1998 	vp->v_type = VCHR;
1999 	addalias(vp, dev);
2000 	*vpp = vp;
2001 	return (0);
2002 }
2003 
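/*
 * Adjust a vnode's usecount by the given delta and, for device vnodes,
 * keep the aggregate per-device count (si_usecount) in step under
 * spechash_mtx.  The vnode interlock is expected to be held.
 */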
2004 static void
2005 v_incr_usecount(struct vnode *vp, int delta)
2006 {
2007 	vp->v_usecount += delta;
2008 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2009 		mtx_lock(&spechash_mtx);
2010 		vp->v_rdev->si_usecount += delta;
2011 		mtx_unlock(&spechash_mtx);
2012 	}
2013 }
2014 
2015 /*
2016  * Add vnode to the alias list hung off the dev_t.
2017  *
2018  * The reason for this gunk is that multiple vnodes can reference
2019  * the same physical device, so checking vp->v_usecount to see
2020  * how many users there are is inadequate; the v_usecounts of
2021  * all the vnodes need to be accumulated.  vcount() does that.
2022  */
2023 struct vnode *
2024 addaliasu(nvp, nvp_rdev)
2025 	struct vnode *nvp;
2026 	udev_t nvp_rdev;
2027 {
2028 	struct vnode *ovp;
2029 	vop_t **ops;
2030 	dev_t dev;
2031 
2032 	if (nvp->v_type == VBLK)
2033 		return (nvp);
2034 	if (nvp->v_type != VCHR)
2035 		panic("addaliasu on non-special vnode");
2036 	dev = udev2dev(nvp_rdev, 0);
2037 	/*
2038 	 * Check to see if we have a bdevvp vnode with no associated
2039 	 * filesystem. If so, we want to associate the filesystem of
2040 	 * the newly created vnode with the bdevvp vnode and
2041 	 * discard the newly created vnode rather than leaving the
2042 	 * bdevvp vnode lying around with no associated filesystem.
2043 	 */
2044 	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
2045 		addalias(nvp, dev);
2046 		return (nvp);
2047 	}
2048 	/*
2049 	 * Discard unneeded vnode, but save its node specific data.
2050 	 * Note that if there is a lock, it is carried over in the
2051 	 * node specific data to the replacement vnode.
2052 	 */
2053 	vref(ovp);
2054 	ovp->v_data = nvp->v_data;
2055 	ovp->v_tag = nvp->v_tag;
2056 	nvp->v_data = NULL;
2057 	lockdestroy(ovp->v_vnlock);
2058 	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
2059 	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
2060 	ops = ovp->v_op;
2061 	ovp->v_op = nvp->v_op;
2062 	if (VOP_ISLOCKED(nvp, curthread)) {
2063 		VOP_UNLOCK(nvp, 0, curthread);
2064 		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
2065 	}
2066 	nvp->v_op = ops;
2067 	insmntque(ovp, nvp->v_mount);
2068 	vrele(nvp);
2069 	vgone(nvp);
2070 	return (ovp);
2071 }
2072 
2073 /* Local helper that does the same as addaliasu(), but takes a dev_t
2074  * instead of a udev_t. */
2075 static void
2076 addalias(nvp, dev)
2077 	struct vnode *nvp;
2078 	dev_t dev;
2079 {
2080 
2081 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
2082 	nvp->v_rdev = dev;
2083 	VI_LOCK(nvp);
2084 	mtx_lock(&spechash_mtx);
2085 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
2086 	dev->si_usecount += nvp->v_usecount;
2087 	mtx_unlock(&spechash_mtx);
2088 	VI_UNLOCK(nvp);
2089 }
2090 
2091 /*
2092  * Grab a particular vnode from the free list, increment its
2093  * reference count and lock it. The vnode lock bit is set if the
2094  * vnode is being eliminated in vgone. The process is awakened
2095  * when the transition is completed, and an error returned to
2096  * indicate that the vnode is no longer usable (possibly having
2097  * been changed to a new filesystem type).
2098  */
2099 int
2100 vget(vp, flags, td)
2101 	register struct vnode *vp;
2102 	int flags;
2103 	struct thread *td;
2104 {
2105 	int error;
2106 
2107 	/*
2108 	 * If the vnode is in the process of being cleaned out for
2109 	 * another use, we wait for the cleaning to finish and then
2110 	 * return failure. Cleaning is determined by checking that
2111 	 * the VI_XLOCK flag is set.
2112 	 */
2113 	if ((flags & LK_INTERLOCK) == 0)
2114 		VI_LOCK(vp);
2115 	if (vp->v_iflag & VI_XLOCK && vp->v_vxproc != curthread) {
2116 		vp->v_iflag |= VI_XWANT;
2117 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
2118 		return (ENOENT);
2119 	}
2120 
2121 	v_incr_usecount(vp, 1);
2122 
2123 	if (VSHOULDBUSY(vp))
2124 		vbusy(vp);
2125 	if (flags & LK_TYPE_MASK) {
2126 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2127 			/*
2128 			 * must expand vrele here because we do not want
2129 			 * to call VOP_INACTIVE if the reference count
2130 			 * drops back to zero since it was never really
2131 			 * active. We must remove it from the free list
2132 			 * before sleeping so that multiple processes do
2133 			 * not try to recycle it.
2134 			 */
2135 			VI_LOCK(vp);
2136 			v_incr_usecount(vp, -1);
2137 			if (VSHOULDFREE(vp))
2138 				vfree(vp);
2139 			else
2140 				vlruvp(vp);
2141 			VI_UNLOCK(vp);
2142 		}
2143 		return (error);
2144 	}
2145 	VI_UNLOCK(vp);
2146 	return (0);
2147 }
2148 
2149 /*
2150  * Increase the reference count of a vnode.
2151  */
2152 void
2153 vref(struct vnode *vp)
2154 {
2155 	VI_LOCK(vp);
2156 	v_incr_usecount(vp, 1);
2157 	VI_UNLOCK(vp);
2158 }
2159 
2160 /*
2161  * Return reference count of a vnode.
2162  *
2163  * The results of this call are only guaranteed when some mechanism other
2164  * than the VI lock is used to stop other processes from gaining references
2165  * to the vnode.  This may be the case if the caller holds the only reference.
2166  * This is also useful when stale data is acceptable as race conditions may
2167  * be accounted for by some other means.
2168  */
2169 int
2170 vrefcnt(struct vnode *vp)
2171 {
2172 	int usecnt;
2173 
2174 	VI_LOCK(vp);
2175 	usecnt = vp->v_usecount;
2176 	VI_UNLOCK(vp);
2177 
2178 	return (usecnt);
2179 }
2180 
2181 
2182 /*
2183  * Vnode put/release.
2184  * If count drops to zero, call inactive routine and return to freelist.
2185  */
2186 void
2187 vrele(vp)
2188 	struct vnode *vp;
2189 {
2190 	struct thread *td = curthread;	/* XXX */
2191 
2192 	KASSERT(vp != NULL, ("vrele: null vp"));
2193 
2194 	VI_LOCK(vp);
2195 
2196 	/* Skip this v_writecount check if we're going to panic below. */
2197 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2198 	    ("vrele: missed vn_close"));
2199 
2200 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2201 	    vp->v_usecount == 1)) {
2202 		v_incr_usecount(vp, -1);
2203 		VI_UNLOCK(vp);
2204 
2205 		return;
2206 	}
2207 
2208 	if (vp->v_usecount == 1) {
2209 		v_incr_usecount(vp, -1);
2210 		/*
2211 		 * We must call VOP_INACTIVE with the node locked. Mark
2212 		 * as VI_DOINGINACT to avoid recursion.
2213 		 */
2214 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2215 			VI_LOCK(vp);
2216 			vp->v_iflag |= VI_DOINGINACT;
2217 			VI_UNLOCK(vp);
2218 			VOP_INACTIVE(vp, td);
2219 			VI_LOCK(vp);
2220 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2221 			    ("vrele: lost VI_DOINGINACT"));
2222 			vp->v_iflag &= ~VI_DOINGINACT;
2223 		} else
2224 			VI_LOCK(vp);
2225 		if (VSHOULDFREE(vp))
2226 			vfree(vp);
2227 		else
2228 			vlruvp(vp);
2229 		VI_UNLOCK(vp);
2230 
2231 	} else {
2232 #ifdef DIAGNOSTIC
2233 		vprint("vrele: negative ref count", vp);
2234 #endif
2235 		VI_UNLOCK(vp);
2236 		panic("vrele: negative ref cnt");
2237 	}
2238 }
2239 
2240 /*
2241  * Release an already locked vnode.  This gives the same effect as
2242  * unlock+vrele(), but takes less time and avoids releasing and
2243  * re-acquiring the lock (as vrele() acquires the lock internally).
2244  */
2245 void
2246 vput(vp)
2247 	struct vnode *vp;
2248 {
2249 	struct thread *td = curthread;	/* XXX */
2250 
2251 	GIANT_REQUIRED;
2252 
2253 	KASSERT(vp != NULL, ("vput: null vp"));
2254 	VI_LOCK(vp);
2255 	/* Skip this v_writecount check if we're going to panic below. */
2256 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2257 	    ("vput: missed vn_close"));
2258 
2259 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2260 	    vp->v_usecount == 1)) {
2261 		v_incr_usecount(vp, -1);
2262 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2263 		return;
2264 	}
2265 
2266 	if (vp->v_usecount == 1) {
2267 		v_incr_usecount(vp, -1);
2268 		/*
2269 		 * We must call VOP_INACTIVE with the node locked, so
2270 		 * we just need to release the vnode mutex. Mark as
2271 		 * we just need to release the vnode mutex.  Mark it
2272 		 * as VI_DOINGINACT to avoid recursion.
2273 		vp->v_iflag |= VI_DOINGINACT;
2274 		VI_UNLOCK(vp);
2275 		VOP_INACTIVE(vp, td);
2276 		VI_LOCK(vp);
2277 		KASSERT(vp->v_iflag & VI_DOINGINACT,
2278 		    ("vput: lost VI_DOINGINACT"));
2279 		vp->v_iflag &= ~VI_DOINGINACT;
2280 		if (VSHOULDFREE(vp))
2281 			vfree(vp);
2282 		else
2283 			vlruvp(vp);
2284 		VI_UNLOCK(vp);
2285 
2286 	} else {
2287 #ifdef DIAGNOSTIC
2288 		vprint("vput: negative ref count", vp);
2289 #endif
2290 		panic("vput: negative ref cnt");
2291 	}
2292 }
2293 
2294 /*
2295  * Somebody doesn't want the vnode recycled.
2296  */
2297 void
2298 vhold(struct vnode *vp)
2299 {
2300 	VI_LOCK(vp);
2301 	vholdl(vp);
2302 	VI_UNLOCK(vp);
2303 }
2304 
2305 void
2306 vholdl(vp)
2307 	register struct vnode *vp;
2308 {
2309 	int s;
2310 
2311 	s = splbio();
2312 	vp->v_holdcnt++;
2313 	if (VSHOULDBUSY(vp))
2314 		vbusy(vp);
2315 	splx(s);
2316 }
2317 
2318 /*
2319  * Note that there is one less who cares about this vnode.  vdrop() is the
2320  * opposite of vhold().
2321  */
2322 void
2323 vdrop(struct vnode *vp)
2324 {
2325 	VI_LOCK(vp);
2326 	vdropl(vp);
2327 	VI_UNLOCK(vp);
2328 }
2329 
2330 void
2331 vdropl(vp)
2332 	register struct vnode *vp;
2333 {
2334 	int s;
2335 
2336 	s = splbio();
2337 	if (vp->v_holdcnt <= 0)
2338 		panic("vdrop: holdcnt");
2339 	vp->v_holdcnt--;
2340 	if (VSHOULDFREE(vp))
2341 		vfree(vp);
2342 	else
2343 		vlruvp(vp);
2344 	splx(s);
2345 }
2346 
2347 /*
2348  * Remove any vnodes in the vnode table belonging to mount point mp.
2349  *
2350  * If FORCECLOSE is not specified, there should not be any active ones,
2351  * return error if any are found (nb: this is a user error, not a
2352  * system error). If FORCECLOSE is specified, detach any active vnodes
2353  * that are found.
2354  *
2355  * If WRITECLOSE is set, only flush out regular file vnodes open for
2356  * writing.
2357  *
2358  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2359  *
2360  * `rootrefs' specifies the base reference count for the root vnode
2361  * of this filesystem. The root vnode is considered busy if its
2362  * v_usecount exceeds this value. On a successful return, vflush()
2363  * will call vrele() on the root vnode exactly rootrefs times.
2364  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2365  * be zero.
2366  */
2367 #ifdef DIAGNOSTIC
2368 static int busyprt = 0;		/* print out busy vnodes */
2369 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2370 #endif
2371 
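/*
 * A typical unmount-time call (sketch, not taken verbatim from any
 * filesystem): a caller holding one reference on the root vnode might
 * use
 *
 *	error = vflush(mp, 1, mntflags & MNT_FORCE ? FORCECLOSE : 0);
 *
 * so that the root vnode's single remaining reference is not counted
 * as busy and is released by vflush() itself on success.
 */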
2372 int
2373 vflush(mp, rootrefs, flags)
2374 	struct mount *mp;
2375 	int rootrefs;
2376 	int flags;
2377 {
2378 	struct thread *td = curthread;	/* XXX */
2379 	struct vnode *vp, *nvp, *rootvp = NULL;
2380 	struct vattr vattr;
2381 	int busy = 0, error;
2382 
2383 	if (rootrefs > 0) {
2384 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2385 		    ("vflush: bad args"));
2386 		/*
2387 		 * Get the filesystem root vnode. We can vput() it
2388 		 * immediately, since with rootrefs > 0, it won't go away.
2389 		 */
2390 		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
2391 			return (error);
2392 		vput(rootvp);
2393 
2394 	}
2395 	mtx_lock(&mntvnode_mtx);
2396 loop:
2397 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
2398 		/*
2399 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
2400 		 * Start over if it has (it won't be on the list anymore).
2401 		 */
2402 		if (vp->v_mount != mp)
2403 			goto loop;
2404 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
2405 
2406 		VI_LOCK(vp);
2407 		mtx_unlock(&mntvnode_mtx);
2408 		vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, td);
2409 		/*
2410 		 * This vnode could have been reclaimed while we were
2411 		 * waiting for the lock since we are not holding a
2412 		 * reference.
2413 		 * Start over if the vnode was reclaimed.
2414 		 */
2415 		if (vp->v_mount != mp) {
2416 			VOP_UNLOCK(vp, 0, td);
2417 			mtx_lock(&mntvnode_mtx);
2418 			goto loop;
2419 		}
2420 		/*
2421 		 * Skip over vnodes marked VV_SYSTEM.
2422 		 */
2423 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2424 			VOP_UNLOCK(vp, 0, td);
2425 			mtx_lock(&mntvnode_mtx);
2426 			continue;
2427 		}
2428 		/*
2429 		 * If WRITECLOSE is set, flush out unlinked but still open
2430 		 * files (even if open only for reading) and regular file
2431 		 * vnodes open for writing.
2432 		 */
2433 		if (flags & WRITECLOSE) {
2434 			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2435 			VI_LOCK(vp);
2436 
2437 			if ((vp->v_type == VNON ||
2438 			    (error == 0 && vattr.va_nlink > 0)) &&
2439 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2440 				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2441 				mtx_lock(&mntvnode_mtx);
2442 				continue;
2443 			}
2444 		} else
2445 			VI_LOCK(vp);
2446 
2447 		VOP_UNLOCK(vp, 0, td);
2448 
2449 		/*
2450 		 * With v_usecount == 0, all we need to do is clear out the
2451 		 * vnode data structures and we are done.
2452 		 */
2453 		if (vp->v_usecount == 0) {
2454 			vgonel(vp, td);
2455 			mtx_lock(&mntvnode_mtx);
2456 			continue;
2457 		}
2458 
2459 		/*
2460 		 * If FORCECLOSE is set, forcibly close the vnode. For block
2461 		 * or character devices, revert to an anonymous device. For
2462 		 * all other files, just kill them.
2463 		 */
2464 		if (flags & FORCECLOSE) {
2465 			if (vp->v_type != VCHR) {
2466 				vgonel(vp, td);
2467 			} else {
2468 				vclean(vp, 0, td);
2469 				VI_UNLOCK(vp);
2470 				vp->v_op = spec_vnodeop_p;
2471 				insmntque(vp, (struct mount *) 0);
2472 			}
2473 			mtx_lock(&mntvnode_mtx);
2474 			continue;
2475 		}
2476 #ifdef DIAGNOSTIC
2477 		if (busyprt)
2478 			vprint("vflush: busy vnode", vp);
2479 #endif
2480 		VI_UNLOCK(vp);
2481 		mtx_lock(&mntvnode_mtx);
2482 		busy++;
2483 	}
2484 	mtx_unlock(&mntvnode_mtx);
2485 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2486 		/*
2487 		 * If just the root vnode is busy, and if its refcount
2488 		 * is equal to `rootrefs', then go ahead and kill it.
2489 		 */
2490 		VI_LOCK(rootvp);
2491 		KASSERT(busy > 0, ("vflush: not busy"));
2492 		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
2493 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2494 			vgonel(rootvp, td);
2495 			busy = 0;
2496 		} else
2497 			VI_UNLOCK(rootvp);
2498 	}
2499 	if (busy)
2500 		return (EBUSY);
2501 	for (; rootrefs > 0; rootrefs--)
2502 		vrele(rootvp);
2503 	return (0);
2504 }
2505 
2506 /*
2507  * This moves a now (likely recyclable) vnode to the end of the
2508  * mountlist.  XXX However, it is temporarily disabled until we
2509  * can clean up ffs_sync() and friends, which have loop restart
2510  * conditions that this code causes to operate in O(N^2) time.
2511  */
2512 static void
2513 vlruvp(struct vnode *vp)
2514 {
2515 #if 0
2516 	struct mount *mp;
2517 
2518 	if ((mp = vp->v_mount) != NULL) {
2519 		mtx_lock(&mntvnode_mtx);
2520 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2521 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2522 		mtx_unlock(&mntvnode_mtx);
2523 	}
2524 #endif
2525 }
2526 
2527 /*
2528  * Disassociate the underlying filesystem from a vnode.
2529  */
2530 static void
2531 vclean(vp, flags, td)
2532 	struct vnode *vp;
2533 	int flags;
2534 	struct thread *td;
2535 {
2536 	int active;
2537 
2538 	ASSERT_VI_LOCKED(vp, "vclean");
2539 	/*
2540 	 * Check to see if the vnode is in use. If so we have to reference it
2541 	 * before we clean it out so that its count cannot fall to zero and
2542 	 * generate a race against ourselves to recycle it.
2543 	 */
2544 	if ((active = vp->v_usecount))
2545 		v_incr_usecount(vp, 1);
2546 
2547 	/*
2548 	 * Prevent the vnode from being recycled or brought into use while we
2549 	 * clean it out.
2550 	 */
2551 	if (vp->v_iflag & VI_XLOCK)
2552 		panic("vclean: deadlock");
2553 	vp->v_iflag |= VI_XLOCK;
2554 	vp->v_vxproc = curthread;
2555 	/*
2556 	 * Even if the count is zero, the VOP_INACTIVE routine may still
2557 	 * have the object locked while it cleans it out. The VOP_LOCK
2558 	 * ensures that the VOP_INACTIVE routine is done with its work.
2559 	 * For active vnodes, it ensures that no other activity can
2560 	 * occur while the underlying object is being cleaned out.
2561 	 */
2562 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2563 
2564 	/*
2565 	 * Clean out any buffers associated with the vnode.
2566 	 * If the flush fails, just toss the buffers.
2567 	 */
2568 	if (flags & DOCLOSE) {
2569 		struct buf *bp;
2570 		VI_LOCK(vp);
2571 		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2572 		VI_UNLOCK(vp);
2573 		if (bp != NULL)
2574 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2575 		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2576 			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2577 	}
2578 
2579 	VOP_DESTROYVOBJECT(vp);
2580 
2581 	/*
2582 	 * Any other processes trying to obtain this lock must first
2583 	 * wait for VXLOCK to clear, then call the new lock operation.
2584 	 */
2585 	VOP_UNLOCK(vp, 0, td);
2586 
2587 	/*
2588 	 * If purging an active vnode, it must be closed and
2589 	 * deactivated before being reclaimed. Note that the
2590 	 * VOP_INACTIVE will unlock the vnode.
2591 	 */
2592 	if (active) {
2593 		if (flags & DOCLOSE)
2594 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2595 		VI_LOCK(vp);
2596 		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2597 			vp->v_iflag |= VI_DOINGINACT;
2598 			VI_UNLOCK(vp);
2599 			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2600 				panic("vclean: cannot relock.");
2601 			VOP_INACTIVE(vp, td);
2602 			VI_LOCK(vp);
2603 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2604 			    ("vclean: lost VI_DOINGINACT"));
2605 			vp->v_iflag &= ~VI_DOINGINACT;
2606 		}
2607 		VI_UNLOCK(vp);
2608 	}
2609 
2610 	/*
2611 	 * Reclaim the vnode.
2612 	 */
2613 	if (VOP_RECLAIM(vp, td))
2614 		panic("vclean: cannot reclaim");
2615 
2616 	if (active) {
2617 		/*
2618 		 * Inline copy of vrele() since VOP_INACTIVE
2619 		 * has already been called.
2620 		 */
2621 		VI_LOCK(vp);
2622 		v_incr_usecount(vp, -1);
2623 		if (vp->v_usecount <= 0) {
2624 #ifdef DIAGNOSTIC
2625 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2626 				vprint("vclean: bad ref count", vp);
2627 				panic("vclean: ref cnt");
2628 			}
2629 #endif
2630 			vfree(vp);
2631 		}
2632 		VI_UNLOCK(vp);
2633 	}
2634 
2635 	cache_purge(vp);
2636 	VI_LOCK(vp);
2637 	if (VSHOULDFREE(vp))
2638 		vfree(vp);
2639 
2640 	/*
2641 	 * Done with purge, reset to the standard lock and
2642 	 * notify sleepers of the grim news.
2643 	 */
2644 	vp->v_vnlock = &vp->v_lock;
2645 	vp->v_op = dead_vnodeop_p;
2646 	if (vp->v_pollinfo != NULL)
2647 		vn_pollgone(vp);
2648 	vp->v_tag = "none";
2649 	vp->v_iflag &= ~VI_XLOCK;
2650 	vp->v_vxproc = NULL;
2651 	if (vp->v_iflag & VI_XWANT) {
2652 		vp->v_iflag &= ~VI_XWANT;
2653 		wakeup(vp);
2654 	}
2655 }
2656 
2657 /*
2658  * Eliminate all activity associated with the requested vnode
2659  * and with all vnodes aliased to the requested vnode.
2660  */
2661 int
2662 vop_revoke(ap)
2663 	struct vop_revoke_args /* {
2664 		struct vnode *a_vp;
2665 		int a_flags;
2666 	} */ *ap;
2667 {
2668 	struct vnode *vp, *vq;
2669 	dev_t dev;
2670 
2671 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2672 	vp = ap->a_vp;
2673 	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2674 
2675 	VI_LOCK(vp);
2676 	/*
2677 	 * If a vgone (or vclean) is already in progress,
2678 	 * wait until it is done and return.
2679 	 */
2680 	if (vp->v_iflag & VI_XLOCK) {
2681 		vp->v_iflag |= VI_XWANT;
2682 		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2683 		    "vop_revokeall", 0);
2684 		return (0);
2685 	}
2686 	VI_UNLOCK(vp);
2687 	dev = vp->v_rdev;
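	/*
	 * vgone() removes each alias from the device's si_hlist, so
	 * repeatedly taking the first element drains the whole alias
	 * list.
	 */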
2688 	for (;;) {
2689 		mtx_lock(&spechash_mtx);
2690 		vq = SLIST_FIRST(&dev->si_hlist);
2691 		mtx_unlock(&spechash_mtx);
2692 		if (!vq)
2693 			break;
2694 		vgone(vq);
2695 	}
2696 	return (0);
2697 }
2698 
2699 /*
2700  * Recycle an unused vnode to the front of the free list.
2701  * Release the passed interlock if the vnode will be recycled.
2702  */
2703 int
2704 vrecycle(vp, inter_lkp, td)
2705 	struct vnode *vp;
2706 	struct mtx *inter_lkp;
2707 	struct thread *td;
2708 {
2709 
2710 	VI_LOCK(vp);
2711 	if (vp->v_usecount == 0) {
2712 		if (inter_lkp) {
2713 			mtx_unlock(inter_lkp);
2714 		}
2715 		vgonel(vp, td);
2716 		return (1);
2717 	}
2718 	VI_UNLOCK(vp);
2719 	return (0);
2720 }
2721 
2722 /*
2723  * Eliminate all activity associated with a vnode
2724  * in preparation for reuse.
2725  */
2726 void
2727 vgone(vp)
2728 	register struct vnode *vp;
2729 {
2730 	struct thread *td = curthread;	/* XXX */
2731 
2732 	VI_LOCK(vp);
2733 	vgonel(vp, td);
2734 }
2735 
2736 /*
2737  * vgone, with the vp interlock held.
2738  */
2739 void
2740 vgonel(vp, td)
2741 	struct vnode *vp;
2742 	struct thread *td;
2743 {
2744 	int s;
2745 
2746 	/*
2747 	 * If a vgone (or vclean) is already in progress,
2748 	 * wait until it is done and return.
2749 	 */
2750 	ASSERT_VI_LOCKED(vp, "vgonel");
2751 	if (vp->v_iflag & VI_XLOCK) {
2752 		vp->v_iflag |= VI_XWANT;
2753 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2754 		return;
2755 	}
2756 
2757 	/*
2758 	 * Clean out the filesystem specific data.
2759 	 */
2760 	vclean(vp, DOCLOSE, td);
2761 	VI_UNLOCK(vp);
2762 
2763 	/*
2764 	 * Delete from old mount point vnode list, if on one.
2765 	 */
2766 	if (vp->v_mount != NULL)
2767 		insmntque(vp, (struct mount *)0);
2768 	/*
2769 	 * If special device, remove it from special device alias list
2770 	 * if it is on one.
2771 	 */
2772 	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
2773 		VI_LOCK(vp);
2774 		mtx_lock(&spechash_mtx);
2775 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2776 		vp->v_rdev->si_usecount -= vp->v_usecount;
2777 		mtx_unlock(&spechash_mtx);
2778 		VI_UNLOCK(vp);
2779 		vp->v_rdev = NULL;
2780 	}
2781 
2782 	/*
2783 	 * If it is on the freelist and not already at the head,
2784 	 * move it to the head of the list. The test of the
2785 	 * VDOOMED flag and the reference count of zero is because
2786 	 * it will be removed from the free list by getnewvnode,
2787 	 * but will not have its reference count incremented until
2788 	 * after calling vgone. If the reference count were
2789 	 * incremented first, vgone would (incorrectly) try to
2790 	 * close the previous instance of the underlying object.
2791 	 */
2792 	VI_LOCK(vp);
2793 	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2794 		s = splbio();
2795 		mtx_lock(&vnode_free_list_mtx);
2796 		if (vp->v_iflag & VI_FREE) {
2797 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2798 		} else {
2799 			vp->v_iflag |= VI_FREE;
2800 			freevnodes++;
2801 		}
2802 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2803 		mtx_unlock(&vnode_free_list_mtx);
2804 		splx(s);
2805 	}
2806 
2807 	vp->v_type = VBAD;
2808 	VI_UNLOCK(vp);
2809 }
2810 
2811 /*
2812  * Lookup a vnode by device number.
2813  */
2814 int
2815 vfinddev(dev, type, vpp)
2816 	dev_t dev;
2817 	enum vtype type;
2818 	struct vnode **vpp;
2819 {
2820 	struct vnode *vp;
2821 
2822 	mtx_lock(&spechash_mtx);
2823 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2824 		if (type == vp->v_type) {
2825 			*vpp = vp;
2826 			mtx_unlock(&spechash_mtx);
2827 			return (1);
2828 		}
2829 	}
2830 	mtx_unlock(&spechash_mtx);
2831 	return (0);
2832 }
2833 
2834 /*
2835  * Calculate the total number of references to a special device.
2836  */
2837 int
2838 vcount(vp)
2839 	struct vnode *vp;
2840 {
2841 	int count;
2842 
2843 	mtx_lock(&spechash_mtx);
2844 	count = vp->v_rdev->si_usecount;
2845 	mtx_unlock(&spechash_mtx);
2846 	return (count);
2847 }
2848 
2849 /*
2850  * Same as vcount(), but using the dev_t as the argument.
2851  */
2852 int
2853 count_dev(dev)
2854 	dev_t dev;
2855 {
2856 	struct vnode *vp;
2857 
2858 	vp = SLIST_FIRST(&dev->si_hlist);
2859 	if (vp == NULL)
2860 		return (0);
2861 	return(vcount(vp));
2862 }
2863 
2864 /*
2865  * Print out a description of a vnode.
2866  */
2867 static char *typename[] =
2868 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2869 
2870 void
2871 vprint(label, vp)
2872 	char *label;
2873 	struct vnode *vp;
2874 {
2875 	char buf[96];
2876 
2877 	if (label != NULL)
2878 		printf("%s: %p: ", label, (void *)vp);
2879 	else
2880 		printf("%p: ", (void *)vp);
2881 	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2882 	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2883 	    vp->v_writecount, vp->v_holdcnt);
2884 	buf[0] = '\0';
2885 	if (vp->v_vflag & VV_ROOT)
2886 		strcat(buf, "|VV_ROOT");
2887 	if (vp->v_vflag & VV_TEXT)
2888 		strcat(buf, "|VV_TEXT");
2889 	if (vp->v_vflag & VV_SYSTEM)
2890 		strcat(buf, "|VV_SYSTEM");
2891 	if (vp->v_iflag & VI_XLOCK)
2892 		strcat(buf, "|VI_XLOCK");
2893 	if (vp->v_iflag & VI_XWANT)
2894 		strcat(buf, "|VI_XWANT");
2895 	if (vp->v_iflag & VI_BWAIT)
2896 		strcat(buf, "|VI_BWAIT");
2897 	if (vp->v_iflag & VI_DOOMED)
2898 		strcat(buf, "|VI_DOOMED");
2899 	if (vp->v_iflag & VI_FREE)
2900 		strcat(buf, "|VI_FREE");
2901 	if (vp->v_vflag & VV_OBJBUF)
2902 		strcat(buf, "|VV_OBJBUF");
2903 	if (buf[0] != '\0')
2904 		printf(" flags (%s),", &buf[1]);
2905 	lockmgr_printinfo(vp->v_vnlock);
2906 	printf("\n");
2907 	if (vp->v_data != NULL)
2908 		VOP_PRINT(vp);
2909 }
2910 
2911 #ifdef DDB
2912 #include <ddb/ddb.h>
2913 /*
2914  * List all of the locked vnodes in the system.
2915  * Called when debugging the kernel.
2916  */
2917 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2918 {
2919 	struct mount *mp, *nmp;
2920 	struct vnode *vp;
2921 
2922 	/*
2923 	 * Note: because this is DDB, we can't obey the locking semantics
2924 	 * for these structures, which means we could catch an inconsistent
2925 	 * state and dereference a nasty pointer.  Not much to be done
2926 	 * about that.
2927 	 */
2928 	printf("Locked vnodes\n");
2929 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2930 		nmp = TAILQ_NEXT(mp, mnt_list);
2931 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2932 			if (VOP_ISLOCKED(vp, NULL))
2933 				vprint(NULL, vp);
2934 		}
2936 	}
2937 }
2938 #endif
2939 
2940 /*
2941  * Fill in a struct xvfsconf based on a struct vfsconf.
2942  */
2943 static void
2944 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2945 {
2946 
2947 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2948 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2949 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2950 	xvfsp->vfc_flags = vfsp->vfc_flags;
2951 	/*
2952 	 * These are unused in userland; we keep them
2953 	 * only to preserve binary compatibility.
2954 	 */
2955 	xvfsp->vfc_vfsops = NULL;
2956 	xvfsp->vfc_next = NULL;
2957 }
2958 
2959 static int
2960 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2961 {
2962 	struct vfsconf *vfsp;
2963 	struct xvfsconf *xvfsp;
2964 	int cnt, error, i;
2965 
2966 	cnt = 0;
2967 	for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
2968 		cnt++;
2969 	xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
2970 	/*
2971 	 * Handle the race that will exist here once struct vfsconf is
2972 	 * locked down: use both cnt and a NULL check on vfc_next to
2973 	 * determine the end of the loop.  The race arises because we
2974 	 * have to unlock before calling malloc().  We are protected by
2975 	 * Giant for now.
2976 	 */
2977 	i = 0;
2978 	for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
2979 		vfsconf2x(vfsp, xvfsp + i);
2980 		i++;
2981 	}
2982 	error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
2983 	free(xvfsp, M_TEMP);
2984 	return (error);
2985 }
2986 
2987 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2988     "S,xvfsconf", "List of all configured filesystems");
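
/*
 * Userland consumers (e.g. getvfsbyname(3)) are expected to read this
 * as an array of struct xvfsconf, roughly:
 *
 *	sysctlbyname("vfs.conflist", NULL, &len, NULL, 0);
 *	xvfsp = malloc(len);
 *	sysctlbyname("vfs.conflist", xvfsp, &len, NULL, 0);
 */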
2989 
2990 /*
2991  * Top level filesystem related information gathering.
2992  */
2993 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2994 
2995 static int
2996 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2997 {
2998 	int *name = (int *)arg1 - 1;	/* XXX */
2999 	u_int namelen = arg2 + 1;	/* XXX */
3000 	struct vfsconf *vfsp;
3001 	struct xvfsconf xvfsp;
3002 
3003 	printf("WARNING: userland calling deprecated sysctl, "
3004 	    "please rebuild world\n");
3005 
3006 #if 1 || defined(COMPAT_PRELITE2)
3007 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
3008 	if (namelen == 1)
3009 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
3010 #endif
3011 
3012 	switch (name[1]) {
3013 	case VFS_MAXTYPENUM:
3014 		if (namelen != 2)
3015 			return (ENOTDIR);
3016 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
3017 	case VFS_CONF:
3018 		if (namelen != 3)
3019 			return (ENOTDIR);	/* overloaded */
3020 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
3021 			if (vfsp->vfc_typenum == name[2])
3022 				break;
3023 		if (vfsp == NULL)
3024 			return (EOPNOTSUPP);
3025 		vfsconf2x(vfsp, &xvfsp);
3026 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3027 	}
3028 	return (EOPNOTSUPP);
3029 }
3030 
3031 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
3032 	"Generic filesystem");
3033 
3034 #if 1 || defined(COMPAT_PRELITE2)
3035 
3036 static int
3037 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3038 {
3039 	int error;
3040 	struct vfsconf *vfsp;
3041 	struct ovfsconf ovfs;
3042 
3043 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
3044 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3045 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3046 		ovfs.vfc_index = vfsp->vfc_typenum;
3047 		ovfs.vfc_refcount = vfsp->vfc_refcount;
3048 		ovfs.vfc_flags = vfsp->vfc_flags;
3049 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3050 		if (error)
3051 			return (error);
3052 	}
3053 	return (0);
3054 }
3055 
3056 #endif /* 1 || COMPAT_PRELITE2 */
3057 
3058 #define KINFO_VNODESLOP		10
3059 #ifdef notyet
3060 /*
3061  * Dump vnode list (via sysctl).
3062  */
3063 /* ARGSUSED */
3064 static int
3065 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3066 {
3067 	struct xvnode *xvn;
3068 	struct thread *td = req->td;
3069 	struct mount *mp;
3070 	struct vnode *vp;
3071 	int error, len, n;
3072 
3073 	/*
3074 	 * Stale numvnodes access is not fatal here.
3075 	 */
3076 	req->lock = 0;
3077 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3078 	if (!req->oldptr)
3079 		/* Make an estimate */
3080 		return (SYSCTL_OUT(req, 0, len));
3081 
3082 	sysctl_wire_old_buffer(req, 0);
3083 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3084 	n = 0;
3085 	mtx_lock(&mountlist_mtx);
3086 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3087 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3088 			continue;
3089 		mtx_lock(&mntvnode_mtx);
3090 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3091 			if (n == len)
3092 				break;
3093 			vref(vp);
3094 			xvn[n].xv_size = sizeof *xvn;
3095 			xvn[n].xv_vnode = vp;
3096 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3097 			XV_COPY(usecount);
3098 			XV_COPY(writecount);
3099 			XV_COPY(holdcnt);
3100 			XV_COPY(id);
3101 			XV_COPY(mount);
3102 			XV_COPY(numoutput);
3103 			XV_COPY(type);
3104 #undef XV_COPY
3105 			xvn[n].xv_flag = vp->v_vflag;
3106 
3107 			switch (vp->v_type) {
3108 			case VREG:
3109 			case VDIR:
3110 			case VLNK:
3111 				xvn[n].xv_dev = vp->v_cachedfs;
3112 				xvn[n].xv_ino = vp->v_cachedid;
3113 				break;
3114 			case VBLK:
3115 			case VCHR:
3116 				if (vp->v_rdev == NULL) {
3117 					vrele(vp);
3118 					continue;
3119 				}
3120 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3121 				break;
3122 			case VSOCK:
3123 				xvn[n].xv_socket = vp->v_socket;
3124 				break;
3125 			case VFIFO:
3126 				xvn[n].xv_fifo = vp->v_fifoinfo;
3127 				break;
3128 			case VNON:
3129 			case VBAD:
3130 			default:
3131 				/* shouldn't happen? */
3132 				vrele(vp);
3133 				continue;
3134 			}
3135 			vrele(vp);
3136 			++n;
3137 		}
3138 		mtx_unlock(&mntvnode_mtx);
3139 		mtx_lock(&mountlist_mtx);
3140 		vfs_unbusy(mp, td);
3141 		if (n == len)
3142 			break;
3143 	}
3144 	mtx_unlock(&mountlist_mtx);
3145 
3146 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3147 	free(xvn, M_TEMP);
3148 	return (error);
3149 }
3150 
3151 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3152 	0, 0, sysctl_vnode, "S,xvnode", "");
3153 #endif
3154 
3155 /*
3156  * Check to see if a filesystem is mounted on a block device.
3157  */
3158 int
3159 vfs_mountedon(vp)
3160 	struct vnode *vp;
3161 {
3162 
3163 	if (vp->v_rdev->si_mountpoint != NULL)
3164 		return (EBUSY);
3165 	return (0);
3166 }
3167 
3168 /*
3169  * Unmount all filesystems. The list is traversed in reverse order
3170  * of mounting to avoid dependencies.
3171  */
3172 void
3173 vfs_unmountall()
3174 {
3175 	struct mount *mp;
3176 	struct thread *td;
3177 	int error;
3178 
3179 	if (curthread != NULL)
3180 		td = curthread;
3181 	else
3182 		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3183 	/*
3184 	 * Since this only runs when rebooting, it is not interlocked.
3185 	 */
3186 	while (!TAILQ_EMPTY(&mountlist)) {
3187 		mp = TAILQ_LAST(&mountlist, mntlist);
3188 		error = dounmount(mp, MNT_FORCE, td);
3189 		if (error) {
3190 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3191 			printf("unmount of %s failed (",
3192 			    mp->mnt_stat.f_mntonname);
3193 			if (error == EBUSY)
3194 				printf("BUSY)\n");
3195 			else
3196 				printf("%d)\n", error);
3197 		} else {
3198 			/* The unmount has removed mp from the mountlist */
3199 		}
3200 	}
3201 }
3202 
3203 /*
3204  * Perform msync on all vnodes under a mount point.
3205  * The mount point must be locked.
3206  */
3207 void
3208 vfs_msync(struct mount *mp, int flags)
3209 {
3210 	struct vnode *vp, *nvp;
3211 	struct vm_object *obj;
3212 	int tries;
3213 
3214 	GIANT_REQUIRED;
3215 
3216 	tries = 5;
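	/*
	 * The vnode list can change while mntvnode_mtx is dropped below,
	 * so the scan restarts whenever such a change is detected, but
	 * only a bounded number of times so the loop always terminates.
	 */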
3217 	mtx_lock(&mntvnode_mtx);
3218 loop:
3219 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
3220 		if (vp->v_mount != mp) {
3221 			if (--tries > 0)
3222 				goto loop;
3223 			break;
3224 		}
3225 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
3226 
3227 		VI_LOCK(vp);
3228 		if (vp->v_iflag & VI_XLOCK) {	/* XXX: what if MNT_WAIT? */
3229 			VI_UNLOCK(vp);
3230 			continue;
3231 		}
3232 
3233 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3234 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3235 			mtx_unlock(&mntvnode_mtx);
3236 			if (!vget(vp,
3237 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3238 			    curthread)) {
3239 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3240 					vput(vp);
3241 					mtx_lock(&mntvnode_mtx);
3242 					continue;
3243 				}
3244 
3245 				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3246 					VM_OBJECT_LOCK(obj);
3247 					vm_object_page_clean(obj, 0, 0,
3248 					    flags == MNT_WAIT ?
3249 					    OBJPC_SYNC : OBJPC_NOSYNC);
3250 					VM_OBJECT_UNLOCK(obj);
3251 				}
3252 				vput(vp);
3253 			}
3254 			mtx_lock(&mntvnode_mtx);
3255 			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3256 				if (--tries > 0)
3257 					goto loop;
3258 				break;
3259 			}
3260 		} else
3261 			VI_UNLOCK(vp);
3262 	}
3263 	mtx_unlock(&mntvnode_mtx);
3264 }
3265 
3266 /*
3267  * Create the VM object needed for VMIO and mmap support.  This
3268  * is done for all VREG files in the system.  Some filesystems may
3269  * also take advantage of the additional metadata buffering that the
3270  * VMIO code affords by putting their device nodes into VMIO mode.
3271  *
3272  * vp must be locked when vfs_object_create is called.
3273  */
3274 int
3275 vfs_object_create(vp, td, cred)
3276 	struct vnode *vp;
3277 	struct thread *td;
3278 	struct ucred *cred;
3279 {
3280 	GIANT_REQUIRED;
3281 	return (VOP_CREATEVOBJECT(vp, cred, td));
3282 }
3283 
3284 /*
3285  * Mark a vnode as free, putting it up for recycling.
3286  */
3287 void
3288 vfree(vp)
3289 	struct vnode *vp;
3290 {
3291 	int s;
3292 
3293 	ASSERT_VI_LOCKED(vp, "vfree");
3294 	s = splbio();
3295 	mtx_lock(&vnode_free_list_mtx);
3296 	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3297 	if (vp->v_iflag & VI_AGE) {
3298 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3299 	} else {
3300 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3301 	}
3302 	freevnodes++;
3303 	mtx_unlock(&vnode_free_list_mtx);
3304 	vp->v_iflag &= ~VI_AGE;
3305 	vp->v_iflag |= VI_FREE;
3306 	splx(s);
3307 }
3308 
3309 /*
3310  * Opposite of vfree() - mark a vnode as in use.
3311  */
3312 void
3313 vbusy(vp)
3314 	struct vnode *vp;
3315 {
3316 	int s;
3317 
3318 	s = splbio();
3319 	ASSERT_VI_LOCKED(vp, "vbusy");
3320 	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3321 
3322 	mtx_lock(&vnode_free_list_mtx);
3323 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3324 	freevnodes--;
3325 	mtx_unlock(&vnode_free_list_mtx);
3326 
3327 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3328 	splx(s);
3329 }
3330 
3331 /*
3332  * Record a process's interest in events which might happen to
3333  * a vnode.  Because poll uses the historic select-style interface
3334  * internally, this routine serves as both the ``check for any
3335  * pending events'' and the ``record my interest in future events''
3336  * functions.  (These are done together, while the lock is held,
3337  * to avoid race conditions.)
3338  */
3339 int
3340 vn_pollrecord(vp, td, events)
3341 	struct vnode *vp;
3342 	struct thread *td;
3343 	short events;
3344 {
3345 
3346 	if (vp->v_pollinfo == NULL)
3347 		v_addpollinfo(vp);
3348 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3349 	if (vp->v_pollinfo->vpi_revents & events) {
3350 		/*
3351 		 * This leaves events we are not interested
3352 		 * in available for the other process, which
3353 		 * presumably had requested them
3354 		 * (otherwise they would never have been
3355 		 * recorded).
3356 		 */
3357 		events &= vp->v_pollinfo->vpi_revents;
3358 		vp->v_pollinfo->vpi_revents &= ~events;
3359 
3360 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3361 		return (events);
3362 	}
3363 	vp->v_pollinfo->vpi_events |= events;
3364 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3365 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3366 	return (0);
3367 }
3368 
3369 /*
3370  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3371  * it is possible for us to miss an event due to race conditions, but
3372  * that condition is expected to be rare, so for the moment it is the
3373  * preferred interface.
3374  */
3375 void
3376 vn_pollevent(vp, events)
3377 	struct vnode *vp;
3378 	short events;
3379 {
3380 
3381 	if (vp->v_pollinfo == NULL)
3382 		v_addpollinfo(vp);
3383 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3384 	if (vp->v_pollinfo->vpi_events & events) {
3385 		/*
3386 		 * We clear vpi_events so that we don't
3387 		 * call selwakeup() twice if two events are
3388 		 * posted before the polling process(es) is
3389 		 * awakened.  This also ensures that we take at
3390 		 * most one selwakeup() if the polling process
3391 		 * is no longer interested.  However, it does
3392 		 * mean that only one event can be noticed at
3393 		 * a time.  (Perhaps we should only clear those
3394 		 * event bits which we note?) XXX
3395 		 */
3396 		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3397 		vp->v_pollinfo->vpi_revents |= events;
3398 		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3399 	}
3400 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3401 }
3402 
3403 /*
3404  * Wake up anyone polling on vp because it is being revoked.
3405  * This depends on dead_poll() returning POLLHUP for correct
3406  * behavior.
3407  */
3408 void
3409 vn_pollgone(vp)
3410 	struct vnode *vp;
3411 {
3412 
3413 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3414 	VN_KNOTE(vp, NOTE_REVOKE);
3415 	if (vp->v_pollinfo->vpi_events) {
3416 		vp->v_pollinfo->vpi_events = 0;
3417 		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3418 	}
3419 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3420 }
3421 
3422 
3423 
3424 /*
3425  * Routine to create and manage a filesystem syncer vnode.
3426  */
3427 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3428 static int	sync_fsync(struct  vop_fsync_args *);
3429 static int	sync_inactive(struct  vop_inactive_args *);
3430 static int	sync_reclaim(struct  vop_reclaim_args *);
3431 
3432 static vop_t **sync_vnodeop_p;
3433 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3434 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3435 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3436 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3437 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3438 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3439 	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3440 	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3441 	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3442 	{ NULL, NULL }
3443 };
3444 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3445 	{ &sync_vnodeop_p, sync_vnodeop_entries };
3446 
3447 VNODEOP_SET(sync_vnodeop_opv_desc);
3448 
3449 /*
3450  * Create a new filesystem syncer vnode for the specified mount point.
3451  */
3452 int
3453 vfs_allocate_syncvnode(mp)
3454 	struct mount *mp;
3455 {
3456 	struct vnode *vp;
3457 	static long start, incr, next;
3458 	int error;
3459 
3460 	/* Allocate a new vnode */
3461 	if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
3462 		mp->mnt_syncer = NULL;
3463 		return (error);
3464 	}
3465 	vp->v_type = VNON;
3466 	/*
3467 	 * Place the vnode onto the syncer worklist. We attempt to
3468 	 * scatter them about on the list so that they will go off
3469 	 * at evenly distributed times even if all the filesystems
3470 	 * are mounted at once.
3471 	 */
3472 	next += incr;
3473 	if (next == 0 || next > syncer_maxdelay) {
3474 		start /= 2;
3475 		incr /= 2;
3476 		if (start == 0) {
3477 			start = syncer_maxdelay / 2;
3478 			incr = syncer_maxdelay;
3479 		}
3480 		next = start;
3481 	}
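	/*
	 * For example, with syncer_maxdelay == 32 this yields the slot
	 * sequence 16, 8, 24, 4, 12, 20, 28, ... (successive halving),
	 * so syncer vnodes land in well separated buckets of the wheel.
	 */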
3482 	VI_LOCK(vp);
3483 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3484 	VI_UNLOCK(vp);
3485 	mp->mnt_syncer = vp;
3486 	return (0);
3487 }
3488 
3489 /*
3490  * Do a lazy sync of the filesystem.
3491  */
3492 static int
3493 sync_fsync(ap)
3494 	struct vop_fsync_args /* {
3495 		struct vnode *a_vp;
3496 		struct ucred *a_cred;
3497 		int a_waitfor;
3498 		struct thread *a_td;
3499 	} */ *ap;
3500 {
3501 	struct vnode *syncvp = ap->a_vp;
3502 	struct mount *mp = syncvp->v_mount;
3503 	struct thread *td = ap->a_td;
3504 	int error, asyncflag;
3505 
3506 	/*
3507 	 * We only need to do something if this is a lazy evaluation.
3508 	 */
3509 	if (ap->a_waitfor != MNT_LAZY)
3510 		return (0);
3511 
3512 	/*
3513 	 * Move ourselves to the back of the sync list.
3514 	 */
3515 	VI_LOCK(syncvp);
3516 	vn_syncer_add_to_worklist(syncvp, syncdelay);
3517 	VI_UNLOCK(syncvp);
3518 
3519 	/*
3520 	 * Walk the list of vnodes pushing all that are dirty and
3521 	 * not already on the sync list.
3522 	 */
3523 	mtx_lock(&mountlist_mtx);
3524 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3525 		mtx_unlock(&mountlist_mtx);
3526 		return (0);
3527 	}
3528 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3529 		vfs_unbusy(mp, td);
3530 		return (0);
3531 	}
3532 	asyncflag = mp->mnt_flag & MNT_ASYNC;
3533 	mp->mnt_flag &= ~MNT_ASYNC;
3534 	vfs_msync(mp, MNT_NOWAIT);
3535 	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3536 	if (asyncflag)
3537 		mp->mnt_flag |= MNT_ASYNC;
3538 	vn_finished_write(mp);
3539 	vfs_unbusy(mp, td);
3540 	return (error);
3541 }
3542 
3543 /*
3544  * The syncer vnode is no longer referenced.
3545  */
3546 static int
3547 sync_inactive(ap)
3548 	struct vop_inactive_args /* {
3549 		struct vnode *a_vp;
3550 		struct thread *a_td;
3551 	} */ *ap;
3552 {
3553 
3554 	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3555 	vgone(ap->a_vp);
3556 	return (0);
3557 }
3558 
3559 /*
3560  * The syncer vnode is no longer needed and is being decommissioned.
3561  *
3562  * Modifications to the worklist must be protected at splbio().
3563  */
3564 static int
3565 sync_reclaim(ap)
3566 	struct vop_reclaim_args /* {
3567 		struct vnode *a_vp;
3568 	} */ *ap;
3569 {
3570 	struct vnode *vp = ap->a_vp;
3571 	int s;
3572 
3573 	s = splbio();
3574 	vp->v_mount->mnt_syncer = NULL;
3575 	VI_LOCK(vp);
3576 	if (vp->v_iflag & VI_ONWORKLST) {
3577 		mtx_lock(&sync_mtx);
3578 		LIST_REMOVE(vp, v_synclist);
3579 		mtx_unlock(&sync_mtx);
3580 		vp->v_iflag &= ~VI_ONWORKLST;
3581 	}
3582 	VI_UNLOCK(vp);
3583 	splx(s);
3584 
3585 	return (0);
3586 }
3587 
3588 /*
3589  * Extract the dev_t from a VCHR vnode.
3590  */
3591 dev_t
3592 vn_todev(vp)
3593 	struct vnode *vp;
3594 {
3595 	if (vp->v_type != VCHR)
3596 		return (NODEV);
3597 	return (vp->v_rdev);
3598 }
3599 
3600 /*
3601  * Check whether a vnode represents a disk device.
3602  */
3603 int
3604 vn_isdisk(vp, errp)
3605 	struct vnode *vp;
3606 	int *errp;
3607 {
3608 	struct cdevsw *cdevsw;
3609 
3610 	if (vp->v_type != VCHR) {
3611 		if (errp != NULL)
3612 			*errp = ENOTBLK;
3613 		return (0);
3614 	}
3615 	if (vp->v_rdev == NULL) {
3616 		if (errp != NULL)
3617 			*errp = ENXIO;
3618 		return (0);
3619 	}
3620 	cdevsw = devsw(vp->v_rdev);
3621 	if (cdevsw == NULL) {
3622 		if (errp != NULL)
3623 			*errp = ENXIO;
3624 		return (0);
3625 	}
3626 	if (!(cdevsw->d_flags & D_DISK)) {
3627 		if (errp != NULL)
3628 			*errp = ENOTBLK;
3629 		return (0);
3630 	}
3631 	if (errp != NULL)
3632 		*errp = 0;
3633 	return (1);
3634 }
3635 
3636 /*
3637  * Free data allocated by namei(); see namei(9) for details.
3638  */
3639 void
3640 NDFREE(ndp, flags)
3641      struct nameidata *ndp;
3642      const u_int flags;
3643 {
3644 	if (!(flags & NDF_NO_FREE_PNBUF) &&
3645 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3646 		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3647 		ndp->ni_cnd.cn_flags &= ~HASBUF;
3648 	}
3649 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3650 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3651 	    ndp->ni_dvp != ndp->ni_vp)
3652 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3653 	if (!(flags & NDF_NO_DVP_RELE) &&
3654 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3655 		vrele(ndp->ni_dvp);
3656 		ndp->ni_dvp = NULL;
3657 	}
3658 	if (!(flags & NDF_NO_VP_UNLOCK) &&
3659 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3660 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3661 	if (!(flags & NDF_NO_VP_RELE) &&
3662 	    ndp->ni_vp) {
3663 		vrele(ndp->ni_vp);
3664 		ndp->ni_vp = NULL;
3665 	}
3666 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3667 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3668 		vrele(ndp->ni_startdir);
3669 		ndp->ni_startdir = NULL;
3670 	}
3671 }
3672 
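/*
 * Typical pairing with namei() (sketch):
 *
 *	NDINIT(&nd, LOOKUP, LOCKLEAF | FOLLOW, UIO_SYSSPACE, path, td);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	...use nd.ni_vp...
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	vput(nd.ni_vp);
 */
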
3673 /*
3674  * Common filesystem object access control check routine.  Accepts a
3675  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3676  * and optional call-by-reference privused argument allowing vaccess()
3677  * to indicate to the caller whether privilege was used to satisfy the
3678  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3679  */
3680 int
3681 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3682 	enum vtype type;
3683 	mode_t file_mode;
3684 	uid_t file_uid;
3685 	gid_t file_gid;
3686 	mode_t acc_mode;
3687 	struct ucred *cred;
3688 	int *privused;
3689 {
3690 	mode_t dac_granted;
3691 #ifdef CAPABILITIES
3692 	mode_t cap_granted;
3693 #endif
3694 
3695 	/*
3696 	 * Look for a normal, non-privileged way to access the file/directory
3697 	 * as requested.  If it exists, go with that.
3698 	 */
3699 
3700 	if (privused != NULL)
3701 		*privused = 0;
3702 
3703 	dac_granted = 0;
3704 
3705 	/* Check the owner. */
3706 	if (cred->cr_uid == file_uid) {
3707 		dac_granted |= VADMIN;
3708 		if (file_mode & S_IXUSR)
3709 			dac_granted |= VEXEC;
3710 		if (file_mode & S_IRUSR)
3711 			dac_granted |= VREAD;
3712 		if (file_mode & S_IWUSR)
3713 			dac_granted |= (VWRITE | VAPPEND);
3714 
3715 		if ((acc_mode & dac_granted) == acc_mode)
3716 			return (0);
3717 
3718 		goto privcheck;
3719 	}
3720 
3721 	/* Otherwise, check the groups (first match) */
3722 	if (groupmember(file_gid, cred)) {
3723 		if (file_mode & S_IXGRP)
3724 			dac_granted |= VEXEC;
3725 		if (file_mode & S_IRGRP)
3726 			dac_granted |= VREAD;
3727 		if (file_mode & S_IWGRP)
3728 			dac_granted |= (VWRITE | VAPPEND);
3729 
3730 		if ((acc_mode & dac_granted) == acc_mode)
3731 			return (0);
3732 
3733 		goto privcheck;
3734 	}
3735 
3736 	/* Otherwise, check everyone else. */
3737 	if (file_mode & S_IXOTH)
3738 		dac_granted |= VEXEC;
3739 	if (file_mode & S_IROTH)
3740 		dac_granted |= VREAD;
3741 	if (file_mode & S_IWOTH)
3742 		dac_granted |= (VWRITE | VAPPEND);
3743 	if ((acc_mode & dac_granted) == acc_mode)
3744 		return (0);
3745 
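	/*
	 * Example: for a file with mode 0640, an owner requesting VREAD
	 * was granted above (S_IRUSR -> VREAD), while a group member
	 * requesting VWRITE accumulates only VREAD in dac_granted and
	 * falls through to the privilege checks below.
	 */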
3746 privcheck:
3747 	if (!suser_cred(cred, PRISON_ROOT)) {
3748 		/* XXX audit: privilege used */
3749 		if (privused != NULL)
3750 			*privused = 1;
3751 		return (0);
3752 	}
3753 
3754 #ifdef CAPABILITIES
3755 	/*
3756 	 * Build a capability mask to determine if the set of capabilities
3757 	 * satisfies the requirements when combined with the granted mask
3758 	 * from above.
3759 	 * For each capability, if the capability is required, bitwise
3760 	 * or the request type onto the cap_granted mask.
3761 	 */
3762 	cap_granted = 0;
3763 
3764 	if (type == VDIR) {
3765 		/*
3766 		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3767 		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3768 		 */
3769 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3770 		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3771 			cap_granted |= VEXEC;
3772 	} else {
3773 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3774 		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
3775 			cap_granted |= VEXEC;
3776 	}
3777 
3778 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3779 	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3780 		cap_granted |= VREAD;
3781 
3782 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3783 	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
3784 		cap_granted |= (VWRITE | VAPPEND);
3785 
3786 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3787 	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
3788 		cap_granted |= VADMIN;
3789 
3790 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3791 		/* XXX audit: privilege used */
3792 		if (privused != NULL)
3793 			*privused = 1;
3794 		return (0);
3795 	}
3796 #endif
3797 
3798 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3799 }
3800 
3801 /*
3802  * Credential check based on process requesting service, and per-attribute
3803  * permissions.
3804  */
3805 int
3806 extattr_check_cred(struct vnode *vp, int attrnamespace,
3807     struct ucred *cred, struct thread *td, int access)
3808 {
3809 
3810 	/*
3811 	 * Kernel-invoked always succeeds.
3812 	 */
3813 	if (cred == NOCRED)
3814 		return (0);
3815 
3816 	/*
3817 	 * Do not allow privileged processes in jail to directly
3818 	 * manipulate system attributes.
3819 	 *
3820 	 * XXX What capability should apply here?
3821 	 * Probably CAP_SYS_SETFFLAG.
3822 	 */
3823 	switch (attrnamespace) {
3824 	case EXTATTR_NAMESPACE_SYSTEM:
3825 		/* Potentially should be: return (EPERM); */
3826 		return (suser_cred(cred, 0));
3827 	case EXTATTR_NAMESPACE_USER:
3828 		return (VOP_ACCESS(vp, access, cred, td));
3829 	default:
3830 		return (EPERM);
3831 	}
3832 }
3833