xref: /freebsd/sys/kern/vfs_subr.c (revision dce6e6518b85561495cff38a3074a69d29d58a55)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
39  */
40 
41 /*
42  * External virtual filesystem routines
43  */
44 
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
47 
48 #include "opt_ddb.h"
49 #include "opt_mac.h"
50 
51 #include <sys/param.h>
52 #include <sys/systm.h>
53 #include <sys/bio.h>
54 #include <sys/buf.h>
55 #include <sys/conf.h>
56 #include <sys/eventhandler.h>
57 #include <sys/extattr.h>
58 #include <sys/fcntl.h>
59 #include <sys/kernel.h>
60 #include <sys/kthread.h>
61 #include <sys/mac.h>
62 #include <sys/malloc.h>
63 #include <sys/mount.h>
64 #include <sys/namei.h>
65 #include <sys/stat.h>
66 #include <sys/sysctl.h>
67 #include <sys/syslog.h>
68 #include <sys/vmmeter.h>
69 #include <sys/vnode.h>
70 
71 #include <vm/vm.h>
72 #include <vm/vm_object.h>
73 #include <vm/vm_extern.h>
74 #include <vm/pmap.h>
75 #include <vm/vm_map.h>
76 #include <vm/vm_page.h>
77 #include <vm/vm_kern.h>
78 #include <vm/uma.h>
79 
80 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
81 
82 static void	addalias(struct vnode *vp, dev_t nvp_rdev);
83 static void	insmntque(struct vnode *vp, struct mount *mp);
84 static void	vclean(struct vnode *vp, int flags, struct thread *td);
85 static void	vlruvp(struct vnode *vp);
86 static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
87 		    int slpflag, int slptimeo, int *errorp);
88 static int	vcanrecycle(struct vnode *vp, struct mount **vnmpp);
89 
90 
91 /*
92  * Number of vnodes in existence.  Increased whenever getnewvnode()
93  * allocates a new vnode, never decreased.
94  */
95 static unsigned long	numvnodes;
96 
97 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
98 
99 /*
100  * Conversion tables for conversion from vnode types to inode formats
101  * and back.
102  */
103 enum vtype iftovt_tab[16] = {
104 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
105 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
106 };
107 int vttoif_tab[9] = {
108 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
109 	S_IFSOCK, S_IFIFO, S_IFMT,
110 };
111 
112 /*
113  * List of vnodes that are ready for recycling.
114  */
115 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
116 
117 /*
118  * Minimum number of free vnodes.  If there are fewer than this free vnodes,
119  * getnewvnode() will return a newly allocated vnode.
120  */
121 static u_long wantfreevnodes = 25;
122 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
123 /* Number of vnodes in the free list. */
124 static u_long freevnodes;
125 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
126 
127 /*
128  * Various variables used for debugging the new implementation of
129  * reassignbuf().
130  * XXX these are probably of (very) limited utility now.
131  */
132 static int reassignbufcalls;
133 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
134 static int nameileafonly;
135 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
136 
137 /*
138  * Cache for the mount type id assigned to NFS.  This is used for
139  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
140  */
141 int	nfs_mount_type = -1;
142 
143 /* To keep more than one thread at a time from running vfs_getnewfsid */
144 static struct mtx mntid_mtx;
145 
146 /*
147  * Lock for any access to the following:
148  *	vnode_free_list
149  *	numvnodes
150  *	freevnodes
151  */
152 static struct mtx vnode_free_list_mtx;
153 
154 /*
155  * For any iteration/modification of dev->si_hlist (linked through
156  * v_specnext)
157  */
158 static struct mtx spechash_mtx;
159 
160 /* Publicly exported FS */
161 struct nfs_public nfs_pub;
162 
163 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
164 static uma_zone_t vnode_zone;
165 static uma_zone_t vnodepoll_zone;
166 
167 /* Set to 1 to print out reclaim of active vnodes */
168 int	prtactive;
169 
170 /*
171  * The workitem queue.
172  *
173  * It is useful to delay writes of file data and filesystem metadata
174  * for tens of seconds so that quickly created and deleted files need
175  * not waste disk bandwidth being created and removed. To realize this,
176  * we append vnodes to a "workitem" queue. When running with a soft
177  * updates implementation, most pending metadata dependencies should
178  * not wait for more than a few seconds. Thus, mounted on block devices
179  * are delayed only about a half the time that file data is delayed.
180  * Similarly, directory updates are more critical, so are only delayed
181  * about a third the time that file data is delayed. Thus, there are
182  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
183  * one each second (driven off the filesystem syncer process). The
184  * syncer_delayno variable indicates the next queue that is to be processed.
185  * Items that need to be processed soon are placed in this queue:
186  *
187  *	syncer_workitem_pending[syncer_delayno]
188  *
189  * A delay of fifteen seconds is done by placing the request fifteen
190  * entries later in the queue:
191  *
192  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
193  *
194  */
195 static int syncer_delayno;
196 static long syncer_mask;
197 LIST_HEAD(synclist, vnode);
198 static struct synclist *syncer_workitem_pending;
199 /*
200  * The sync_mtx protects:
201  *	vp->v_synclist
202  *	syncer_delayno
203  *	syncer_workitem_pending
204  *	rushjob
205  */
206 static struct mtx sync_mtx;
207 
208 #define SYNCER_MAXDELAY		32
209 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
210 static int syncdelay = 30;		/* max time to delay syncing data */
211 static int filedelay = 30;		/* time to delay syncing files */
212 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
213 static int dirdelay = 29;		/* time to delay syncing directories */
214 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
215 static int metadelay = 28;		/* time to delay syncing metadata */
216 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
217 static int rushjob;		/* number of slots to run ASAP */
218 static int stat_rush_requests;	/* number of times I/O speeded up */
219 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
220 
221 /*
222  * Number of vnodes we want to exist at any one time.  This is mostly used
223  * to size hash tables in vnode-related code.  It is normally not used in
224  * getnewvnode(), as wantfreevnodes is normally nonzero.)
225  *
226  * XXX desiredvnodes is historical cruft and should not exist.
227  */
228 int desiredvnodes;
229 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
230     &desiredvnodes, 0, "Maximum number of vnodes");
231 static int minvnodes;
232 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
233     &minvnodes, 0, "Minimum number of vnodes");
234 static int vnlru_nowhere;
235 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
236     "Number of times the vnlru process ran without success");
237 
238 /* Hook for calling soft updates */
239 int (*softdep_process_worklist_hook)(struct mount *);
240 
241 /*
242  * This only exists to supress warnings from unlocked specfs accesses.  It is
243  * no longer ok to have an unlocked VFS.
244  */
245 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
246 
247 /* Print lock violations */
248 int vfs_badlock_print = 1;
249 
250 /* Panic on violation */
251 int vfs_badlock_panic = 1;
252 
253 /* Check for interlock across VOPs */
254 int vfs_badlock_mutex = 1;
255 
256 static void
257 vfs_badlock(char *msg, char *str, struct vnode *vp)
258 {
259 	if (vfs_badlock_print)
260 		printf("%s: %p %s\n", str, vp, msg);
261 	if (vfs_badlock_panic)
262 		Debugger("Lock violation.\n");
263 }
264 
265 void
266 assert_vi_unlocked(struct vnode *vp, char *str)
267 {
268 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
269 		vfs_badlock("interlock is locked but should not be", str, vp);
270 }
271 
272 void
273 assert_vi_locked(struct vnode *vp, char *str)
274 {
275 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
276 		vfs_badlock("interlock is not locked but should be", str, vp);
277 }
278 
279 void
280 assert_vop_locked(struct vnode *vp, char *str)
281 {
282 	if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
283 		vfs_badlock("is not locked but should be", str, vp);
284 }
285 
286 void
287 assert_vop_unlocked(struct vnode *vp, char *str)
288 {
289 	if (vp && !IGNORE_LOCK(vp) &&
290 	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
291 		vfs_badlock("is locked but should not be", str, vp);
292 }
293 
294 void
295 assert_vop_elocked(struct vnode *vp, char *str)
296 {
297 	if (vp && !IGNORE_LOCK(vp) &&
298 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
299 		vfs_badlock("is not exclusive locked but should be", str, vp);
300 }
301 
302 void
303 assert_vop_elocked_other(struct vnode *vp, char *str)
304 {
305 	if (vp && !IGNORE_LOCK(vp) &&
306 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
307 		vfs_badlock("is not exclusive locked by another thread",
308 		    str, vp);
309 }
310 
311 void
312 assert_vop_slocked(struct vnode *vp, char *str)
313 {
314 	if (vp && !IGNORE_LOCK(vp) &&
315 	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
316 		vfs_badlock("is not locked shared but should be", str, vp);
317 }
318 
319 void
320 vop_rename_pre(void *ap)
321 {
322 	struct vop_rename_args *a = ap;
323 
324 	if (a->a_tvp)
325 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
326 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
327 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
328 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
329 
330 	/* Check the source (from) */
331 	if (a->a_tdvp != a->a_fdvp)
332 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
333 	if (a->a_tvp != a->a_fvp)
334 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: tvp locked.\n");
335 
336 	/* Check the target */
337 	if (a->a_tvp)
338 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");
339 
340 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
341 }
342 
343 void
344 vop_strategy_pre(void *ap)
345 {
346 	struct vop_strategy_args *a = ap;
347 	struct buf *bp;
348 
349 	bp = a->a_bp;
350 
351 	/*
352 	 * Cluster ops lock their component buffers but not the IO container.
353 	 */
354 	if ((bp->b_flags & B_CLUSTER) != 0)
355 		return;
356 
357 	if (BUF_REFCNT(bp) < 1) {
358 		if (vfs_badlock_print)
359 			printf("VOP_STRATEGY: bp is not locked but should be.\n");
360 		if (vfs_badlock_panic)
361 			Debugger("Lock violation.\n");
362 	}
363 }
364 
365 void
366 vop_lookup_pre(void *ap)
367 {
368 	struct vop_lookup_args *a = ap;
369 	struct vnode *dvp;
370 
371 	dvp = a->a_dvp;
372 
373 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
374 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
375 }
376 
377 void
378 vop_lookup_post(void *ap, int rc)
379 {
380 	struct vop_lookup_args *a = ap;
381 	struct componentname *cnp;
382 	struct vnode *dvp;
383 	struct vnode *vp;
384 	int flags;
385 
386 	dvp = a->a_dvp;
387 	cnp = a->a_cnp;
388 	vp = *(a->a_vpp);
389 	flags = cnp->cn_flags;
390 
391 
392 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
393 	/*
394 	 * If this is the last path component for this lookup and LOCPARENT
395 	 * is set, OR if there is an error the directory has to be locked.
396 	 */
397 	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
398 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
399 	else if (rc != 0)
400 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
401 	else if (dvp != vp)
402 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
403 
404 	if (flags & PDIRUNLOCK)
405 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
406 }
407 
408 void
409 vop_unlock_pre(void *ap)
410 {
411 	struct vop_unlock_args *a = ap;
412 
413 	if (a->a_flags & LK_INTERLOCK)
414 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
415 
416 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
417 }
418 
419 void
420 vop_unlock_post(void *ap, int rc)
421 {
422 	struct vop_unlock_args *a = ap;
423 
424 	if (a->a_flags & LK_INTERLOCK)
425 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
426 }
427 
428 void
429 vop_lock_pre(void *ap)
430 {
431 	struct vop_lock_args *a = ap;
432 
433 	if ((a->a_flags & LK_INTERLOCK) == 0)
434 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
435 	else
436 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
437 }
438 
439 void
440 vop_lock_post(void *ap, int rc)
441 {
442 	struct vop_lock_args *a;
443 
444 	a = ap;
445 
446 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
447 	if (rc == 0)
448 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
449 }
450 
451 void
452 v_addpollinfo(struct vnode *vp)
453 {
454 	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
455 	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
456 }
457 
458 /*
459  * Initialize the vnode management data structures.
460  */
461 static void
462 vntblinit(void *dummy __unused)
463 {
464 
465 	/*
466 	 * Desiredvnodes is a function of the physical memory size and
467 	 * the kernel's heap size.  Specifically, desiredvnodes scales
468 	 * in proportion to the physical memory size until two fifths
469 	 * of the kernel's heap size is consumed by vnodes and vm
470 	 * objects.
471 	 */
472 	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
473 	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
474 	minvnodes = desiredvnodes / 4;
475 	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
476 	mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
477 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
478 	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
479 	TAILQ_INIT(&vnode_free_list);
480 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
481 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
482 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
483 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
484 	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
485 	/*
486 	 * Initialize the filesystem syncer.
487 	 */
488 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
489 		&syncer_mask);
490 	syncer_maxdelay = syncer_mask + 1;
491 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
492 }
493 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
494 
495 
496 /*
497  * Mark a mount point as busy. Used to synchronize access and to delay
498  * unmounting. Interlock is not released on failure.
499  */
500 int
501 vfs_busy(mp, flags, interlkp, td)
502 	struct mount *mp;
503 	int flags;
504 	struct mtx *interlkp;
505 	struct thread *td;
506 {
507 	int lkflags;
508 
509 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
510 		if (flags & LK_NOWAIT)
511 			return (ENOENT);
512 		mp->mnt_kern_flag |= MNTK_MWAIT;
513 		/*
514 		 * Since all busy locks are shared except the exclusive
515 		 * lock granted when unmounting, the only place that a
516 		 * wakeup needs to be done is at the release of the
517 		 * exclusive lock at the end of dounmount.
518 		 */
519 		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
520 		return (ENOENT);
521 	}
522 	lkflags = LK_SHARED | LK_NOPAUSE;
523 	if (interlkp)
524 		lkflags |= LK_INTERLOCK;
525 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
526 		panic("vfs_busy: unexpected lock failure");
527 	return (0);
528 }
529 
530 /*
531  * Free a busy filesystem.
532  */
533 void
534 vfs_unbusy(mp, td)
535 	struct mount *mp;
536 	struct thread *td;
537 {
538 
539 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
540 }
541 
542 /*
543  * Lookup a mount point by filesystem identifier.
544  */
545 struct mount *
546 vfs_getvfs(fsid)
547 	fsid_t *fsid;
548 {
549 	register struct mount *mp;
550 
551 	mtx_lock(&mountlist_mtx);
552 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
553 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
554 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
555 			mtx_unlock(&mountlist_mtx);
556 			return (mp);
557 		}
558 	}
559 	mtx_unlock(&mountlist_mtx);
560 	return ((struct mount *) 0);
561 }
562 
563 /*
564  * Get a new unique fsid.  Try to make its val[0] unique, since this value
565  * will be used to create fake device numbers for stat().  Also try (but
566  * not so hard) make its val[0] unique mod 2^16, since some emulators only
567  * support 16-bit device numbers.  We end up with unique val[0]'s for the
568  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
569  *
570  * Keep in mind that several mounts may be running in parallel.  Starting
571  * the search one past where the previous search terminated is both a
572  * micro-optimization and a defense against returning the same fsid to
573  * different mounts.
574  */
575 void
576 vfs_getnewfsid(mp)
577 	struct mount *mp;
578 {
579 	static u_int16_t mntid_base;
580 	fsid_t tfsid;
581 	int mtype;
582 
583 	mtx_lock(&mntid_mtx);
584 	mtype = mp->mnt_vfc->vfc_typenum;
585 	tfsid.val[1] = mtype;
586 	mtype = (mtype & 0xFF) << 24;
587 	for (;;) {
588 		tfsid.val[0] = makeudev(255,
589 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
590 		mntid_base++;
591 		if (vfs_getvfs(&tfsid) == NULL)
592 			break;
593 	}
594 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
595 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
596 	mtx_unlock(&mntid_mtx);
597 }
598 
599 /*
600  * Knob to control the precision of file timestamps:
601  *
602  *   0 = seconds only; nanoseconds zeroed.
603  *   1 = seconds and nanoseconds, accurate within 1/HZ.
604  *   2 = seconds and nanoseconds, truncated to microseconds.
605  * >=3 = seconds and nanoseconds, maximum precision.
606  */
607 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
608 
609 static int timestamp_precision = TSP_SEC;
610 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
611     &timestamp_precision, 0, "");
612 
613 /*
614  * Get a current timestamp.
615  */
616 void
617 vfs_timestamp(tsp)
618 	struct timespec *tsp;
619 {
620 	struct timeval tv;
621 
622 	switch (timestamp_precision) {
623 	case TSP_SEC:
624 		tsp->tv_sec = time_second;
625 		tsp->tv_nsec = 0;
626 		break;
627 	case TSP_HZ:
628 		getnanotime(tsp);
629 		break;
630 	case TSP_USEC:
631 		microtime(&tv);
632 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
633 		break;
634 	case TSP_NSEC:
635 	default:
636 		nanotime(tsp);
637 		break;
638 	}
639 }
640 
641 /*
642  * Set vnode attributes to VNOVAL
643  */
644 void
645 vattr_null(vap)
646 	register struct vattr *vap;
647 {
648 
649 	vap->va_type = VNON;
650 	vap->va_size = VNOVAL;
651 	vap->va_bytes = VNOVAL;
652 	vap->va_mode = VNOVAL;
653 	vap->va_nlink = VNOVAL;
654 	vap->va_uid = VNOVAL;
655 	vap->va_gid = VNOVAL;
656 	vap->va_fsid = VNOVAL;
657 	vap->va_fileid = VNOVAL;
658 	vap->va_blocksize = VNOVAL;
659 	vap->va_rdev = VNOVAL;
660 	vap->va_atime.tv_sec = VNOVAL;
661 	vap->va_atime.tv_nsec = VNOVAL;
662 	vap->va_mtime.tv_sec = VNOVAL;
663 	vap->va_mtime.tv_nsec = VNOVAL;
664 	vap->va_ctime.tv_sec = VNOVAL;
665 	vap->va_ctime.tv_nsec = VNOVAL;
666 	vap->va_birthtime.tv_sec = VNOVAL;
667 	vap->va_birthtime.tv_nsec = VNOVAL;
668 	vap->va_flags = VNOVAL;
669 	vap->va_gen = VNOVAL;
670 	vap->va_vaflags = 0;
671 }
672 
673 /*
674  * This routine is called when we have too many vnodes.  It attempts
675  * to free <count> vnodes and will potentially free vnodes that still
676  * have VM backing store (VM backing store is typically the cause
677  * of a vnode blowout so we want to do this).  Therefore, this operation
678  * is not considered cheap.
679  *
680  * A number of conditions may prevent a vnode from being reclaimed.
681  * the buffer cache may have references on the vnode, a directory
682  * vnode may still have references due to the namei cache representing
683  * underlying files, or the vnode may be in active use.   It is not
684  * desireable to reuse such vnodes.  These conditions may cause the
685  * number of vnodes to reach some minimum value regardless of what
686  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
687  */
688 static int
689 vlrureclaim(struct mount *mp)
690 {
691 	struct vnode *vp;
692 	int done;
693 	int trigger;
694 	int usevnodes;
695 	int count;
696 
697 	/*
698 	 * Calculate the trigger point, don't allow user
699 	 * screwups to blow us up.   This prevents us from
700 	 * recycling vnodes with lots of resident pages.  We
701 	 * aren't trying to free memory, we are trying to
702 	 * free vnodes.
703 	 */
704 	usevnodes = desiredvnodes;
705 	if (usevnodes <= 0)
706 		usevnodes = 1;
707 	trigger = cnt.v_page_count * 2 / usevnodes;
708 
709 	done = 0;
710 	mtx_lock(&mntvnode_mtx);
711 	count = mp->mnt_nvnodelistsize / 10 + 1;
712 	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
713 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
714 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
715 
716 		if (vp->v_type != VNON &&
717 		    vp->v_type != VBAD &&
718 		    VI_TRYLOCK(vp)) {
719 			if (VMIGHTFREE(vp) &&           /* critical path opt */
720 			    (vp->v_object == NULL ||
721 			    vp->v_object->resident_page_count < trigger)) {
722 				mtx_unlock(&mntvnode_mtx);
723 				vgonel(vp, curthread);
724 				done++;
725 				mtx_lock(&mntvnode_mtx);
726 			} else
727 				VI_UNLOCK(vp);
728 		}
729 		--count;
730 	}
731 	mtx_unlock(&mntvnode_mtx);
732 	return done;
733 }
734 
735 /*
736  * Attempt to recycle vnodes in a context that is always safe to block.
737  * Calling vlrurecycle() from the bowels of filesystem code has some
738  * interesting deadlock problems.
739  */
740 static struct proc *vnlruproc;
741 static int vnlruproc_sig;
742 
743 static void
744 vnlru_proc(void)
745 {
746 	struct mount *mp, *nmp;
747 	int s;
748 	int done;
749 	struct proc *p = vnlruproc;
750 	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
751 
752 	mtx_lock(&Giant);
753 
754 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
755 	    SHUTDOWN_PRI_FIRST);
756 
757 	s = splbio();
758 	for (;;) {
759 		kthread_suspend_check(p);
760 		mtx_lock(&vnode_free_list_mtx);
761 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
762 			mtx_unlock(&vnode_free_list_mtx);
763 			vnlruproc_sig = 0;
764 			wakeup(&vnlruproc_sig);
765 			tsleep(vnlruproc, PVFS, "vlruwt", hz);
766 			continue;
767 		}
768 		mtx_unlock(&vnode_free_list_mtx);
769 		done = 0;
770 		mtx_lock(&mountlist_mtx);
771 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
772 			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
773 				nmp = TAILQ_NEXT(mp, mnt_list);
774 				continue;
775 			}
776 			done += vlrureclaim(mp);
777 			mtx_lock(&mountlist_mtx);
778 			nmp = TAILQ_NEXT(mp, mnt_list);
779 			vfs_unbusy(mp, td);
780 		}
781 		mtx_unlock(&mountlist_mtx);
782 		if (done == 0) {
783 #if 0
784 			/* These messages are temporary debugging aids */
785 			if (vnlru_nowhere < 5)
786 				printf("vnlru process getting nowhere..\n");
787 			else if (vnlru_nowhere == 5)
788 				printf("vnlru process messages stopped.\n");
789 #endif
790 			vnlru_nowhere++;
791 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
792 		}
793 	}
794 	splx(s);
795 }
796 
797 static struct kproc_desc vnlru_kp = {
798 	"vnlru",
799 	vnlru_proc,
800 	&vnlruproc
801 };
802 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
803 
804 
805 /*
806  * Routines having to do with the management of the vnode table.
807  */
808 
809 /*
810  * Check to see if a free vnode can be recycled. If it can,
811  * return it locked with the vn lock, but not interlock. Also
812  * get the vn_start_write lock. Otherwise indicate the error.
813  */
814 static int
815 vcanrecycle(struct vnode *vp, struct mount **vnmpp)
816 {
817 	struct thread *td = curthread;
818 	vm_object_t object;
819 	int error;
820 
821 	/* Don't recycle if we can't get the interlock */
822 	if (!VI_TRYLOCK(vp))
823 		return (EWOULDBLOCK);
824 
825 	/* We should be able to immediately acquire this */
826 	/* XXX This looks like it should panic if it fails */
827 	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0) {
828 		if (VOP_ISLOCKED(vp, td))
829 			panic("vcanrecycle: locked vnode");
830 		return (EWOULDBLOCK);
831 	}
832 
833 	/*
834 	 * Don't recycle if its filesystem is being suspended.
835 	 */
836 	if (vn_start_write(vp, vnmpp, V_NOWAIT) != 0) {
837 		error = EBUSY;
838 		goto done;
839 	}
840 
841 	/*
842 	 * Don't recycle if we still have cached pages.
843 	 */
844 	if (VOP_GETVOBJECT(vp, &object) == 0) {
845 		VM_OBJECT_LOCK(object);
846 		if (object->resident_page_count ||
847 		    object->ref_count) {
848 			VM_OBJECT_UNLOCK(object);
849 			error = EBUSY;
850 			goto done;
851 		}
852 		VM_OBJECT_UNLOCK(object);
853 	}
854 	if (LIST_FIRST(&vp->v_cache_src)) {
855 		/*
856 		 * note: nameileafonly sysctl is temporary,
857 		 * for debugging only, and will eventually be
858 		 * removed.
859 		 */
860 		if (nameileafonly > 0) {
861 			/*
862 			 * Do not reuse namei-cached directory
863 			 * vnodes that have cached
864 			 * subdirectories.
865 			 */
866 			if (cache_leaf_test(vp) < 0) {
867 				error = EISDIR;
868 				goto done;
869 			}
870 		} else if (nameileafonly < 0 ||
871 			    vmiodirenable == 0) {
872 			/*
873 			 * Do not reuse namei-cached directory
874 			 * vnodes if nameileafonly is -1 or
875 			 * if VMIO backing for directories is
876 			 * turned off (otherwise we reuse them
877 			 * too quickly).
878 			 */
879 			error = EBUSY;
880 			goto done;
881 		}
882 	}
883 	return (0);
884 done:
885 	VOP_UNLOCK(vp, 0, td);
886 	return (error);
887 }
888 
889 /*
890  * Return the next vnode from the free list.
891  */
892 int
893 getnewvnode(tag, mp, vops, vpp)
894 	const char *tag;
895 	struct mount *mp;
896 	vop_t **vops;
897 	struct vnode **vpp;
898 {
899 	struct thread *td = curthread;	/* XXX */
900 	struct vnode *vp = NULL;
901 	struct vpollinfo *pollinfo = NULL;
902 	struct mount *vnmp;
903 
904 	mtx_lock(&vnode_free_list_mtx);
905 
906 	/*
907 	 * Try to reuse vnodes if we hit the max.  This situation only
908 	 * occurs in certain large-memory (2G+) situations.  We cannot
909 	 * attempt to directly reclaim vnodes due to nasty recursion
910 	 * problems.
911 	 */
912 	while (numvnodes - freevnodes > desiredvnodes) {
913 		if (vnlruproc_sig == 0) {
914 			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
915 			wakeup(vnlruproc);
916 		}
917 		mtx_unlock(&vnode_free_list_mtx);
918 		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
919 		mtx_lock(&vnode_free_list_mtx);
920 	}
921 
922 	/*
923 	 * Attempt to reuse a vnode already on the free list, allocating
924 	 * a new vnode if we can't find one or if we have not reached a
925 	 * good minimum for good LRU performance.
926 	 */
927 
928 	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
929 		int error;
930 		int count;
931 
932 		for (count = 0; count < freevnodes; count++) {
933 			vp = TAILQ_FIRST(&vnode_free_list);
934 
935 			KASSERT(vp->v_usecount == 0 &&
936 			    (vp->v_iflag & VI_DOINGINACT) == 0,
937 			    ("getnewvnode: free vnode isn't"));
938 
939 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
940 			/*
941 			 * We have to drop the free list mtx to avoid lock
942 			 * order reversals with interlock.
943 			 */
944 			mtx_unlock(&vnode_free_list_mtx);
945 			error = vcanrecycle(vp, &vnmp);
946 			mtx_lock(&vnode_free_list_mtx);
947 			if (error == 0)
948 				break;
949 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
950 			vp = NULL;
951 		}
952 	}
953 	if (vp) {
954 		freevnodes--;
955 		mtx_unlock(&vnode_free_list_mtx);
956 
957 		cache_purge(vp);
958 		VI_LOCK(vp);
959 		vp->v_iflag |= VI_DOOMED;
960 		vp->v_iflag &= ~VI_FREE;
961 		if (vp->v_type != VBAD) {
962 			VOP_UNLOCK(vp, 0, td);
963 			vgonel(vp, td);
964 			VI_LOCK(vp);
965 		} else {
966 			VOP_UNLOCK(vp, 0, td);
967 		}
968 		vn_finished_write(vnmp);
969 
970 #ifdef INVARIANTS
971 		{
972 			if (vp->v_data)
973 				panic("cleaned vnode isn't");
974 			if (vp->v_numoutput)
975 				panic("Clean vnode has pending I/O's");
976 			if (vp->v_writecount != 0)
977 				panic("Non-zero write count");
978 		}
979 #endif
980 		if ((pollinfo = vp->v_pollinfo) != NULL) {
981 			/*
982 			 * To avoid lock order reversals, the call to
983 			 * uma_zfree() must be delayed until the vnode
984 			 * interlock is released.
985 			 */
986 			vp->v_pollinfo = NULL;
987 		}
988 #ifdef MAC
989 		mac_destroy_vnode(vp);
990 #endif
991 		vp->v_iflag = 0;
992 		vp->v_vflag = 0;
993 		vp->v_lastw = 0;
994 		vp->v_lasta = 0;
995 		vp->v_cstart = 0;
996 		vp->v_clen = 0;
997 		vp->v_socket = 0;
998 		lockdestroy(vp->v_vnlock);
999 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
1000 		KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
1001 		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
1002 		KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
1003 		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
1004 	} else {
1005 		numvnodes++;
1006 		mtx_unlock(&vnode_free_list_mtx);
1007 
1008 		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
1009 		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
1010 		VI_LOCK(vp);
1011 		vp->v_dd = vp;
1012 		vp->v_vnlock = &vp->v_lock;
1013 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
1014 		cache_purge(vp);
1015 		LIST_INIT(&vp->v_cache_src);
1016 		TAILQ_INIT(&vp->v_cache_dst);
1017 	}
1018 
1019 	TAILQ_INIT(&vp->v_cleanblkhd);
1020 	TAILQ_INIT(&vp->v_dirtyblkhd);
1021 	vp->v_type = VNON;
1022 	vp->v_tag = tag;
1023 	vp->v_op = vops;
1024 	*vpp = vp;
1025 	vp->v_usecount = 1;
1026 	vp->v_data = 0;
1027 	vp->v_cachedid = -1;
1028 	VI_UNLOCK(vp);
1029 	if (pollinfo != NULL) {
1030 		mtx_destroy(&pollinfo->vpi_lock);
1031 		uma_zfree(vnodepoll_zone, pollinfo);
1032 	}
1033 #ifdef MAC
1034 	mac_init_vnode(vp);
1035 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1036 		mac_associate_vnode_singlelabel(mp, vp);
1037 #endif
1038 	insmntque(vp, mp);
1039 
1040 	return (0);
1041 }
1042 
1043 /*
1044  * Move a vnode from one mount queue to another.
1045  */
1046 static void
1047 insmntque(vp, mp)
1048 	register struct vnode *vp;
1049 	register struct mount *mp;
1050 {
1051 
1052 	mtx_lock(&mntvnode_mtx);
1053 	/*
1054 	 * Delete from old mount point vnode list, if on one.
1055 	 */
1056 	if (vp->v_mount != NULL) {
1057 		KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
1058 			("bad mount point vnode list size"));
1059 		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
1060 		vp->v_mount->mnt_nvnodelistsize--;
1061 	}
1062 	/*
1063 	 * Insert into list of vnodes for the new mount point, if available.
1064 	 */
1065 	if ((vp->v_mount = mp) == NULL) {
1066 		mtx_unlock(&mntvnode_mtx);
1067 		return;
1068 	}
1069 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1070 	mp->mnt_nvnodelistsize++;
1071 	mtx_unlock(&mntvnode_mtx);
1072 }
1073 
1074 /*
1075  * Update outstanding I/O count and do wakeup if requested.
1076  */
1077 void
1078 vwakeup(bp)
1079 	register struct buf *bp;
1080 {
1081 	register struct vnode *vp;
1082 
1083 	bp->b_flags &= ~B_WRITEINPROG;
1084 	if ((vp = bp->b_vp)) {
1085 		VI_LOCK(vp);
1086 		vp->v_numoutput--;
1087 		if (vp->v_numoutput < 0)
1088 			panic("vwakeup: neg numoutput");
1089 		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
1090 			vp->v_iflag &= ~VI_BWAIT;
1091 			wakeup(&vp->v_numoutput);
1092 		}
1093 		VI_UNLOCK(vp);
1094 	}
1095 }
1096 
1097 /*
1098  * Flush out and invalidate all buffers associated with a vnode.
1099  * Called with the underlying object locked.
1100  */
1101 int
1102 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
1103 	struct vnode *vp;
1104 	int flags;
1105 	struct ucred *cred;
1106 	struct thread *td;
1107 	int slpflag, slptimeo;
1108 {
1109 	struct buf *blist;
1110 	int s, error;
1111 	vm_object_t object;
1112 
1113 	GIANT_REQUIRED;
1114 
1115 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1116 
1117 	VI_LOCK(vp);
1118 	if (flags & V_SAVE) {
1119 		s = splbio();
1120 		while (vp->v_numoutput) {
1121 			vp->v_iflag |= VI_BWAIT;
1122 			error = msleep(&vp->v_numoutput, VI_MTX(vp),
1123 			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
1124 			if (error) {
1125 				VI_UNLOCK(vp);
1126 				splx(s);
1127 				return (error);
1128 			}
1129 		}
1130 		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1131 			splx(s);
1132 			VI_UNLOCK(vp);
1133 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
1134 				return (error);
1135 			/*
1136 			 * XXX We could save a lock/unlock if this was only
1137 			 * enabled under INVARIANTS
1138 			 */
1139 			VI_LOCK(vp);
1140 			s = splbio();
1141 			if (vp->v_numoutput > 0 ||
1142 			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
1143 				panic("vinvalbuf: dirty bufs");
1144 		}
1145 		splx(s);
1146 	}
1147 	s = splbio();
1148 	/*
1149 	 * If you alter this loop please notice that interlock is dropped and
1150 	 * reacquired in flushbuflist.  Special care is needed to ensure that
1151 	 * no race conditions occur from this.
1152 	 */
1153 	for (error = 0;;) {
1154 		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
1155 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
1156 			if (error)
1157 				break;
1158 			continue;
1159 		}
1160 		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
1161 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
1162 			if (error)
1163 				break;
1164 			continue;
1165 		}
1166 		break;
1167 	}
1168 	if (error) {
1169 		splx(s);
1170 		VI_UNLOCK(vp);
1171 		return (error);
1172 	}
1173 
1174 	/*
1175 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
1176 	 * have write I/O in-progress but if there is a VM object then the
1177 	 * VM object can also have read-I/O in-progress.
1178 	 */
1179 	do {
1180 		while (vp->v_numoutput > 0) {
1181 			vp->v_iflag |= VI_BWAIT;
1182 			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
1183 		}
1184 		VI_UNLOCK(vp);
1185 		if (VOP_GETVOBJECT(vp, &object) == 0) {
1186 			VM_OBJECT_LOCK(object);
1187 			vm_object_pip_wait(object, "vnvlbx");
1188 			VM_OBJECT_UNLOCK(object);
1189 		}
1190 		VI_LOCK(vp);
1191 	} while (vp->v_numoutput > 0);
1192 	VI_UNLOCK(vp);
1193 
1194 	splx(s);
1195 
1196 	/*
1197 	 * Destroy the copy in the VM cache, too.
1198 	 */
1199 	if (VOP_GETVOBJECT(vp, &object) == 0) {
1200 		VM_OBJECT_LOCK(object);
1201 		vm_object_page_remove(object, 0, 0,
1202 			(flags & V_SAVE) ? TRUE : FALSE);
1203 		VM_OBJECT_UNLOCK(object);
1204 	}
1205 
1206 #ifdef INVARIANTS
1207 	VI_LOCK(vp);
1208 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1209 	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
1210 	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
1211 		panic("vinvalbuf: flush failed");
1212 	VI_UNLOCK(vp);
1213 #endif
1214 	return (0);
1215 }
1216 
1217 /*
1218  * Flush out buffers on the specified list.
1219  *
1220  */
1221 static int
1222 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
1223 	struct buf *blist;
1224 	int flags;
1225 	struct vnode *vp;
1226 	int slpflag, slptimeo;
1227 	int *errorp;
1228 {
1229 	struct buf *bp, *nbp;
1230 	int found, error;
1231 
1232 	ASSERT_VI_LOCKED(vp, "flushbuflist");
1233 
1234 	for (found = 0, bp = blist; bp; bp = nbp) {
1235 		nbp = TAILQ_NEXT(bp, b_vnbufs);
1236 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1237 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1238 			continue;
1239 		}
1240 		found += 1;
1241 		error = BUF_TIMELOCK(bp,
1242 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
1243 		    "flushbuf", slpflag, slptimeo);
1244 		if (error) {
1245 			if (error != ENOLCK)
1246 				*errorp = error;
1247 			goto done;
1248 		}
1249 		/*
1250 		 * XXX Since there are no node locks for NFS, I
1251 		 * believe there is a slight chance that a delayed
1252 		 * write will occur while sleeping just above, so
1253 		 * check for it.  Note that vfs_bio_awrite expects
1254 		 * buffers to reside on a queue, while BUF_WRITE and
1255 		 * brelse do not.
1256 		 */
1257 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1258 			(flags & V_SAVE)) {
1259 
1260 			if (bp->b_vp == vp) {
1261 				if (bp->b_flags & B_CLUSTEROK) {
1262 					vfs_bio_awrite(bp);
1263 				} else {
1264 					bremfree(bp);
1265 					bp->b_flags |= B_ASYNC;
1266 					BUF_WRITE(bp);
1267 				}
1268 			} else {
1269 				bremfree(bp);
1270 				(void) BUF_WRITE(bp);
1271 			}
1272 			goto done;
1273 		}
1274 		bremfree(bp);
1275 		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1276 		bp->b_flags &= ~B_ASYNC;
1277 		brelse(bp);
1278 		VI_LOCK(vp);
1279 	}
1280 	return (found);
1281 done:
1282 	VI_LOCK(vp);
1283 	return (found);
1284 }
1285 
1286 /*
1287  * Truncate a file's buffer and pages to a specified length.  This
1288  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1289  * sync activity.
1290  */
1291 int
1292 vtruncbuf(vp, cred, td, length, blksize)
1293 	register struct vnode *vp;
1294 	struct ucred *cred;
1295 	struct thread *td;
1296 	off_t length;
1297 	int blksize;
1298 {
1299 	register struct buf *bp;
1300 	struct buf *nbp;
1301 	int s, anyfreed;
1302 	int trunclbn;
1303 
1304 	/*
1305 	 * Round up to the *next* lbn.
1306 	 */
1307 	trunclbn = (length + blksize - 1) / blksize;
1308 
1309 	s = splbio();
1310 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1311 restart:
1312 	VI_LOCK(vp);
1313 	anyfreed = 1;
1314 	for (;anyfreed;) {
1315 		anyfreed = 0;
1316 		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
1317 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1318 			if (bp->b_lblkno >= trunclbn) {
1319 				if (BUF_LOCK(bp,
1320 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1321 				    VI_MTX(vp)) == ENOLCK)
1322 					goto restart;
1323 
1324 				bremfree(bp);
1325 				bp->b_flags |= (B_INVAL | B_RELBUF);
1326 				bp->b_flags &= ~B_ASYNC;
1327 				brelse(bp);
1328 				anyfreed = 1;
1329 
1330 				if (nbp &&
1331 				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1332 				    (nbp->b_vp != vp) ||
1333 				    (nbp->b_flags & B_DELWRI))) {
1334 					goto restart;
1335 				}
1336 				VI_LOCK(vp);
1337 			}
1338 		}
1339 
1340 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1341 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1342 			if (bp->b_lblkno >= trunclbn) {
1343 				if (BUF_LOCK(bp,
1344 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1345 				    VI_MTX(vp)) == ENOLCK)
1346 					goto restart;
1347 				bremfree(bp);
1348 				bp->b_flags |= (B_INVAL | B_RELBUF);
1349 				bp->b_flags &= ~B_ASYNC;
1350 				brelse(bp);
1351 				anyfreed = 1;
1352 				if (nbp &&
1353 				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1354 				    (nbp->b_vp != vp) ||
1355 				    (nbp->b_flags & B_DELWRI) == 0)) {
1356 					goto restart;
1357 				}
1358 				VI_LOCK(vp);
1359 			}
1360 		}
1361 	}
1362 
1363 	if (length > 0) {
1364 restartsync:
1365 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1366 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1367 			if (bp->b_lblkno > 0)
1368 				continue;
1369 			/*
1370 			 * Since we hold the vnode lock this should only
1371 			 * fail if we're racing with the buf daemon.
1372 			 */
1373 			if (BUF_LOCK(bp,
1374 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1375 			    VI_MTX(vp)) == ENOLCK) {
1376 				goto restart;
1377 			}
1378 			KASSERT((bp->b_flags & B_DELWRI),
1379 			    ("buf(%p) on dirty queue without DELWRI.", bp));
1380 
1381 			bremfree(bp);
1382 			bawrite(bp);
1383 			VI_LOCK(vp);
1384 			goto restartsync;
1385 		}
1386 	}
1387 
1388 	while (vp->v_numoutput > 0) {
1389 		vp->v_iflag |= VI_BWAIT;
1390 		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
1391 	}
1392 	VI_UNLOCK(vp);
1393 	splx(s);
1394 
1395 	vnode_pager_setsize(vp, length);
1396 
1397 	return (0);
1398 }
1399 
1400 /*
1401  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1402  * 		 a vnode.
1403  *
1404  *	NOTE: We have to deal with the special case of a background bitmap
1405  *	buffer, a situation where two buffers will have the same logical
1406  *	block offset.  We want (1) only the foreground buffer to be accessed
1407  *	in a lookup and (2) must differentiate between the foreground and
1408  *	background buffer in the splay tree algorithm because the splay
1409  *	tree cannot normally handle multiple entities with the same 'index'.
1410  *	We accomplish this by adding differentiating flags to the splay tree's
1411  *	numerical domain.
1412  */
1413 static
1414 struct buf *
1415 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1416 {
1417 	struct buf dummy;
1418 	struct buf *lefttreemax, *righttreemin, *y;
1419 
1420 	if (root == NULL)
1421 		return (NULL);
1422 	lefttreemax = righttreemin = &dummy;
1423 	for (;;) {
1424 		if (lblkno < root->b_lblkno ||
1425 		    (lblkno == root->b_lblkno &&
1426 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1427 			if ((y = root->b_left) == NULL)
1428 				break;
1429 			if (lblkno < y->b_lblkno) {
1430 				/* Rotate right. */
1431 				root->b_left = y->b_right;
1432 				y->b_right = root;
1433 				root = y;
1434 				if ((y = root->b_left) == NULL)
1435 					break;
1436 			}
1437 			/* Link into the new root's right tree. */
1438 			righttreemin->b_left = root;
1439 			righttreemin = root;
1440 		} else if (lblkno > root->b_lblkno ||
1441 		    (lblkno == root->b_lblkno &&
1442 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1443 			if ((y = root->b_right) == NULL)
1444 				break;
1445 			if (lblkno > y->b_lblkno) {
1446 				/* Rotate left. */
1447 				root->b_right = y->b_left;
1448 				y->b_left = root;
1449 				root = y;
1450 				if ((y = root->b_right) == NULL)
1451 					break;
1452 			}
1453 			/* Link into the new root's left tree. */
1454 			lefttreemax->b_right = root;
1455 			lefttreemax = root;
1456 		} else {
1457 			break;
1458 		}
1459 		root = y;
1460 	}
1461 	/* Assemble the new root. */
1462 	lefttreemax->b_right = root->b_left;
1463 	righttreemin->b_left = root->b_right;
1464 	root->b_left = dummy.b_right;
1465 	root->b_right = dummy.b_left;
1466 	return (root);
1467 }
1468 
1469 static
1470 void
1471 buf_vlist_remove(struct buf *bp)
1472 {
1473 	struct vnode *vp = bp->b_vp;
1474 	struct buf *root;
1475 
1476 	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
1477 	if (bp->b_xflags & BX_VNDIRTY) {
1478 		if (bp != vp->v_dirtyblkroot) {
1479 			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1480 			KASSERT(root == bp, ("splay lookup failed during dirty remove"));
1481 		}
1482 		if (bp->b_left == NULL) {
1483 			root = bp->b_right;
1484 		} else {
1485 			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1486 			root->b_right = bp->b_right;
1487 		}
1488 		vp->v_dirtyblkroot = root;
1489 		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
1490 		vp->v_dirtybufcnt--;
1491 	} else {
1492 		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
1493 		if (bp != vp->v_cleanblkroot) {
1494 			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1495 			KASSERT(root == bp, ("splay lookup failed during clean remove"));
1496 		}
1497 		if (bp->b_left == NULL) {
1498 			root = bp->b_right;
1499 		} else {
1500 			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1501 			root->b_right = bp->b_right;
1502 		}
1503 		vp->v_cleanblkroot = root;
1504 		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
1505 		vp->v_cleanbufcnt--;
1506 	}
1507 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1508 }
1509 
1510 /*
1511  * Add the buffer to the sorted clean or dirty block list using a
1512  * splay tree algorithm.
1513  *
1514  * NOTE: xflags is passed as a constant, optimizing this inline function!
1515  */
1516 static
1517 void
1518 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
1519 {
1520 	struct buf *root;
1521 
1522 	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
1523 	bp->b_xflags |= xflags;
1524 	if (xflags & BX_VNDIRTY) {
1525 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1526 		if (root == NULL) {
1527 			bp->b_left = NULL;
1528 			bp->b_right = NULL;
1529 			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
1530 		} else if (bp->b_lblkno < root->b_lblkno ||
1531 		    (bp->b_lblkno == root->b_lblkno &&
1532 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1533 			bp->b_left = root->b_left;
1534 			bp->b_right = root;
1535 			root->b_left = NULL;
1536 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1537 		} else {
1538 			bp->b_right = root->b_right;
1539 			bp->b_left = root;
1540 			root->b_right = NULL;
1541 			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
1542 			    root, bp, b_vnbufs);
1543 		}
1544 		vp->v_dirtybufcnt++;
1545 		vp->v_dirtyblkroot = bp;
1546 	} else {
1547 		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
1548 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1549 		if (root == NULL) {
1550 			bp->b_left = NULL;
1551 			bp->b_right = NULL;
1552 			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
1553 		} else if (bp->b_lblkno < root->b_lblkno ||
1554 		    (bp->b_lblkno == root->b_lblkno &&
1555 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1556 			bp->b_left = root->b_left;
1557 			bp->b_right = root;
1558 			root->b_left = NULL;
1559 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1560 		} else {
1561 			bp->b_right = root->b_right;
1562 			bp->b_left = root;
1563 			root->b_right = NULL;
1564 			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
1565 			    root, bp, b_vnbufs);
1566 		}
1567 		vp->v_cleanbufcnt++;
1568 		vp->v_cleanblkroot = bp;
1569 	}
1570 }
1571 
1572 /*
1573  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1574  * shadow buffers used in background bitmap writes.
1575  *
1576  * This code isn't quite efficient as it could be because we are maintaining
1577  * two sorted lists and do not know which list the block resides in.
1578  *
1579  * During a "make buildworld" the desired buffer is found at one of
1580  * the roots more than 60% of the time.  Thus, checking both roots
1581  * before performing either splay eliminates unnecessary splays on the
1582  * first tree splayed.
1583  */
1584 struct buf *
1585 gbincore(struct vnode *vp, daddr_t lblkno)
1586 {
1587 	struct buf *bp;
1588 
1589 	GIANT_REQUIRED;
1590 
1591 	ASSERT_VI_LOCKED(vp, "gbincore");
1592 	if ((bp = vp->v_cleanblkroot) != NULL &&
1593 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1594 		return (bp);
1595 	if ((bp = vp->v_dirtyblkroot) != NULL &&
1596 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1597 		return (bp);
1598 	if ((bp = vp->v_cleanblkroot) != NULL) {
1599 		vp->v_cleanblkroot = bp = buf_splay(lblkno, 0, bp);
1600 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1601 			return (bp);
1602 	}
1603 	if ((bp = vp->v_dirtyblkroot) != NULL) {
1604 		vp->v_dirtyblkroot = bp = buf_splay(lblkno, 0, bp);
1605 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1606 			return (bp);
1607 	}
1608 	return (NULL);
1609 }
1610 
1611 /*
1612  * Associate a buffer with a vnode.
1613  */
1614 void
1615 bgetvp(vp, bp)
1616 	register struct vnode *vp;
1617 	register struct buf *bp;
1618 {
1619 	int s;
1620 
1621 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1622 
1623 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1624 	    ("bgetvp: bp already attached! %p", bp));
1625 
1626 	ASSERT_VI_LOCKED(vp, "bgetvp");
1627 	vholdl(vp);
1628 	bp->b_vp = vp;
1629 	bp->b_dev = vn_todev(vp);
1630 	/*
1631 	 * Insert onto list for new vnode.
1632 	 */
1633 	s = splbio();
1634 	buf_vlist_add(bp, vp, BX_VNCLEAN);
1635 	splx(s);
1636 }
1637 
1638 /*
1639  * Disassociate a buffer from a vnode.
1640  */
1641 void
1642 brelvp(bp)
1643 	register struct buf *bp;
1644 {
1645 	struct vnode *vp;
1646 	int s;
1647 
1648 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1649 
1650 	/*
1651 	 * Delete from old vnode list, if on one.
1652 	 */
1653 	vp = bp->b_vp;
1654 	s = splbio();
1655 	VI_LOCK(vp);
1656 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1657 		buf_vlist_remove(bp);
1658 	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1659 		vp->v_iflag &= ~VI_ONWORKLST;
1660 		mtx_lock(&sync_mtx);
1661 		LIST_REMOVE(vp, v_synclist);
1662 		mtx_unlock(&sync_mtx);
1663 	}
1664 	vdropl(vp);
1665 	VI_UNLOCK(vp);
1666 	bp->b_vp = (struct vnode *) 0;
1667 	if (bp->b_object)
1668 		bp->b_object = NULL;
1669 	splx(s);
1670 }
1671 
1672 /*
1673  * Add an item to the syncer work queue.
1674  */
1675 static void
1676 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
1677 {
1678 	int s, slot;
1679 
1680 	s = splbio();
1681 	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
1682 
1683 	mtx_lock(&sync_mtx);
1684 	if (vp->v_iflag & VI_ONWORKLST)
1685 		LIST_REMOVE(vp, v_synclist);
1686 	else
1687 		vp->v_iflag |= VI_ONWORKLST;
1688 
1689 	if (delay > syncer_maxdelay - 2)
1690 		delay = syncer_maxdelay - 2;
1691 	slot = (syncer_delayno + delay) & syncer_mask;
1692 
1693 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
1694 	mtx_unlock(&sync_mtx);
1695 
1696 	splx(s);
1697 }
1698 
1699 struct  proc *updateproc;
1700 static void sched_sync(void);
1701 static struct kproc_desc up_kp = {
1702 	"syncer",
1703 	sched_sync,
1704 	&updateproc
1705 };
1706 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1707 
1708 /*
1709  * System filesystem synchronizer daemon.
1710  */
1711 static void
1712 sched_sync(void)
1713 {
1714 	struct synclist *slp;
1715 	struct vnode *vp;
1716 	struct mount *mp;
1717 	long starttime;
1718 	int s;
1719 	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);  /* XXXKSE */
1720 
1721 	mtx_lock(&Giant);
1722 
1723 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
1724 	    SHUTDOWN_PRI_LAST);
1725 
1726 	for (;;) {
1727 		kthread_suspend_check(td->td_proc);
1728 
1729 		starttime = time_second;
1730 
1731 		/*
1732 		 * Push files whose dirty time has expired.  Be careful
1733 		 * of interrupt race on slp queue.
1734 		 */
1735 		s = splbio();
1736 		mtx_lock(&sync_mtx);
1737 		slp = &syncer_workitem_pending[syncer_delayno];
1738 		syncer_delayno += 1;
1739 		if (syncer_delayno == syncer_maxdelay)
1740 			syncer_delayno = 0;
1741 		splx(s);
1742 
1743 		while ((vp = LIST_FIRST(slp)) != NULL) {
1744 			mtx_unlock(&sync_mtx);
1745 			if (VOP_ISLOCKED(vp, NULL) == 0 &&
1746 			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
1747 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1748 				(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1749 				VOP_UNLOCK(vp, 0, td);
1750 				vn_finished_write(mp);
1751 			}
1752 			s = splbio();
1753 			mtx_lock(&sync_mtx);
1754 			if (LIST_FIRST(slp) == vp) {
1755 				mtx_unlock(&sync_mtx);
1756 				/*
1757 				 * Note: VFS vnodes can remain on the
1758 				 * worklist too with no dirty blocks, but
1759 				 * since sync_fsync() moves it to a different
1760 				 * slot we are safe.
1761 				 */
1762 				VI_LOCK(vp);
1763 				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
1764 				    !vn_isdisk(vp, NULL)) {
1765 					panic("sched_sync: fsync failed "
1766 					      "vp %p tag %s", vp, vp->v_tag);
1767 				}
1768 				/*
1769 				 * Put us back on the worklist.  The worklist
1770 				 * routine will remove us from our current
1771 				 * position and then add us back in at a later
1772 				 * position.
1773 				 */
1774 				vn_syncer_add_to_worklist(vp, syncdelay);
1775 				VI_UNLOCK(vp);
1776 				mtx_lock(&sync_mtx);
1777 			}
1778 			splx(s);
1779 		}
1780 		mtx_unlock(&sync_mtx);
1781 
1782 		/*
1783 		 * Do soft update processing.
1784 		 */
1785 		if (softdep_process_worklist_hook != NULL)
1786 			(*softdep_process_worklist_hook)(NULL);
1787 
1788 		/*
1789 		 * The variable rushjob allows the kernel to speed up the
1790 		 * processing of the filesystem syncer process. A rushjob
1791 		 * value of N tells the filesystem syncer to process the next
1792 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1793 		 * is used by the soft update code to speed up the filesystem
1794 		 * syncer process when the incore state is getting so far
1795 		 * ahead of the disk that the kernel memory pool is being
1796 		 * threatened with exhaustion.
1797 		 */
1798 		mtx_lock(&sync_mtx);
1799 		if (rushjob > 0) {
1800 			rushjob -= 1;
1801 			mtx_unlock(&sync_mtx);
1802 			continue;
1803 		}
1804 		mtx_unlock(&sync_mtx);
1805 		/*
1806 		 * If it has taken us less than a second to process the
1807 		 * current work, then wait. Otherwise start right over
1808 		 * again. We can still lose time if any single round
1809 		 * takes more than two seconds, but it does not really
1810 		 * matter as we are just trying to generally pace the
1811 		 * filesystem activity.
1812 		 */
1813 		if (time_second == starttime)
1814 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1815 	}
1816 }
1817 
1818 /*
1819  * Request the syncer daemon to speed up its work.
1820  * We never push it to speed up more than half of its
1821  * normal turn time, otherwise it could take over the cpu.
1822  * XXXKSE  only one update?
1823  */
1824 int
1825 speedup_syncer()
1826 {
1827 	struct thread *td;
1828 	int ret = 0;
1829 
1830 	td = FIRST_THREAD_IN_PROC(updateproc);
1831 	mtx_lock_spin(&sched_lock);
1832 	if (td->td_wchan == &lbolt) {
1833 		unsleep(td);
1834 		TD_CLR_SLEEPING(td);
1835 		setrunnable(td);
1836 	}
1837 	mtx_unlock_spin(&sched_lock);
1838 	mtx_lock(&sync_mtx);
1839 	if (rushjob < syncdelay / 2) {
1840 		rushjob += 1;
1841 		stat_rush_requests += 1;
1842 		ret = 1;
1843 	}
1844 	mtx_unlock(&sync_mtx);
1845 	return (ret);
1846 }
1847 
1848 /*
1849  * Associate a p-buffer with a vnode.
1850  *
1851  * Also sets B_PAGING flag to indicate that vnode is not fully associated
1852  * with the buffer.  i.e. the bp has not been linked into the vnode or
1853  * ref-counted.
1854  */
1855 void
1856 pbgetvp(vp, bp)
1857 	register struct vnode *vp;
1858 	register struct buf *bp;
1859 {
1860 
1861 	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1862 
1863 	bp->b_vp = vp;
1864 	bp->b_flags |= B_PAGING;
1865 	bp->b_dev = vn_todev(vp);
1866 }
1867 
1868 /*
1869  * Disassociate a p-buffer from a vnode.
1870  */
1871 void
1872 pbrelvp(bp)
1873 	register struct buf *bp;
1874 {
1875 
1876 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1877 
1878 	/* XXX REMOVE ME */
1879 	VI_LOCK(bp->b_vp);
1880 	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1881 		panic(
1882 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1883 		    bp,
1884 		    (int)bp->b_flags
1885 		);
1886 	}
1887 	VI_UNLOCK(bp->b_vp);
1888 	bp->b_vp = (struct vnode *) 0;
1889 	bp->b_flags &= ~B_PAGING;
1890 }
1891 
1892 /*
1893  * Reassign a buffer from one vnode to another.
1894  * Used to assign file specific control information
1895  * (indirect blocks) to the vnode to which they belong.
1896  */
1897 void
1898 reassignbuf(bp, newvp)
1899 	register struct buf *bp;
1900 	register struct vnode *newvp;
1901 {
1902 	int delay;
1903 	int s;
1904 
1905 	if (newvp == NULL) {
1906 		printf("reassignbuf: NULL");
1907 		return;
1908 	}
1909 	++reassignbufcalls;
1910 
1911 	/*
1912 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1913 	 * is not fully linked in.
1914 	 */
1915 	if (bp->b_flags & B_PAGING)
1916 		panic("cannot reassign paging buffer");
1917 
1918 	s = splbio();
1919 	/*
1920 	 * Delete from old vnode list, if on one.
1921 	 */
1922 	VI_LOCK(bp->b_vp);
1923 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1924 		buf_vlist_remove(bp);
1925 		if (bp->b_vp != newvp) {
1926 			vdropl(bp->b_vp);
1927 			bp->b_vp = NULL;	/* for clarification */
1928 		}
1929 	}
1930 	VI_UNLOCK(bp->b_vp);
1931 	/*
1932 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1933 	 * of clean buffers.
1934 	 */
1935 	VI_LOCK(newvp);
1936 	if (bp->b_flags & B_DELWRI) {
1937 		if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
1938 			switch (newvp->v_type) {
1939 			case VDIR:
1940 				delay = dirdelay;
1941 				break;
1942 			case VCHR:
1943 				if (newvp->v_rdev->si_mountpoint != NULL) {
1944 					delay = metadelay;
1945 					break;
1946 				}
1947 				/* FALLTHROUGH */
1948 			default:
1949 				delay = filedelay;
1950 			}
1951 			vn_syncer_add_to_worklist(newvp, delay);
1952 		}
1953 		buf_vlist_add(bp, newvp, BX_VNDIRTY);
1954 	} else {
1955 		buf_vlist_add(bp, newvp, BX_VNCLEAN);
1956 
1957 		if ((newvp->v_iflag & VI_ONWORKLST) &&
1958 		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1959 			mtx_lock(&sync_mtx);
1960 			LIST_REMOVE(newvp, v_synclist);
1961 			mtx_unlock(&sync_mtx);
1962 			newvp->v_iflag &= ~VI_ONWORKLST;
1963 		}
1964 	}
1965 	if (bp->b_vp != newvp) {
1966 		bp->b_vp = newvp;
1967 		vholdl(bp->b_vp);
1968 	}
1969 	VI_UNLOCK(newvp);
1970 	splx(s);
1971 }
1972 
1973 /*
1974  * Create a vnode for a device.
1975  * Used for mounting the root filesystem.
1976  */
1977 int
1978 bdevvp(dev, vpp)
1979 	dev_t dev;
1980 	struct vnode **vpp;
1981 {
1982 	register struct vnode *vp;
1983 	struct vnode *nvp;
1984 	int error;
1985 
1986 	if (dev == NODEV) {
1987 		*vpp = NULLVP;
1988 		return (ENXIO);
1989 	}
1990 	if (vfinddev(dev, VCHR, vpp))
1991 		return (0);
1992 	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1993 	if (error) {
1994 		*vpp = NULLVP;
1995 		return (error);
1996 	}
1997 	vp = nvp;
1998 	vp->v_type = VCHR;
1999 	addalias(vp, dev);
2000 	*vpp = vp;
2001 	return (0);
2002 }
2003 
2004 static void
2005 v_incr_usecount(struct vnode *vp, int delta)
2006 {
2007 	vp->v_usecount += delta;
2008 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2009 		mtx_lock(&spechash_mtx);
2010 		vp->v_rdev->si_usecount += delta;
2011 		mtx_unlock(&spechash_mtx);
2012 	}
2013 }
2014 
2015 /*
2016  * Add vnode to the alias list hung off the dev_t.
2017  *
2018  * The reason for this gunk is that multiple vnodes can reference
2019  * the same physical device, so checking vp->v_usecount to see
2020  * how many users there are is inadequate; the v_usecount for
2021  * the vnodes need to be accumulated.  vcount() does that.
2022  */
2023 struct vnode *
2024 addaliasu(nvp, nvp_rdev)
2025 	struct vnode *nvp;
2026 	udev_t nvp_rdev;
2027 {
2028 	struct vnode *ovp;
2029 	vop_t **ops;
2030 	dev_t dev;
2031 
2032 	if (nvp->v_type == VBLK)
2033 		return (nvp);
2034 	if (nvp->v_type != VCHR)
2035 		panic("addaliasu on non-special vnode");
2036 	dev = udev2dev(nvp_rdev, 0);
2037 	/*
2038 	 * Check to see if we have a bdevvp vnode with no associated
2039 	 * filesystem. If so, we want to associate the filesystem of
2040 	 * the new newly instigated vnode with the bdevvp vnode and
2041 	 * discard the newly created vnode rather than leaving the
2042 	 * bdevvp vnode lying around with no associated filesystem.
2043 	 */
2044 	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
2045 		addalias(nvp, dev);
2046 		return (nvp);
2047 	}
2048 	/*
2049 	 * Discard unneeded vnode, but save its node specific data.
2050 	 * Note that if there is a lock, it is carried over in the
2051 	 * node specific data to the replacement vnode.
2052 	 */
2053 	vref(ovp);
2054 	ovp->v_data = nvp->v_data;
2055 	ovp->v_tag = nvp->v_tag;
2056 	nvp->v_data = NULL;
2057 	lockdestroy(ovp->v_vnlock);
2058 	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
2059 	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
2060 	ops = ovp->v_op;
2061 	ovp->v_op = nvp->v_op;
2062 	if (VOP_ISLOCKED(nvp, curthread)) {
2063 		VOP_UNLOCK(nvp, 0, curthread);
2064 		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
2065 	}
2066 	nvp->v_op = ops;
2067 	insmntque(ovp, nvp->v_mount);
2068 	vrele(nvp);
2069 	vgone(nvp);
2070 	return (ovp);
2071 }
2072 
2073 /* This is a local helper function that do the same as addaliasu, but for a
2074  * dev_t instead of an udev_t. */
2075 static void
2076 addalias(nvp, dev)
2077 	struct vnode *nvp;
2078 	dev_t dev;
2079 {
2080 
2081 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
2082 	nvp->v_rdev = dev;
2083 	VI_LOCK(nvp);
2084 	mtx_lock(&spechash_mtx);
2085 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
2086 	dev->si_usecount += nvp->v_usecount;
2087 	mtx_unlock(&spechash_mtx);
2088 	VI_UNLOCK(nvp);
2089 }
2090 
2091 /*
2092  * Grab a particular vnode from the free list, increment its
2093  * reference count and lock it. The vnode lock bit is set if the
2094  * vnode is being eliminated in vgone. The process is awakened
2095  * when the transition is completed, and an error returned to
2096  * indicate that the vnode is no longer usable (possibly having
2097  * been changed to a new filesystem type).
2098  */
2099 int
2100 vget(vp, flags, td)
2101 	register struct vnode *vp;
2102 	int flags;
2103 	struct thread *td;
2104 {
2105 	int error;
2106 
2107 	/*
2108 	 * If the vnode is in the process of being cleaned out for
2109 	 * another use, we wait for the cleaning to finish and then
2110 	 * return failure. Cleaning is determined by checking that
2111 	 * the VI_XLOCK flag is set.
2112 	 */
2113 	if ((flags & LK_INTERLOCK) == 0)
2114 		VI_LOCK(vp);
2115 	if (vp->v_iflag & VI_XLOCK && vp->v_vxproc != curthread) {
2116 		vp->v_iflag |= VI_XWANT;
2117 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
2118 		return (ENOENT);
2119 	}
2120 
2121 	v_incr_usecount(vp, 1);
2122 
2123 	if (VSHOULDBUSY(vp))
2124 		vbusy(vp);
2125 	if (flags & LK_TYPE_MASK) {
2126 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2127 			/*
2128 			 * must expand vrele here because we do not want
2129 			 * to call VOP_INACTIVE if the reference count
2130 			 * drops back to zero since it was never really
2131 			 * active. We must remove it from the free list
2132 			 * before sleeping so that multiple processes do
2133 			 * not try to recycle it.
2134 			 */
2135 			VI_LOCK(vp);
2136 			v_incr_usecount(vp, -1);
2137 			if (VSHOULDFREE(vp))
2138 				vfree(vp);
2139 			else
2140 				vlruvp(vp);
2141 			VI_UNLOCK(vp);
2142 		}
2143 		return (error);
2144 	}
2145 	VI_UNLOCK(vp);
2146 	return (0);
2147 }
2148 
2149 /*
2150  * Increase the reference count of a vnode.
2151  */
2152 void
2153 vref(struct vnode *vp)
2154 {
2155 	VI_LOCK(vp);
2156 	v_incr_usecount(vp, 1);
2157 	VI_UNLOCK(vp);
2158 }
2159 
2160 /*
2161  * Return reference count of a vnode.
2162  *
2163  * The results of this call are only guaranteed when some mechanism other
2164  * than the VI lock is used to stop other processes from gaining references
2165  * to the vnode.  This may be the case if the caller holds the only reference.
2166  * This is also useful when stale data is acceptable as race conditions may
2167  * be accounted for by some other means.
2168  */
2169 int
2170 vrefcnt(struct vnode *vp)
2171 {
2172 	int usecnt;
2173 
2174 	VI_LOCK(vp);
2175 	usecnt = vp->v_usecount;
2176 	VI_UNLOCK(vp);
2177 
2178 	return (usecnt);
2179 }
2180 
2181 
2182 /*
2183  * Vnode put/release.
2184  * If count drops to zero, call inactive routine and return to freelist.
2185  */
2186 void
2187 vrele(vp)
2188 	struct vnode *vp;
2189 {
2190 	struct thread *td = curthread;	/* XXX */
2191 
2192 	KASSERT(vp != NULL, ("vrele: null vp"));
2193 
2194 	VI_LOCK(vp);
2195 
2196 	/* Skip this v_writecount check if we're going to panic below. */
2197 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2198 	    ("vrele: missed vn_close"));
2199 
2200 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2201 	    vp->v_usecount == 1)) {
2202 		v_incr_usecount(vp, -1);
2203 		VI_UNLOCK(vp);
2204 
2205 		return;
2206 	}
2207 
2208 	if (vp->v_usecount == 1) {
2209 		v_incr_usecount(vp, -1);
2210 		/*
2211 		 * We must call VOP_INACTIVE with the node locked. Mark
2212 		 * as VI_DOINGINACT to avoid recursion.
2213 		 */
2214 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2215 			VI_LOCK(vp);
2216 			vp->v_iflag |= VI_DOINGINACT;
2217 			VI_UNLOCK(vp);
2218 			VOP_INACTIVE(vp, td);
2219 			VI_LOCK(vp);
2220 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2221 			    ("vrele: lost VI_DOINGINACT"));
2222 			vp->v_iflag &= ~VI_DOINGINACT;
2223 			VI_UNLOCK(vp);
2224 		}
2225 		VI_LOCK(vp);
2226 		if (VSHOULDFREE(vp))
2227 			vfree(vp);
2228 		else
2229 			vlruvp(vp);
2230 		VI_UNLOCK(vp);
2231 
2232 	} else {
2233 #ifdef DIAGNOSTIC
2234 		vprint("vrele: negative ref count", vp);
2235 #endif
2236 		VI_UNLOCK(vp);
2237 		panic("vrele: negative ref cnt");
2238 	}
2239 }
2240 
2241 /*
2242  * Release an already locked vnode.  This give the same effects as
2243  * unlock+vrele(), but takes less time and avoids releasing and
2244  * re-aquiring the lock (as vrele() aquires the lock internally.)
2245  */
2246 void
2247 vput(vp)
2248 	struct vnode *vp;
2249 {
2250 	struct thread *td = curthread;	/* XXX */
2251 
2252 	GIANT_REQUIRED;
2253 
2254 	KASSERT(vp != NULL, ("vput: null vp"));
2255 	VI_LOCK(vp);
2256 	/* Skip this v_writecount check if we're going to panic below. */
2257 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2258 	    ("vput: missed vn_close"));
2259 
2260 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2261 	    vp->v_usecount == 1)) {
2262 		v_incr_usecount(vp, -1);
2263 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2264 		return;
2265 	}
2266 
2267 	if (vp->v_usecount == 1) {
2268 		v_incr_usecount(vp, -1);
2269 		/*
2270 		 * We must call VOP_INACTIVE with the node locked, so
2271 		 * we just need to release the vnode mutex. Mark as
2272 		 * as VI_DOINGINACT to avoid recursion.
2273 		 */
2274 		vp->v_iflag |= VI_DOINGINACT;
2275 		VI_UNLOCK(vp);
2276 		VOP_INACTIVE(vp, td);
2277 		VI_LOCK(vp);
2278 		KASSERT(vp->v_iflag & VI_DOINGINACT,
2279 		    ("vput: lost VI_DOINGINACT"));
2280 		vp->v_iflag &= ~VI_DOINGINACT;
2281 		if (VSHOULDFREE(vp))
2282 			vfree(vp);
2283 		else
2284 			vlruvp(vp);
2285 		VI_UNLOCK(vp);
2286 
2287 	} else {
2288 #ifdef DIAGNOSTIC
2289 		vprint("vput: negative ref count", vp);
2290 #endif
2291 		panic("vput: negative ref cnt");
2292 	}
2293 }
2294 
2295 /*
2296  * Somebody doesn't want the vnode recycled.
2297  */
2298 void
2299 vhold(struct vnode *vp)
2300 {
2301 	VI_LOCK(vp);
2302 	vholdl(vp);
2303 	VI_UNLOCK(vp);
2304 }
2305 
2306 void
2307 vholdl(vp)
2308 	register struct vnode *vp;
2309 {
2310 	int s;
2311 
2312 	s = splbio();
2313 	vp->v_holdcnt++;
2314 	if (VSHOULDBUSY(vp))
2315 		vbusy(vp);
2316 	splx(s);
2317 }
2318 
2319 /*
2320  * Note that there is one less who cares about this vnode.  vdrop() is the
2321  * opposite of vhold().
2322  */
2323 void
2324 vdrop(struct vnode *vp)
2325 {
2326 	VI_LOCK(vp);
2327 	vdropl(vp);
2328 	VI_UNLOCK(vp);
2329 }
2330 
2331 void
2332 vdropl(vp)
2333 	register struct vnode *vp;
2334 {
2335 	int s;
2336 
2337 	s = splbio();
2338 	if (vp->v_holdcnt <= 0)
2339 		panic("vdrop: holdcnt");
2340 	vp->v_holdcnt--;
2341 	if (VSHOULDFREE(vp))
2342 		vfree(vp);
2343 	else
2344 		vlruvp(vp);
2345 	splx(s);
2346 }
2347 
2348 /*
2349  * Remove any vnodes in the vnode table belonging to mount point mp.
2350  *
2351  * If FORCECLOSE is not specified, there should not be any active ones,
2352  * return error if any are found (nb: this is a user error, not a
2353  * system error). If FORCECLOSE is specified, detach any active vnodes
2354  * that are found.
2355  *
2356  * If WRITECLOSE is set, only flush out regular file vnodes open for
2357  * writing.
2358  *
2359  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2360  *
2361  * `rootrefs' specifies the base reference count for the root vnode
2362  * of this filesystem. The root vnode is considered busy if its
2363  * v_usecount exceeds this value. On a successful return, vflush()
2364  * will call vrele() on the root vnode exactly rootrefs times.
2365  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2366  * be zero.
2367  */
2368 #ifdef DIAGNOSTIC
2369 static int busyprt = 0;		/* print out busy vnodes */
2370 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2371 #endif
2372 
2373 int
2374 vflush(mp, rootrefs, flags)
2375 	struct mount *mp;
2376 	int rootrefs;
2377 	int flags;
2378 {
2379 	struct thread *td = curthread;	/* XXX */
2380 	struct vnode *vp, *nvp, *rootvp = NULL;
2381 	struct vattr vattr;
2382 	int busy = 0, error;
2383 
2384 	if (rootrefs > 0) {
2385 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2386 		    ("vflush: bad args"));
2387 		/*
2388 		 * Get the filesystem root vnode. We can vput() it
2389 		 * immediately, since with rootrefs > 0, it won't go away.
2390 		 */
2391 		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
2392 			return (error);
2393 		vput(rootvp);
2394 
2395 	}
2396 	mtx_lock(&mntvnode_mtx);
2397 loop:
2398 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
2399 		/*
2400 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
2401 		 * Start over if it has (it won't be on the list anymore).
2402 		 */
2403 		if (vp->v_mount != mp)
2404 			goto loop;
2405 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
2406 
2407 		VI_LOCK(vp);
2408 		mtx_unlock(&mntvnode_mtx);
2409 		vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, td);
2410 		/*
2411 		 * This vnode could have been reclaimed while we were
2412 		 * waiting for the lock since we are not holding a
2413 		 * reference.
2414 		 * Start over if the vnode was reclaimed.
2415 		 */
2416 		if (vp->v_mount != mp) {
2417 			VOP_UNLOCK(vp, 0, td);
2418 			mtx_lock(&mntvnode_mtx);
2419 			goto loop;
2420 		}
2421 		/*
2422 		 * Skip over a vnodes marked VV_SYSTEM.
2423 		 */
2424 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2425 			VOP_UNLOCK(vp, 0, td);
2426 			mtx_lock(&mntvnode_mtx);
2427 			continue;
2428 		}
2429 		/*
2430 		 * If WRITECLOSE is set, flush out unlinked but still open
2431 		 * files (even if open only for reading) and regular file
2432 		 * vnodes open for writing.
2433 		 */
2434 		if (flags & WRITECLOSE) {
2435 			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2436 			VI_LOCK(vp);
2437 
2438 			if ((vp->v_type == VNON ||
2439 			    (error == 0 && vattr.va_nlink > 0)) &&
2440 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2441 				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2442 				mtx_lock(&mntvnode_mtx);
2443 				continue;
2444 			}
2445 		} else
2446 			VI_LOCK(vp);
2447 
2448 		VOP_UNLOCK(vp, 0, td);
2449 
2450 		/*
2451 		 * With v_usecount == 0, all we need to do is clear out the
2452 		 * vnode data structures and we are done.
2453 		 */
2454 		if (vp->v_usecount == 0) {
2455 			vgonel(vp, td);
2456 			mtx_lock(&mntvnode_mtx);
2457 			continue;
2458 		}
2459 
2460 		/*
2461 		 * If FORCECLOSE is set, forcibly close the vnode. For block
2462 		 * or character devices, revert to an anonymous device. For
2463 		 * all other files, just kill them.
2464 		 */
2465 		if (flags & FORCECLOSE) {
2466 			if (vp->v_type != VCHR) {
2467 				vgonel(vp, td);
2468 			} else {
2469 				vclean(vp, 0, td);
2470 				VI_UNLOCK(vp);
2471 				vp->v_op = spec_vnodeop_p;
2472 				insmntque(vp, (struct mount *) 0);
2473 			}
2474 			mtx_lock(&mntvnode_mtx);
2475 			continue;
2476 		}
2477 #ifdef DIAGNOSTIC
2478 		if (busyprt)
2479 			vprint("vflush: busy vnode", vp);
2480 #endif
2481 		VI_UNLOCK(vp);
2482 		mtx_lock(&mntvnode_mtx);
2483 		busy++;
2484 	}
2485 	mtx_unlock(&mntvnode_mtx);
2486 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2487 		/*
2488 		 * If just the root vnode is busy, and if its refcount
2489 		 * is equal to `rootrefs', then go ahead and kill it.
2490 		 */
2491 		VI_LOCK(rootvp);
2492 		KASSERT(busy > 0, ("vflush: not busy"));
2493 		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
2494 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2495 			vgonel(rootvp, td);
2496 			busy = 0;
2497 		} else
2498 			VI_UNLOCK(rootvp);
2499 	}
2500 	if (busy)
2501 		return (EBUSY);
2502 	for (; rootrefs > 0; rootrefs--)
2503 		vrele(rootvp);
2504 	return (0);
2505 }
2506 
2507 /*
2508  * This moves a now (likely recyclable) vnode to the end of the
2509  * mountlist.  XXX However, it is temporarily disabled until we
2510  * can clean up ffs_sync() and friends, which have loop restart
2511  * conditions which this code causes to operate O(N^2).
2512  */
2513 static void
2514 vlruvp(struct vnode *vp)
2515 {
2516 #if 0
2517 	struct mount *mp;
2518 
2519 	if ((mp = vp->v_mount) != NULL) {
2520 		mtx_lock(&mntvnode_mtx);
2521 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2522 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2523 		mtx_unlock(&mntvnode_mtx);
2524 	}
2525 #endif
2526 }
2527 
2528 /*
2529  * Disassociate the underlying filesystem from a vnode.
2530  */
2531 static void
2532 vclean(vp, flags, td)
2533 	struct vnode *vp;
2534 	int flags;
2535 	struct thread *td;
2536 {
2537 	int active;
2538 
2539 	ASSERT_VI_LOCKED(vp, "vclean");
2540 	/*
2541 	 * Check to see if the vnode is in use. If so we have to reference it
2542 	 * before we clean it out so that its count cannot fall to zero and
2543 	 * generate a race against ourselves to recycle it.
2544 	 */
2545 	if ((active = vp->v_usecount))
2546 		v_incr_usecount(vp, 1);
2547 
2548 	/*
2549 	 * Prevent the vnode from being recycled or brought into use while we
2550 	 * clean it out.
2551 	 */
2552 	if (vp->v_iflag & VI_XLOCK)
2553 		panic("vclean: deadlock");
2554 	vp->v_iflag |= VI_XLOCK;
2555 	vp->v_vxproc = curthread;
2556 	/*
2557 	 * Even if the count is zero, the VOP_INACTIVE routine may still
2558 	 * have the object locked while it cleans it out. The VOP_LOCK
2559 	 * ensures that the VOP_INACTIVE routine is done with its work.
2560 	 * For active vnodes, it ensures that no other activity can
2561 	 * occur while the underlying object is being cleaned out.
2562 	 */
2563 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2564 
2565 	/*
2566 	 * Clean out any buffers associated with the vnode.
2567 	 * If the flush fails, just toss the buffers.
2568 	 */
2569 	if (flags & DOCLOSE) {
2570 		struct buf *bp;
2571 		VI_LOCK(vp);
2572 		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2573 		VI_UNLOCK(vp);
2574 		if (bp != NULL)
2575 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2576 		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2577 			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2578 	}
2579 
2580 	VOP_DESTROYVOBJECT(vp);
2581 
2582 	/*
2583 	 * Any other processes trying to obtain this lock must first
2584 	 * wait for VXLOCK to clear, then call the new lock operation.
2585 	 */
2586 	VOP_UNLOCK(vp, 0, td);
2587 
2588 	/*
2589 	 * If purging an active vnode, it must be closed and
2590 	 * deactivated before being reclaimed. Note that the
2591 	 * VOP_INACTIVE will unlock the vnode.
2592 	 */
2593 	if (active) {
2594 		if (flags & DOCLOSE)
2595 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2596 		VI_LOCK(vp);
2597 		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2598 			vp->v_iflag |= VI_DOINGINACT;
2599 			VI_UNLOCK(vp);
2600 			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2601 				panic("vclean: cannot relock.");
2602 			VOP_INACTIVE(vp, td);
2603 			VI_LOCK(vp);
2604 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2605 			    ("vclean: lost VI_DOINGINACT"));
2606 			vp->v_iflag &= ~VI_DOINGINACT;
2607 		}
2608 		VI_UNLOCK(vp);
2609 	}
2610 
2611 	/*
2612 	 * Reclaim the vnode.
2613 	 */
2614 	if (VOP_RECLAIM(vp, td))
2615 		panic("vclean: cannot reclaim");
2616 
2617 	if (active) {
2618 		/*
2619 		 * Inline copy of vrele() since VOP_INACTIVE
2620 		 * has already been called.
2621 		 */
2622 		VI_LOCK(vp);
2623 		v_incr_usecount(vp, -1);
2624 		if (vp->v_usecount <= 0) {
2625 #ifdef DIAGNOSTIC
2626 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2627 				vprint("vclean: bad ref count", vp);
2628 				panic("vclean: ref cnt");
2629 			}
2630 #endif
2631 			vfree(vp);
2632 		}
2633 		VI_UNLOCK(vp);
2634 	}
2635 
2636 	cache_purge(vp);
2637 	VI_LOCK(vp);
2638 	if (VSHOULDFREE(vp))
2639 		vfree(vp);
2640 
2641 	/*
2642 	 * Done with purge, reset to the standard lock and
2643 	 * notify sleepers of the grim news.
2644 	 */
2645 	vp->v_vnlock = &vp->v_lock;
2646 	vp->v_op = dead_vnodeop_p;
2647 	if (vp->v_pollinfo != NULL)
2648 		vn_pollgone(vp);
2649 	vp->v_tag = "none";
2650 	vp->v_iflag &= ~VI_XLOCK;
2651 	vp->v_vxproc = NULL;
2652 	if (vp->v_iflag & VI_XWANT) {
2653 		vp->v_iflag &= ~VI_XWANT;
2654 		wakeup(vp);
2655 	}
2656 }
2657 
2658 /*
2659  * Eliminate all activity associated with the requested vnode
2660  * and with all vnodes aliased to the requested vnode.
2661  */
2662 int
2663 vop_revoke(ap)
2664 	struct vop_revoke_args /* {
2665 		struct vnode *a_vp;
2666 		int a_flags;
2667 	} */ *ap;
2668 {
2669 	struct vnode *vp, *vq;
2670 	dev_t dev;
2671 
2672 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2673 	vp = ap->a_vp;
2674 	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2675 
2676 	VI_LOCK(vp);
2677 	/*
2678 	 * If a vgone (or vclean) is already in progress,
2679 	 * wait until it is done and return.
2680 	 */
2681 	if (vp->v_iflag & VI_XLOCK) {
2682 		vp->v_iflag |= VI_XWANT;
2683 		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2684 		    "vop_revokeall", 0);
2685 		return (0);
2686 	}
2687 	VI_UNLOCK(vp);
2688 	dev = vp->v_rdev;
2689 	for (;;) {
2690 		mtx_lock(&spechash_mtx);
2691 		vq = SLIST_FIRST(&dev->si_hlist);
2692 		mtx_unlock(&spechash_mtx);
2693 		if (!vq)
2694 			break;
2695 		vgone(vq);
2696 	}
2697 	return (0);
2698 }
2699 
2700 /*
2701  * Recycle an unused vnode to the front of the free list.
2702  * Release the passed interlock if the vnode will be recycled.
2703  */
2704 int
2705 vrecycle(vp, inter_lkp, td)
2706 	struct vnode *vp;
2707 	struct mtx *inter_lkp;
2708 	struct thread *td;
2709 {
2710 
2711 	VI_LOCK(vp);
2712 	if (vp->v_usecount == 0) {
2713 		if (inter_lkp) {
2714 			mtx_unlock(inter_lkp);
2715 		}
2716 		vgonel(vp, td);
2717 		return (1);
2718 	}
2719 	VI_UNLOCK(vp);
2720 	return (0);
2721 }
2722 
2723 /*
2724  * Eliminate all activity associated with a vnode
2725  * in preparation for reuse.
2726  */
2727 void
2728 vgone(vp)
2729 	register struct vnode *vp;
2730 {
2731 	struct thread *td = curthread;	/* XXX */
2732 
2733 	VI_LOCK(vp);
2734 	vgonel(vp, td);
2735 }
2736 
2737 /*
2738  * vgone, with the vp interlock held.
2739  */
2740 void
2741 vgonel(vp, td)
2742 	struct vnode *vp;
2743 	struct thread *td;
2744 {
2745 	int s;
2746 
2747 	/*
2748 	 * If a vgone (or vclean) is already in progress,
2749 	 * wait until it is done and return.
2750 	 */
2751 	ASSERT_VI_LOCKED(vp, "vgonel");
2752 	if (vp->v_iflag & VI_XLOCK) {
2753 		vp->v_iflag |= VI_XWANT;
2754 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2755 		return;
2756 	}
2757 
2758 	/*
2759 	 * Clean out the filesystem specific data.
2760 	 */
2761 	vclean(vp, DOCLOSE, td);
2762 	VI_UNLOCK(vp);
2763 
2764 	/*
2765 	 * Delete from old mount point vnode list, if on one.
2766 	 */
2767 	if (vp->v_mount != NULL)
2768 		insmntque(vp, (struct mount *)0);
2769 	/*
2770 	 * If special device, remove it from special device alias list
2771 	 * if it is on one.
2772 	 */
2773 	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
2774 		VI_LOCK(vp);
2775 		mtx_lock(&spechash_mtx);
2776 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2777 		vp->v_rdev->si_usecount -= vp->v_usecount;
2778 		mtx_unlock(&spechash_mtx);
2779 		VI_UNLOCK(vp);
2780 		vp->v_rdev = NULL;
2781 	}
2782 
2783 	/*
2784 	 * If it is on the freelist and not already at the head,
2785 	 * move it to the head of the list. The test of the
2786 	 * VDOOMED flag and the reference count of zero is because
2787 	 * it will be removed from the free list by getnewvnode,
2788 	 * but will not have its reference count incremented until
2789 	 * after calling vgone. If the reference count were
2790 	 * incremented first, vgone would (incorrectly) try to
2791 	 * close the previous instance of the underlying object.
2792 	 */
2793 	VI_LOCK(vp);
2794 	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2795 		s = splbio();
2796 		mtx_lock(&vnode_free_list_mtx);
2797 		if (vp->v_iflag & VI_FREE) {
2798 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2799 		} else {
2800 			vp->v_iflag |= VI_FREE;
2801 			freevnodes++;
2802 		}
2803 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2804 		mtx_unlock(&vnode_free_list_mtx);
2805 		splx(s);
2806 	}
2807 
2808 	vp->v_type = VBAD;
2809 	VI_UNLOCK(vp);
2810 }
2811 
2812 /*
2813  * Lookup a vnode by device number.
2814  */
2815 int
2816 vfinddev(dev, type, vpp)
2817 	dev_t dev;
2818 	enum vtype type;
2819 	struct vnode **vpp;
2820 {
2821 	struct vnode *vp;
2822 
2823 	mtx_lock(&spechash_mtx);
2824 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2825 		if (type == vp->v_type) {
2826 			*vpp = vp;
2827 			mtx_unlock(&spechash_mtx);
2828 			return (1);
2829 		}
2830 	}
2831 	mtx_unlock(&spechash_mtx);
2832 	return (0);
2833 }
2834 
2835 /*
2836  * Calculate the total number of references to a special device.
2837  */
2838 int
2839 vcount(vp)
2840 	struct vnode *vp;
2841 {
2842 	int count;
2843 
2844 	mtx_lock(&spechash_mtx);
2845 	count = vp->v_rdev->si_usecount;
2846 	mtx_unlock(&spechash_mtx);
2847 	return (count);
2848 }
2849 
2850 /*
2851  * Same as above, but using the dev_t as argument
2852  */
2853 int
2854 count_dev(dev)
2855 	dev_t dev;
2856 {
2857 	struct vnode *vp;
2858 
2859 	vp = SLIST_FIRST(&dev->si_hlist);
2860 	if (vp == NULL)
2861 		return (0);
2862 	return(vcount(vp));
2863 }
2864 
2865 /*
2866  * Print out a description of a vnode.
2867  */
2868 static char *typename[] =
2869 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2870 
2871 void
2872 vprint(label, vp)
2873 	char *label;
2874 	struct vnode *vp;
2875 {
2876 	char buf[96];
2877 
2878 	if (label != NULL)
2879 		printf("%s: %p: ", label, (void *)vp);
2880 	else
2881 		printf("%p: ", (void *)vp);
2882 	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2883 	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2884 	    vp->v_writecount, vp->v_holdcnt);
2885 	buf[0] = '\0';
2886 	if (vp->v_vflag & VV_ROOT)
2887 		strcat(buf, "|VV_ROOT");
2888 	if (vp->v_vflag & VV_TEXT)
2889 		strcat(buf, "|VV_TEXT");
2890 	if (vp->v_vflag & VV_SYSTEM)
2891 		strcat(buf, "|VV_SYSTEM");
2892 	if (vp->v_iflag & VI_XLOCK)
2893 		strcat(buf, "|VI_XLOCK");
2894 	if (vp->v_iflag & VI_XWANT)
2895 		strcat(buf, "|VI_XWANT");
2896 	if (vp->v_iflag & VI_BWAIT)
2897 		strcat(buf, "|VI_BWAIT");
2898 	if (vp->v_iflag & VI_DOOMED)
2899 		strcat(buf, "|VI_DOOMED");
2900 	if (vp->v_iflag & VI_FREE)
2901 		strcat(buf, "|VI_FREE");
2902 	if (vp->v_vflag & VV_OBJBUF)
2903 		strcat(buf, "|VV_OBJBUF");
2904 	if (buf[0] != '\0')
2905 		printf(" flags (%s),", &buf[1]);
2906 	lockmgr_printinfo(vp->v_vnlock);
2907 	printf("\n");
2908 	if (vp->v_data != NULL)
2909 		VOP_PRINT(vp);
2910 }
2911 
2912 #ifdef DDB
2913 #include <ddb/ddb.h>
2914 /*
2915  * List all of the locked vnodes in the system.
2916  * Called when debugging the kernel.
2917  */
2918 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2919 {
2920 	struct mount *mp, *nmp;
2921 	struct vnode *vp;
2922 
2923 	/*
2924 	 * Note: because this is DDB, we can't obey the locking semantics
2925 	 * for these structures, which means we could catch an inconsistent
2926 	 * state and dereference a nasty pointer.  Not much to be done
2927 	 * about that.
2928 	 */
2929 	printf("Locked vnodes\n");
2930 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2931 		nmp = TAILQ_NEXT(mp, mnt_list);
2932 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2933 			if (VOP_ISLOCKED(vp, NULL))
2934 				vprint(NULL, vp);
2935 		}
2936 		nmp = TAILQ_NEXT(mp, mnt_list);
2937 	}
2938 }
2939 #endif
2940 
2941 /*
2942  * Fill in a struct xvfsconf based on a struct vfsconf.
2943  */
2944 static void
2945 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2946 {
2947 
2948 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2949 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2950 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2951 	xvfsp->vfc_flags = vfsp->vfc_flags;
2952 	/*
2953 	 * These are unused in userland, we keep them
2954 	 * to not break binary compatibility.
2955 	 */
2956 	xvfsp->vfc_vfsops = NULL;
2957 	xvfsp->vfc_next = NULL;
2958 }
2959 
2960 static int
2961 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2962 {
2963 	struct vfsconf *vfsp;
2964 	struct xvfsconf *xvfsp;
2965 	int cnt, error, i;
2966 
2967 	cnt = 0;
2968 	for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
2969 		cnt++;
2970 	xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
2971 	/*
2972 	 * Handle the race that we will have here when struct vfsconf
2973 	 * will be locked down by using both cnt and checking vfc_next
2974 	 * against NULL to determine the end of the loop.  The race will
2975 	 * happen because we will have to unlock before calling malloc().
2976 	 * We are protected by Giant for now.
2977 	 */
2978 	i = 0;
2979 	for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
2980 		vfsconf2x(vfsp, xvfsp + i);
2981 		i++;
2982 	}
2983 	error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
2984 	free(xvfsp, M_TEMP);
2985 	return (error);
2986 }
2987 
2988 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2989     "S,xvfsconf", "List of all configured filesystems");
2990 
2991 /*
2992  * Top level filesystem related information gathering.
2993  */
2994 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2995 
2996 static int
2997 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2998 {
2999 	int *name = (int *)arg1 - 1;	/* XXX */
3000 	u_int namelen = arg2 + 1;	/* XXX */
3001 	struct vfsconf *vfsp;
3002 	struct xvfsconf xvfsp;
3003 
3004 	printf("WARNING: userland calling deprecated sysctl, "
3005 	    "please rebuild world\n");
3006 
3007 #if 1 || defined(COMPAT_PRELITE2)
3008 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
3009 	if (namelen == 1)
3010 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
3011 #endif
3012 
3013 	switch (name[1]) {
3014 	case VFS_MAXTYPENUM:
3015 		if (namelen != 2)
3016 			return (ENOTDIR);
3017 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
3018 	case VFS_CONF:
3019 		if (namelen != 3)
3020 			return (ENOTDIR);	/* overloaded */
3021 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
3022 			if (vfsp->vfc_typenum == name[2])
3023 				break;
3024 		if (vfsp == NULL)
3025 			return (EOPNOTSUPP);
3026 		vfsconf2x(vfsp, &xvfsp);
3027 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3028 	}
3029 	return (EOPNOTSUPP);
3030 }
3031 
3032 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
3033 	"Generic filesystem");
3034 
3035 #if 1 || defined(COMPAT_PRELITE2)
3036 
3037 static int
3038 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3039 {
3040 	int error;
3041 	struct vfsconf *vfsp;
3042 	struct ovfsconf ovfs;
3043 
3044 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
3045 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3046 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3047 		ovfs.vfc_index = vfsp->vfc_typenum;
3048 		ovfs.vfc_refcount = vfsp->vfc_refcount;
3049 		ovfs.vfc_flags = vfsp->vfc_flags;
3050 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3051 		if (error)
3052 			return error;
3053 	}
3054 	return 0;
3055 }
3056 
3057 #endif /* 1 || COMPAT_PRELITE2 */
3058 
3059 #define KINFO_VNODESLOP		10
3060 #ifdef notyet
3061 /*
3062  * Dump vnode list (via sysctl).
3063  */
3064 /* ARGSUSED */
3065 static int
3066 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3067 {
3068 	struct xvnode *xvn;
3069 	struct thread *td = req->td;
3070 	struct mount *mp;
3071 	struct vnode *vp;
3072 	int error, len, n;
3073 
3074 	/*
3075 	 * Stale numvnodes access is not fatal here.
3076 	 */
3077 	req->lock = 0;
3078 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3079 	if (!req->oldptr)
3080 		/* Make an estimate */
3081 		return (SYSCTL_OUT(req, 0, len));
3082 
3083 	sysctl_wire_old_buffer(req, 0);
3084 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3085 	n = 0;
3086 	mtx_lock(&mountlist_mtx);
3087 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3088 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3089 			continue;
3090 		mtx_lock(&mntvnode_mtx);
3091 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3092 			if (n == len)
3093 				break;
3094 			vref(vp);
3095 			xvn[n].xv_size = sizeof *xvn;
3096 			xvn[n].xv_vnode = vp;
3097 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3098 			XV_COPY(usecount);
3099 			XV_COPY(writecount);
3100 			XV_COPY(holdcnt);
3101 			XV_COPY(id);
3102 			XV_COPY(mount);
3103 			XV_COPY(numoutput);
3104 			XV_COPY(type);
3105 #undef XV_COPY
3106 			xvn[n].xv_flag = vp->v_vflag;
3107 
3108 			switch (vp->v_type) {
3109 			case VREG:
3110 			case VDIR:
3111 			case VLNK:
3112 				xvn[n].xv_dev = vp->v_cachedfs;
3113 				xvn[n].xv_ino = vp->v_cachedid;
3114 				break;
3115 			case VBLK:
3116 			case VCHR:
3117 				if (vp->v_rdev == NULL) {
3118 					vrele(vp);
3119 					continue;
3120 				}
3121 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3122 				break;
3123 			case VSOCK:
3124 				xvn[n].xv_socket = vp->v_socket;
3125 				break;
3126 			case VFIFO:
3127 				xvn[n].xv_fifo = vp->v_fifoinfo;
3128 				break;
3129 			case VNON:
3130 			case VBAD:
3131 			default:
3132 				/* shouldn't happen? */
3133 				vrele(vp);
3134 				continue;
3135 			}
3136 			vrele(vp);
3137 			++n;
3138 		}
3139 		mtx_unlock(&mntvnode_mtx);
3140 		mtx_lock(&mountlist_mtx);
3141 		vfs_unbusy(mp, td);
3142 		if (n == len)
3143 			break;
3144 	}
3145 	mtx_unlock(&mountlist_mtx);
3146 
3147 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3148 	free(xvn, M_TEMP);
3149 	return (error);
3150 }
3151 
3152 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3153 	0, 0, sysctl_vnode, "S,xvnode", "");
3154 #endif
3155 
3156 /*
3157  * Check to see if a filesystem is mounted on a block device.
3158  */
3159 int
3160 vfs_mountedon(vp)
3161 	struct vnode *vp;
3162 {
3163 
3164 	if (vp->v_rdev->si_mountpoint != NULL)
3165 		return (EBUSY);
3166 	return (0);
3167 }
3168 
3169 /*
3170  * Unmount all filesystems. The list is traversed in reverse order
3171  * of mounting to avoid dependencies.
3172  */
3173 void
3174 vfs_unmountall()
3175 {
3176 	struct mount *mp;
3177 	struct thread *td;
3178 	int error;
3179 
3180 	if (curthread != NULL)
3181 		td = curthread;
3182 	else
3183 		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3184 	/*
3185 	 * Since this only runs when rebooting, it is not interlocked.
3186 	 */
3187 	while(!TAILQ_EMPTY(&mountlist)) {
3188 		mp = TAILQ_LAST(&mountlist, mntlist);
3189 		error = dounmount(mp, MNT_FORCE, td);
3190 		if (error) {
3191 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3192 			printf("unmount of %s failed (",
3193 			    mp->mnt_stat.f_mntonname);
3194 			if (error == EBUSY)
3195 				printf("BUSY)\n");
3196 			else
3197 				printf("%d)\n", error);
3198 		} else {
3199 			/* The unmount has removed mp from the mountlist */
3200 		}
3201 	}
3202 }
3203 
3204 /*
3205  * perform msync on all vnodes under a mount point
3206  * the mount point must be locked.
3207  */
3208 void
3209 vfs_msync(struct mount *mp, int flags)
3210 {
3211 	struct vnode *vp, *nvp;
3212 	struct vm_object *obj;
3213 	int tries;
3214 
3215 	GIANT_REQUIRED;
3216 
3217 	tries = 5;
3218 	mtx_lock(&mntvnode_mtx);
3219 loop:
3220 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
3221 		if (vp->v_mount != mp) {
3222 			if (--tries > 0)
3223 				goto loop;
3224 			break;
3225 		}
3226 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
3227 
3228 		VI_LOCK(vp);
3229 		if (vp->v_iflag & VI_XLOCK) {	/* XXX: what if MNT_WAIT? */
3230 			VI_UNLOCK(vp);
3231 			continue;
3232 		}
3233 
3234 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3235 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3236 			mtx_unlock(&mntvnode_mtx);
3237 			if (!vget(vp,
3238 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3239 			    curthread)) {
3240 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3241 					vput(vp);
3242 					mtx_lock(&mntvnode_mtx);
3243 					continue;
3244 				}
3245 
3246 				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3247 					VM_OBJECT_LOCK(obj);
3248 					vm_object_page_clean(obj, 0, 0,
3249 					    flags == MNT_WAIT ?
3250 					    OBJPC_SYNC : OBJPC_NOSYNC);
3251 					VM_OBJECT_UNLOCK(obj);
3252 				}
3253 				vput(vp);
3254 			}
3255 			mtx_lock(&mntvnode_mtx);
3256 			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3257 				if (--tries > 0)
3258 					goto loop;
3259 				break;
3260 			}
3261 		} else
3262 			VI_UNLOCK(vp);
3263 	}
3264 	mtx_unlock(&mntvnode_mtx);
3265 }
3266 
3267 /*
3268  * Create the VM object needed for VMIO and mmap support.  This
3269  * is done for all VREG files in the system.  Some filesystems might
3270  * afford the additional metadata buffering capability of the
3271  * VMIO code by making the device node be VMIO mode also.
3272  *
3273  * vp must be locked when vfs_object_create is called.
3274  */
3275 int
3276 vfs_object_create(vp, td, cred)
3277 	struct vnode *vp;
3278 	struct thread *td;
3279 	struct ucred *cred;
3280 {
3281 	GIANT_REQUIRED;
3282 	return (VOP_CREATEVOBJECT(vp, cred, td));
3283 }
3284 
3285 /*
3286  * Mark a vnode as free, putting it up for recycling.
3287  */
3288 void
3289 vfree(vp)
3290 	struct vnode *vp;
3291 {
3292 	int s;
3293 
3294 	ASSERT_VI_LOCKED(vp, "vfree");
3295 	s = splbio();
3296 	mtx_lock(&vnode_free_list_mtx);
3297 	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3298 	if (vp->v_iflag & VI_AGE) {
3299 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3300 	} else {
3301 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3302 	}
3303 	freevnodes++;
3304 	mtx_unlock(&vnode_free_list_mtx);
3305 	vp->v_iflag &= ~VI_AGE;
3306 	vp->v_iflag |= VI_FREE;
3307 	splx(s);
3308 }
3309 
3310 /*
3311  * Opposite of vfree() - mark a vnode as in use.
3312  */
3313 void
3314 vbusy(vp)
3315 	struct vnode *vp;
3316 {
3317 	int s;
3318 
3319 	s = splbio();
3320 	ASSERT_VI_LOCKED(vp, "vbusy");
3321 	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3322 
3323 	mtx_lock(&vnode_free_list_mtx);
3324 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3325 	freevnodes--;
3326 	mtx_unlock(&vnode_free_list_mtx);
3327 
3328 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3329 	splx(s);
3330 }
3331 
3332 /*
3333  * Record a process's interest in events which might happen to
3334  * a vnode.  Because poll uses the historic select-style interface
3335  * internally, this routine serves as both the ``check for any
3336  * pending events'' and the ``record my interest in future events''
3337  * functions.  (These are done together, while the lock is held,
3338  * to avoid race conditions.)
3339  */
3340 int
3341 vn_pollrecord(vp, td, events)
3342 	struct vnode *vp;
3343 	struct thread *td;
3344 	short events;
3345 {
3346 
3347 	if (vp->v_pollinfo == NULL)
3348 		v_addpollinfo(vp);
3349 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3350 	if (vp->v_pollinfo->vpi_revents & events) {
3351 		/*
3352 		 * This leaves events we are not interested
3353 		 * in available for the other process which
3354 		 * which presumably had requested them
3355 		 * (otherwise they would never have been
3356 		 * recorded).
3357 		 */
3358 		events &= vp->v_pollinfo->vpi_revents;
3359 		vp->v_pollinfo->vpi_revents &= ~events;
3360 
3361 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3362 		return events;
3363 	}
3364 	vp->v_pollinfo->vpi_events |= events;
3365 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3366 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3367 	return 0;
3368 }
3369 
3370 /*
3371  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3372  * it is possible for us to miss an event due to race conditions, but
3373  * that condition is expected to be rare, so for the moment it is the
3374  * preferred interface.
3375  */
3376 void
3377 vn_pollevent(vp, events)
3378 	struct vnode *vp;
3379 	short events;
3380 {
3381 
3382 	if (vp->v_pollinfo == NULL)
3383 		v_addpollinfo(vp);
3384 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3385 	if (vp->v_pollinfo->vpi_events & events) {
3386 		/*
3387 		 * We clear vpi_events so that we don't
3388 		 * call selwakeup() twice if two events are
3389 		 * posted before the polling process(es) is
3390 		 * awakened.  This also ensures that we take at
3391 		 * most one selwakeup() if the polling process
3392 		 * is no longer interested.  However, it does
3393 		 * mean that only one event can be noticed at
3394 		 * a time.  (Perhaps we should only clear those
3395 		 * event bits which we note?) XXX
3396 		 */
3397 		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3398 		vp->v_pollinfo->vpi_revents |= events;
3399 		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3400 	}
3401 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3402 }
3403 
3404 /*
3405  * Wake up anyone polling on vp because it is being revoked.
3406  * This depends on dead_poll() returning POLLHUP for correct
3407  * behavior.
3408  */
3409 void
3410 vn_pollgone(vp)
3411 	struct vnode *vp;
3412 {
3413 
3414 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3415 	VN_KNOTE(vp, NOTE_REVOKE);
3416 	if (vp->v_pollinfo->vpi_events) {
3417 		vp->v_pollinfo->vpi_events = 0;
3418 		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3419 	}
3420 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3421 }
3422 
3423 
3424 
3425 /*
3426  * Routine to create and manage a filesystem syncer vnode.
3427  */
3428 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3429 static int	sync_fsync(struct  vop_fsync_args *);
3430 static int	sync_inactive(struct  vop_inactive_args *);
3431 static int	sync_reclaim(struct  vop_reclaim_args *);
3432 
3433 static vop_t **sync_vnodeop_p;
3434 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3435 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3436 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3437 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3438 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3439 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3440 	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3441 	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3442 	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3443 	{ NULL, NULL }
3444 };
3445 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3446 	{ &sync_vnodeop_p, sync_vnodeop_entries };
3447 
3448 VNODEOP_SET(sync_vnodeop_opv_desc);
3449 
3450 /*
3451  * Create a new filesystem syncer vnode for the specified mount point.
3452  */
3453 int
3454 vfs_allocate_syncvnode(mp)
3455 	struct mount *mp;
3456 {
3457 	struct vnode *vp;
3458 	static long start, incr, next;
3459 	int error;
3460 
3461 	/* Allocate a new vnode */
3462 	if ((error = getnewvnode("syncer", mp, sync_vnodeop_p, &vp)) != 0) {
3463 		mp->mnt_syncer = NULL;
3464 		return (error);
3465 	}
3466 	vp->v_type = VNON;
3467 	/*
3468 	 * Place the vnode onto the syncer worklist. We attempt to
3469 	 * scatter them about on the list so that they will go off
3470 	 * at evenly distributed times even if all the filesystems
3471 	 * are mounted at once.
3472 	 */
3473 	next += incr;
3474 	if (next == 0 || next > syncer_maxdelay) {
3475 		start /= 2;
3476 		incr /= 2;
3477 		if (start == 0) {
3478 			start = syncer_maxdelay / 2;
3479 			incr = syncer_maxdelay;
3480 		}
3481 		next = start;
3482 	}
3483 	VI_LOCK(vp);
3484 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3485 	VI_UNLOCK(vp);
3486 	mp->mnt_syncer = vp;
3487 	return (0);
3488 }
3489 
3490 /*
3491  * Do a lazy sync of the filesystem.
3492  */
3493 static int
3494 sync_fsync(ap)
3495 	struct vop_fsync_args /* {
3496 		struct vnode *a_vp;
3497 		struct ucred *a_cred;
3498 		int a_waitfor;
3499 		struct thread *a_td;
3500 	} */ *ap;
3501 {
3502 	struct vnode *syncvp = ap->a_vp;
3503 	struct mount *mp = syncvp->v_mount;
3504 	struct thread *td = ap->a_td;
3505 	int error, asyncflag;
3506 
3507 	/*
3508 	 * We only need to do something if this is a lazy evaluation.
3509 	 */
3510 	if (ap->a_waitfor != MNT_LAZY)
3511 		return (0);
3512 
3513 	/*
3514 	 * Move ourselves to the back of the sync list.
3515 	 */
3516 	VI_LOCK(syncvp);
3517 	vn_syncer_add_to_worklist(syncvp, syncdelay);
3518 	VI_UNLOCK(syncvp);
3519 
3520 	/*
3521 	 * Walk the list of vnodes pushing all that are dirty and
3522 	 * not already on the sync list.
3523 	 */
3524 	mtx_lock(&mountlist_mtx);
3525 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3526 		mtx_unlock(&mountlist_mtx);
3527 		return (0);
3528 	}
3529 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3530 		vfs_unbusy(mp, td);
3531 		return (0);
3532 	}
3533 	asyncflag = mp->mnt_flag & MNT_ASYNC;
3534 	mp->mnt_flag &= ~MNT_ASYNC;
3535 	vfs_msync(mp, MNT_NOWAIT);
3536 	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3537 	if (asyncflag)
3538 		mp->mnt_flag |= MNT_ASYNC;
3539 	vn_finished_write(mp);
3540 	vfs_unbusy(mp, td);
3541 	return (error);
3542 }
3543 
3544 /*
3545  * The syncer vnode is no referenced.
3546  */
3547 static int
3548 sync_inactive(ap)
3549 	struct vop_inactive_args /* {
3550 		struct vnode *a_vp;
3551 		struct thread *a_td;
3552 	} */ *ap;
3553 {
3554 
3555 	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3556 	vgone(ap->a_vp);
3557 	return (0);
3558 }
3559 
3560 /*
3561  * The syncer vnode is no longer needed and is being decommissioned.
3562  *
3563  * Modifications to the worklist must be protected at splbio().
3564  */
3565 static int
3566 sync_reclaim(ap)
3567 	struct vop_reclaim_args /* {
3568 		struct vnode *a_vp;
3569 	} */ *ap;
3570 {
3571 	struct vnode *vp = ap->a_vp;
3572 	int s;
3573 
3574 	s = splbio();
3575 	vp->v_mount->mnt_syncer = NULL;
3576 	VI_LOCK(vp);
3577 	if (vp->v_iflag & VI_ONWORKLST) {
3578 		mtx_lock(&sync_mtx);
3579 		LIST_REMOVE(vp, v_synclist);
3580 		mtx_unlock(&sync_mtx);
3581 		vp->v_iflag &= ~VI_ONWORKLST;
3582 	}
3583 	VI_UNLOCK(vp);
3584 	splx(s);
3585 
3586 	return (0);
3587 }
3588 
3589 /*
3590  * extract the dev_t from a VCHR
3591  */
3592 dev_t
3593 vn_todev(vp)
3594 	struct vnode *vp;
3595 {
3596 	if (vp->v_type != VCHR)
3597 		return (NODEV);
3598 	return (vp->v_rdev);
3599 }
3600 
3601 /*
3602  * Check if vnode represents a disk device
3603  */
3604 int
3605 vn_isdisk(vp, errp)
3606 	struct vnode *vp;
3607 	int *errp;
3608 {
3609 	struct cdevsw *cdevsw;
3610 
3611 	if (vp->v_type != VCHR) {
3612 		if (errp != NULL)
3613 			*errp = ENOTBLK;
3614 		return (0);
3615 	}
3616 	if (vp->v_rdev == NULL) {
3617 		if (errp != NULL)
3618 			*errp = ENXIO;
3619 		return (0);
3620 	}
3621 	cdevsw = devsw(vp->v_rdev);
3622 	if (cdevsw == NULL) {
3623 		if (errp != NULL)
3624 			*errp = ENXIO;
3625 		return (0);
3626 	}
3627 	if (!(cdevsw->d_flags & D_DISK)) {
3628 		if (errp != NULL)
3629 			*errp = ENOTBLK;
3630 		return (0);
3631 	}
3632 	if (errp != NULL)
3633 		*errp = 0;
3634 	return (1);
3635 }
3636 
3637 /*
3638  * Free data allocated by namei(); see namei(9) for details.
3639  */
3640 void
3641 NDFREE(ndp, flags)
3642      struct nameidata *ndp;
3643      const uint flags;
3644 {
3645 	if (!(flags & NDF_NO_FREE_PNBUF) &&
3646 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3647 		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3648 		ndp->ni_cnd.cn_flags &= ~HASBUF;
3649 	}
3650 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3651 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3652 	    ndp->ni_dvp != ndp->ni_vp)
3653 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3654 	if (!(flags & NDF_NO_DVP_RELE) &&
3655 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3656 		vrele(ndp->ni_dvp);
3657 		ndp->ni_dvp = NULL;
3658 	}
3659 	if (!(flags & NDF_NO_VP_UNLOCK) &&
3660 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3661 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3662 	if (!(flags & NDF_NO_VP_RELE) &&
3663 	    ndp->ni_vp) {
3664 		vrele(ndp->ni_vp);
3665 		ndp->ni_vp = NULL;
3666 	}
3667 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3668 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3669 		vrele(ndp->ni_startdir);
3670 		ndp->ni_startdir = NULL;
3671 	}
3672 }
3673 
3674 /*
3675  * Common filesystem object access control check routine.  Accepts a
3676  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3677  * and optional call-by-reference privused argument allowing vaccess()
3678  * to indicate to the caller whether privilege was used to satisfy the
3679  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3680  */
3681 int
3682 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3683 	enum vtype type;
3684 	mode_t file_mode;
3685 	uid_t file_uid;
3686 	gid_t file_gid;
3687 	mode_t acc_mode;
3688 	struct ucred *cred;
3689 	int *privused;
3690 {
3691 	mode_t dac_granted;
3692 #ifdef CAPABILITIES
3693 	mode_t cap_granted;
3694 #endif
3695 
3696 	/*
3697 	 * Look for a normal, non-privileged way to access the file/directory
3698 	 * as requested.  If it exists, go with that.
3699 	 */
3700 
3701 	if (privused != NULL)
3702 		*privused = 0;
3703 
3704 	dac_granted = 0;
3705 
3706 	/* Check the owner. */
3707 	if (cred->cr_uid == file_uid) {
3708 		dac_granted |= VADMIN;
3709 		if (file_mode & S_IXUSR)
3710 			dac_granted |= VEXEC;
3711 		if (file_mode & S_IRUSR)
3712 			dac_granted |= VREAD;
3713 		if (file_mode & S_IWUSR)
3714 			dac_granted |= (VWRITE | VAPPEND);
3715 
3716 		if ((acc_mode & dac_granted) == acc_mode)
3717 			return (0);
3718 
3719 		goto privcheck;
3720 	}
3721 
3722 	/* Otherwise, check the groups (first match) */
3723 	if (groupmember(file_gid, cred)) {
3724 		if (file_mode & S_IXGRP)
3725 			dac_granted |= VEXEC;
3726 		if (file_mode & S_IRGRP)
3727 			dac_granted |= VREAD;
3728 		if (file_mode & S_IWGRP)
3729 			dac_granted |= (VWRITE | VAPPEND);
3730 
3731 		if ((acc_mode & dac_granted) == acc_mode)
3732 			return (0);
3733 
3734 		goto privcheck;
3735 	}
3736 
3737 	/* Otherwise, check everyone else. */
3738 	if (file_mode & S_IXOTH)
3739 		dac_granted |= VEXEC;
3740 	if (file_mode & S_IROTH)
3741 		dac_granted |= VREAD;
3742 	if (file_mode & S_IWOTH)
3743 		dac_granted |= (VWRITE | VAPPEND);
3744 	if ((acc_mode & dac_granted) == acc_mode)
3745 		return (0);
3746 
3747 privcheck:
3748 	if (!suser_cred(cred, PRISON_ROOT)) {
3749 		/* XXX audit: privilege used */
3750 		if (privused != NULL)
3751 			*privused = 1;
3752 		return (0);
3753 	}
3754 
3755 #ifdef CAPABILITIES
3756 	/*
3757 	 * Build a capability mask to determine if the set of capabilities
3758 	 * satisfies the requirements when combined with the granted mask
3759 	 * from above.
3760 	 * For each capability, if the capability is required, bitwise
3761 	 * or the request type onto the cap_granted mask.
3762 	 */
3763 	cap_granted = 0;
3764 
3765 	if (type == VDIR) {
3766 		/*
3767 		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3768 		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3769 		 */
3770 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3771 		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3772 			cap_granted |= VEXEC;
3773 	} else {
3774 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3775 		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
3776 			cap_granted |= VEXEC;
3777 	}
3778 
3779 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3780 	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3781 		cap_granted |= VREAD;
3782 
3783 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3784 	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
3785 		cap_granted |= (VWRITE | VAPPEND);
3786 
3787 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3788 	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
3789 		cap_granted |= VADMIN;
3790 
3791 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3792 		/* XXX audit: privilege used */
3793 		if (privused != NULL)
3794 			*privused = 1;
3795 		return (0);
3796 	}
3797 #endif
3798 
3799 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3800 }
3801 
3802 /*
3803  * Credential check based on process requesting service, and per-attribute
3804  * permissions.
3805  */
3806 int
3807 extattr_check_cred(struct vnode *vp, int attrnamespace,
3808     struct ucred *cred, struct thread *td, int access)
3809 {
3810 
3811 	/*
3812 	 * Kernel-invoked always succeeds.
3813 	 */
3814 	if (cred == NOCRED)
3815 		return (0);
3816 
3817 	/*
3818 	 * Do not allow privileged processes in jail to directly
3819 	 * manipulate system attributes.
3820 	 *
3821 	 * XXX What capability should apply here?
3822 	 * Probably CAP_SYS_SETFFLAG.
3823 	 */
3824 	switch (attrnamespace) {
3825 	case EXTATTR_NAMESPACE_SYSTEM:
3826 		/* Potentially should be: return (EPERM); */
3827 		return (suser_cred(cred, 0));
3828 	case EXTATTR_NAMESPACE_USER:
3829 		return (VOP_ACCESS(vp, access, cred, td));
3830 	default:
3831 		return (EPERM);
3832 	}
3833 }
3834