xref: /freebsd/sys/kern/vfs_subr.c (revision f9218d3d4fd34f082473b3a021c6d4d109fb47cf)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
39  * $FreeBSD$
40  */
41 
42 /*
43  * External virtual filesystem routines
44  */
45 #include "opt_ddb.h"
46 #include "opt_mac.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/conf.h>
53 #include <sys/eventhandler.h>
54 #include <sys/extattr.h>
55 #include <sys/fcntl.h>
56 #include <sys/kernel.h>
57 #include <sys/kthread.h>
58 #include <sys/mac.h>
59 #include <sys/malloc.h>
60 #include <sys/mount.h>
61 #include <sys/namei.h>
62 #include <sys/stat.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
65 #include <sys/vmmeter.h>
66 #include <sys/vnode.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_extern.h>
71 #include <vm/pmap.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_page.h>
74 #include <vm/uma.h>
75 
76 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
77 
78 static void	addalias(struct vnode *vp, dev_t nvp_rdev);
79 static void	insmntque(struct vnode *vp, struct mount *mp);
80 static void	vclean(struct vnode *vp, int flags, struct thread *td);
81 static void	vlruvp(struct vnode *vp);
82 static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
83 		    int slpflag, int slptimeo, int *errorp);
84 static int	vcanrecycle(struct vnode *vp, struct mount **vnmpp);
85 
86 
87 /*
88  * Number of vnodes in existence.  Increased whenever getnewvnode()
89  * allocates a new vnode, never decreased.
90  */
91 static unsigned long	numvnodes;
92 
93 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
94 
95 /*
96  * Conversion tables for conversion from vnode types to inode formats
97  * and back.
98  */
99 enum vtype iftovt_tab[16] = {
100 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
101 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
102 };
103 int vttoif_tab[9] = {
104 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
105 	S_IFSOCK, S_IFIFO, S_IFMT,
106 };
107 
108 /*
109  * List of vnodes that are ready for recycling.
110  */
111 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
112 
113 /*
114  * Minimum number of free vnodes.  If there are fewer free vnodes than this,
115  * getnewvnode() will return a newly allocated vnode.
116  */
117 static u_long wantfreevnodes = 25;
118 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
119 /* Number of vnodes in the free list. */
120 static u_long freevnodes;
121 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
122 
123 /*
124  * Various variables used for debugging the new implementation of
125  * reassignbuf().
126  * XXX these are probably of (very) limited utility now.
127  */
128 static int reassignbufcalls;
129 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
130 static int nameileafonly;
131 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
132 
133 #ifdef ENABLE_VFS_IOOPT
134 /* See NOTES for a description of this setting. */
135 int vfs_ioopt;
136 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
137 #endif
138 
139 /*
140  * Cache for the mount type id assigned to NFS.  This is used for
141  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
142  */
143 int	nfs_mount_type = -1;
144 
145 /* To keep more than one thread at a time from running vfs_getnewfsid */
146 static struct mtx mntid_mtx;
147 
148 /*
149  * Lock for any access to the following:
150  *	vnode_free_list
151  *	numvnodes
152  *	freevnodes
153  */
154 static struct mtx vnode_free_list_mtx;
155 
156 /*
157  * For any iteration/modification of dev->si_hlist (linked through
158  * v_specnext)
159  */
160 static struct mtx spechash_mtx;
161 
162 /* Publicly exported FS */
163 struct nfs_public nfs_pub;
164 
165 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
166 static uma_zone_t vnode_zone;
167 static uma_zone_t vnodepoll_zone;
168 
169 /* Set to 1 to print out reclaim of active vnodes */
170 int	prtactive;
171 
172 /*
173  * The workitem queue.
174  *
175  * It is useful to delay writes of file data and filesystem metadata
176  * for tens of seconds so that quickly created and deleted files need
177  * not waste disk bandwidth being created and removed. To realize this,
178  * we append vnodes to a "workitem" queue. When running with a soft
179  * updates implementation, most pending metadata dependencies should
180  * not wait for more than a few seconds. Thus, mounted on block devices
181  * are delayed only about a half the time that file data is delayed.
182  * Similarly, directory updates are more critical, so are only delayed
183  * about a third the time that file data is delayed. Thus, there are
184  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
185  * one each second (driven off the filesystem syncer process). The
186  * syncer_delayno variable indicates the next queue that is to be processed.
187  * Items that need to be processed soon are placed in this queue:
188  *
189  *	syncer_workitem_pending[syncer_delayno]
190  *
191  * A delay of fifteen seconds is done by placing the request fifteen
192  * entries later in the queue:
193  *
194  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
195  *
196  */
197 static int syncer_delayno;
198 static long syncer_mask;
199 LIST_HEAD(synclist, vnode);
200 static struct synclist *syncer_workitem_pending;
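/*
 * Illustrative sketch (not compiled into the kernel): how a requested delay
 * maps onto a slot in the workitem ring described above.  With the default
 * SYNCER_MAXDELAY of 32 the hash mask works out to 31, so the ring simply
 * wraps; e.g. delayno 20 plus a 15 second delay lands in slot 3.  The
 * parameter names mirror the syncer variables declared above.
 */
#if 0
static int
example_syncer_slot(int delayno, int delay, long mask)
{

	return ((delayno + delay) & mask);
}
#endif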
201 /*
202  * The sync_mtx protects:
203  *	vp->v_synclist
204  *	syncer_delayno
205  *	syncer_workitem_pending
206  *	rushjob
207  */
208 static struct mtx sync_mtx;
209 
210 #define SYNCER_MAXDELAY		32
211 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
212 static int syncdelay = 30;		/* max time to delay syncing data */
213 static int filedelay = 30;		/* time to delay syncing files */
214 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
215 static int dirdelay = 29;		/* time to delay syncing directories */
216 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
217 static int metadelay = 28;		/* time to delay syncing metadata */
218 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
219 static int rushjob;		/* number of slots to run ASAP */
220 static int stat_rush_requests;	/* number of times I/O speeded up */
221 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
222 
223 /*
224  * Number of vnodes we want to exist at any one time.  This is mostly used
225  * to size hash tables in vnode-related code.  It is normally not used in
226  * getnewvnode(), as wantfreevnodes is normally nonzero.
227  *
228  * XXX desiredvnodes is historical cruft and should not exist.
229  */
230 int desiredvnodes;
231 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
232     &desiredvnodes, 0, "Maximum number of vnodes");
233 static int minvnodes;
234 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
235     &minvnodes, 0, "Minimum number of vnodes");
236 static int vnlru_nowhere;
237 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
238     "Number of times the vnlru process ran without success");
239 
240 /* Hook for calling soft updates */
241 int (*softdep_process_worklist_hook)(struct mount *);
242 
243 /*
244  * This only exists to suppress warnings from unlocked specfs accesses.  It is
245  * no longer ok to have an unlocked VFS.
246  */
247 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
248 
249 /* Print lock violations */
250 int vfs_badlock_print = 1;
251 
252 /* Panic on violation */
253 int vfs_badlock_panic = 1;
254 
255 /* Check for interlock across VOPs */
256 int vfs_badlock_mutex = 1;
257 
258 static void
259 vfs_badlock(char *msg, char *str, struct vnode *vp)
260 {
261 	if (vfs_badlock_print)
262 		printf("%s: %p %s\n", str, vp, msg);
263 	if (vfs_badlock_panic)
264 		Debugger("Lock violation.\n");
265 }
266 
267 void
268 assert_vi_unlocked(struct vnode *vp, char *str)
269 {
270 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
271 		vfs_badlock("interlock is locked but should not be", str, vp);
272 }
273 
274 void
275 assert_vi_locked(struct vnode *vp, char *str)
276 {
277 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
278 		vfs_badlock("interlock is not locked but should be", str, vp);
279 }
280 
281 void
282 assert_vop_locked(struct vnode *vp, char *str)
283 {
284 	if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
285 		vfs_badlock("is not locked but should be", str, vp);
286 }
287 
288 void
289 assert_vop_unlocked(struct vnode *vp, char *str)
290 {
291 	if (vp && !IGNORE_LOCK(vp) &&
292 	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
293 		vfs_badlock("is locked but should not be", str, vp);
294 }
295 
296 void
297 assert_vop_elocked(struct vnode *vp, char *str)
298 {
299 	if (vp && !IGNORE_LOCK(vp) &&
300 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
301 		vfs_badlock("is not exclusive locked but should be", str, vp);
302 }
303 
304 void
305 assert_vop_elocked_other(struct vnode *vp, char *str)
306 {
307 	if (vp && !IGNORE_LOCK(vp) &&
308 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
309 		vfs_badlock("is not exclusive locked by another thread",
310 		    str, vp);
311 }
312 
313 void
314 assert_vop_slocked(struct vnode *vp, char *str)
315 {
316 	if (vp && !IGNORE_LOCK(vp) &&
317 	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
318 		vfs_badlock("is not locked shared but should be", str, vp);
319 }
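/*
 * A minimal sketch (not compiled) of how a filesystem's VOP implementation
 * might use the assertions above through the ASSERT_VOP_*()/ASSERT_VI_*()
 * macros that wrap them; "examplefs_getattr" is a hypothetical function.
 */
#if 0
static int
examplefs_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;

	ASSERT_VOP_LOCKED(vp, "examplefs_getattr");
	ASSERT_VI_UNLOCKED(vp, "examplefs_getattr");
	/* ... fill in *ap->a_vap from the filesystem's own metadata ... */
	return (0);
}
#endif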
320 
321 void
322 vop_rename_pre(void *ap)
323 {
324 	struct vop_rename_args *a = ap;
325 
326 	if (a->a_tvp)
327 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
328 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
329 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
330 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
331 
332 	/* Check the source (from) */
333 	if (a->a_tdvp != a->a_fdvp)
334 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
335 	if (a->a_tvp != a->a_fvp)
336 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");
337 
338 	/* Check the target */
339 	if (a->a_tvp)
340 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");
341 
342 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
343 }
344 
345 void
346 vop_strategy_pre(void *ap)
347 {
348 	struct vop_strategy_args *a = ap;
349 	struct buf *bp;
350 
351 	bp = a->a_bp;
352 
353 	/*
354 	 * Cluster ops lock their component buffers but not the IO container.
355 	 */
356 	if ((bp->b_flags & B_CLUSTER) != 0)
357 		return;
358 
359 	if (BUF_REFCNT(bp) < 1) {
360 		if (vfs_badlock_print)
361 			printf("VOP_STRATEGY: bp is not locked but should be.\n");
362 		if (vfs_badlock_panic)
363 			Debugger("Lock violation.\n");
364 	}
365 }
366 
367 void
368 vop_lookup_pre(void *ap)
369 {
370 	struct vop_lookup_args *a = ap;
371 	struct vnode *dvp;
372 
373 	dvp = a->a_dvp;
374 
375 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
376 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
377 }
378 
379 void
380 vop_lookup_post(void *ap, int rc)
381 {
382 	struct vop_lookup_args *a = ap;
383 	struct componentname *cnp;
384 	struct vnode *dvp;
385 	struct vnode *vp;
386 	int flags;
387 
388 	dvp = a->a_dvp;
389 	cnp = a->a_cnp;
390 	vp = *(a->a_vpp);
391 	flags = cnp->cn_flags;
392 
393 
394 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
395 	/*
396 	 * If this is the last path component for this lookup and LOCKPARENT
397 	 * is set, or if there is an error, the directory has to be locked.
398 	 */
399 	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
400 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
401 	else if (rc != 0)
402 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
403 	else if (dvp != vp)
404 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
405 
406 	if (flags & PDIRUNLOCK)
407 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
408 }
409 
410 void
411 vop_unlock_pre(void *ap)
412 {
413 	struct vop_unlock_args *a = ap;
414 
415 	if (a->a_flags & LK_INTERLOCK)
416 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
417 
418 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
419 }
420 
421 void
422 vop_unlock_post(void *ap, int rc)
423 {
424 	struct vop_unlock_args *a = ap;
425 
426 	if (a->a_flags & LK_INTERLOCK)
427 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
428 }
429 
430 void
431 vop_lock_pre(void *ap)
432 {
433 	struct vop_lock_args *a = ap;
434 
435 	if ((a->a_flags & LK_INTERLOCK) == 0)
436 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
437 	else
438 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
439 }
440 
441 void
442 vop_lock_post(void *ap, int rc)
443 {
444 	struct vop_lock_args *a;
445 
446 	a = ap;
447 
448 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
449 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
450 }
451 
452 void
453 v_addpollinfo(struct vnode *vp)
454 {
455 	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, M_WAITOK);
456 	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
457 }
458 
459 /*
460  * Initialize the vnode management data structures.
461  */
462 static void
463 vntblinit(void *dummy __unused)
464 {
465 
466 	desiredvnodes = maxproc + cnt.v_page_count / 4;
467 	minvnodes = desiredvnodes / 4;
468 	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
469 	mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
470 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
471 	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
472 	TAILQ_INIT(&vnode_free_list);
473 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
474 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
475 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
476 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
477 	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
478 	/*
479 	 * Initialize the filesystem syncer.
480 	 */
481 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
482 		&syncer_mask);
483 	syncer_maxdelay = syncer_mask + 1;
484 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
485 }
486 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
487 
488 
489 /*
490  * Mark a mount point as busy. Used to synchronize access and to delay
491  * unmounting. Interlock is not released on failure.
492  */
493 int
494 vfs_busy(mp, flags, interlkp, td)
495 	struct mount *mp;
496 	int flags;
497 	struct mtx *interlkp;
498 	struct thread *td;
499 {
500 	int lkflags;
501 
502 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
503 		if (flags & LK_NOWAIT)
504 			return (ENOENT);
505 		mp->mnt_kern_flag |= MNTK_MWAIT;
506 		/*
507 		 * Since all busy locks are shared except the exclusive
508 		 * lock granted when unmounting, the only place that a
509 		 * wakeup needs to be done is at the release of the
510 		 * exclusive lock at the end of dounmount.
511 		 */
512 		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
513 		return (ENOENT);
514 	}
515 	lkflags = LK_SHARED | LK_NOPAUSE;
516 	if (interlkp)
517 		lkflags |= LK_INTERLOCK;
518 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
519 		panic("vfs_busy: unexpected lock failure");
520 	return (0);
521 }
522 
523 /*
524  * Free a busy filesystem.
525  */
526 void
527 vfs_unbusy(mp, td)
528 	struct mount *mp;
529 	struct thread *td;
530 {
531 
532 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
533 }
534 
535 /*
536  * Lookup a mount point by filesystem identifier.
537  */
538 struct mount *
539 vfs_getvfs(fsid)
540 	fsid_t *fsid;
541 {
542 	register struct mount *mp;
543 
544 	mtx_lock(&mountlist_mtx);
545 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
546 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
547 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
548 			mtx_unlock(&mountlist_mtx);
549 			return (mp);
550 		}
551 	}
552 	mtx_unlock(&mountlist_mtx);
553 	return ((struct mount *) 0);
554 }
555 
556 /*
557  * Get a new unique fsid.  Try to make its val[0] unique, since this value
558  * will be used to create fake device numbers for stat().  Also try (but
559  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
560  * support 16-bit device numbers.  We end up with unique val[0]'s for the
561  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
562  *
563  * Keep in mind that several mounts may be running in parallel.  Starting
564  * the search one past where the previous search terminated is both a
565  * micro-optimization and a defense against returning the same fsid to
566  * different mounts.
567  */
568 void
569 vfs_getnewfsid(mp)
570 	struct mount *mp;
571 {
572 	static u_int16_t mntid_base;
573 	fsid_t tfsid;
574 	int mtype;
575 
576 	mtx_lock(&mntid_mtx);
577 	mtype = mp->mnt_vfc->vfc_typenum;
578 	tfsid.val[1] = mtype;
579 	mtype = (mtype & 0xFF) << 24;
580 	for (;;) {
581 		tfsid.val[0] = makeudev(255,
582 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
583 		mntid_base++;
584 		if (vfs_getvfs(&tfsid) == NULL)
585 			break;
586 	}
587 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
588 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
589 	mtx_unlock(&mntid_mtx);
590 }
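/*
 * Illustrative sketch (not compiled) of the val[0] layout composed above:
 * the low byte of the filesystem type number occupies the top byte of the
 * minor number, and the 16-bit mntid_base is spread across the remaining
 * minor bits.  "example_fsid_val0" is a hypothetical helper mirroring the
 * loop body in vfs_getnewfsid().
 */
#if 0
static int32_t
example_fsid_val0(int vfc_typenum, u_int16_t mntid_base)
{
	int mtype;

	mtype = (vfc_typenum & 0xFF) << 24;
	return (makeudev(255,
	    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF)));
}
#endif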
591 
592 /*
593  * Knob to control the precision of file timestamps:
594  *
595  *   0 = seconds only; nanoseconds zeroed.
596  *   1 = seconds and nanoseconds, accurate within 1/HZ.
597  *   2 = seconds and nanoseconds, truncated to microseconds.
598  * >=3 = seconds and nanoseconds, maximum precision.
599  */
600 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
601 
602 static int timestamp_precision = TSP_SEC;
603 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
604     &timestamp_precision, 0, "");
605 
606 /*
607  * Get a current timestamp.
608  */
609 void
610 vfs_timestamp(tsp)
611 	struct timespec *tsp;
612 {
613 	struct timeval tv;
614 
615 	switch (timestamp_precision) {
616 	case TSP_SEC:
617 		tsp->tv_sec = time_second;
618 		tsp->tv_nsec = 0;
619 		break;
620 	case TSP_HZ:
621 		getnanotime(tsp);
622 		break;
623 	case TSP_USEC:
624 		microtime(&tv);
625 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
626 		break;
627 	case TSP_NSEC:
628 	default:
629 		nanotime(tsp);
630 		break;
631 	}
632 }
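/*
 * A minimal usage sketch (not compiled): callers that need a timestamp at
 * the configured vfs.timestamp_precision simply hand vfs_timestamp() a
 * timespec to fill in, e.g. when stamping a modification time;
 * "example_stamp_mtime" is hypothetical.
 */
#if 0
static void
example_stamp_mtime(struct vattr *vap)
{

	vfs_timestamp(&vap->va_mtime);
}
#endif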
633 
634 /*
635  * Set vnode attributes to VNOVAL
636  */
637 void
638 vattr_null(vap)
639 	register struct vattr *vap;
640 {
641 
642 	vap->va_type = VNON;
643 	vap->va_size = VNOVAL;
644 	vap->va_bytes = VNOVAL;
645 	vap->va_mode = VNOVAL;
646 	vap->va_nlink = VNOVAL;
647 	vap->va_uid = VNOVAL;
648 	vap->va_gid = VNOVAL;
649 	vap->va_fsid = VNOVAL;
650 	vap->va_fileid = VNOVAL;
651 	vap->va_blocksize = VNOVAL;
652 	vap->va_rdev = VNOVAL;
653 	vap->va_atime.tv_sec = VNOVAL;
654 	vap->va_atime.tv_nsec = VNOVAL;
655 	vap->va_mtime.tv_sec = VNOVAL;
656 	vap->va_mtime.tv_nsec = VNOVAL;
657 	vap->va_ctime.tv_sec = VNOVAL;
658 	vap->va_ctime.tv_nsec = VNOVAL;
659 	vap->va_birthtime.tv_sec = VNOVAL;
660 	vap->va_birthtime.tv_nsec = VNOVAL;
661 	vap->va_flags = VNOVAL;
662 	vap->va_gen = VNOVAL;
663 	vap->va_vaflags = 0;
664 }
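/*
 * A minimal sketch (not compiled) of the usual vattr_null() pattern: reset
 * every attribute to VNOVAL and then set only the fields the caller wants
 * VOP_SETATTR() to change; "example_set_size" is hypothetical.
 */
#if 0
static int
example_set_size(struct vnode *vp, off_t size, struct ucred *cred,
    struct thread *td)
{
	struct vattr va;

	vattr_null(&va);
	va.va_size = size;
	return (VOP_SETATTR(vp, &va, cred, td));
}
#endif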
665 
666 /*
667  * This routine is called when we have too many vnodes.  It attempts
668  * to free <count> vnodes and will potentially free vnodes that still
669  * have VM backing store (VM backing store is typically the cause
670  * of a vnode blowout so we want to do this).  Therefore, this operation
671  * is not considered cheap.
672  *
673  * A number of conditions may prevent a vnode from being reclaimed:
674  * the buffer cache may have references on the vnode, a directory
675  * vnode may still have references due to the namei cache representing
676  * underlying files, or the vnode may be in active use.   It is not
677  * desirable to reuse such vnodes.  These conditions may cause the
678  * number of vnodes to reach some minimum value regardless of what
679  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
680  */
681 static int
682 vlrureclaim(struct mount *mp, int count)
683 {
684 	struct vnode *vp;
685 	int done;
686 	int trigger;
687 	int usevnodes;
688 
689 	/*
690 	 * Calculate the trigger point, don't allow user
691 	 * screwups to blow us up.   This prevents us from
692 	 * recycling vnodes with lots of resident pages.  We
693 	 * aren't trying to free memory, we are trying to
694 	 * free vnodes.
695 	 */
696 	usevnodes = desiredvnodes;
697 	if (usevnodes <= 0)
698 		usevnodes = 1;
699 	trigger = cnt.v_page_count * 2 / usevnodes;
700 
701 	done = 0;
702 	mtx_lock(&mntvnode_mtx);
703 	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
704 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
705 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
706 
707 		if (vp->v_type != VNON &&
708 		    vp->v_type != VBAD &&
709 		    VI_TRYLOCK(vp)) {
710 			if (VMIGHTFREE(vp) &&           /* critical path opt */
711 			    (vp->v_object == NULL ||
712 			    vp->v_object->resident_page_count < trigger)) {
713 				mtx_unlock(&mntvnode_mtx);
714 				vgonel(vp, curthread);
715 				done++;
716 				mtx_lock(&mntvnode_mtx);
717 			} else
718 				VI_UNLOCK(vp);
719 		}
720 		--count;
721 	}
722 	mtx_unlock(&mntvnode_mtx);
723 	return done;
724 }
725 
726 /*
727  * Attempt to recycle vnodes in a context that is always safe to block.
728  * Calling vlrureclaim() from the bowels of filesystem code has some
729  * interesting deadlock problems.
730  */
731 static struct proc *vnlruproc;
732 static int vnlruproc_sig;
733 
734 static void
735 vnlru_proc(void)
736 {
737 	struct mount *mp, *nmp;
738 	int s;
739 	int done, take;
740 	struct proc *p = vnlruproc;
741 	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
742 
743 	mtx_lock(&Giant);
744 
745 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
746 	    SHUTDOWN_PRI_FIRST);
747 
748 	s = splbio();
749 	for (;;) {
750 		kthread_suspend_check(p);
751 		mtx_lock(&vnode_free_list_mtx);
752 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
753 			mtx_unlock(&vnode_free_list_mtx);
754 			vnlruproc_sig = 0;
755 			wakeup(&vnlruproc_sig);
756 			tsleep(vnlruproc, PVFS, "vlruwt", hz);
757 			continue;
758 		}
759 		mtx_unlock(&vnode_free_list_mtx);
760 		done = 0;
761 		mtx_lock(&mountlist_mtx);
762 		take = 0;
763 		TAILQ_FOREACH(mp, &mountlist, mnt_list)
764 			take++;
765 		take = desiredvnodes / (take * 10);
766 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
767 			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
768 				nmp = TAILQ_NEXT(mp, mnt_list);
769 				continue;
770 			}
771 			done += vlrureclaim(mp, take);
772 			mtx_lock(&mountlist_mtx);
773 			nmp = TAILQ_NEXT(mp, mnt_list);
774 			vfs_unbusy(mp, td);
775 		}
776 		mtx_unlock(&mountlist_mtx);
777 		if (done == 0) {
778 #if 0
779 			/* These messages are temporary debugging aids */
780 			if (vnlru_nowhere < 5)
781 				printf("vnlru process getting nowhere..\n");
782 			else if (vnlru_nowhere == 5)
783 				printf("vnlru process messages stopped.\n");
784 #endif
785 			vnlru_nowhere++;
786 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
787 		}
788 	}
789 	splx(s);
790 }
791 
792 static struct kproc_desc vnlru_kp = {
793 	"vnlru",
794 	vnlru_proc,
795 	&vnlruproc
796 };
797 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
798 
799 
800 /*
801  * Routines having to do with the management of the vnode table.
802  */
803 
804 /*
805  * Check to see if a free vnode can be recycled. If it can,
806  * return it locked with the vn lock, but not the interlock.  Also
807  * get the vn_start_write lock. Otherwise indicate the error.
808  */
809 static int
810 vcanrecycle(struct vnode *vp, struct mount **vnmpp)
811 {
812 	struct thread *td = curthread;
813 	vm_object_t object;
814 	int error;
815 
816 	/* Don't recycle if we can't get the interlock */
817 	if (!VI_TRYLOCK(vp))
818 		return (EWOULDBLOCK);
819 
820 	/* We should be able to immediately acquire this */
821 	/* XXX This looks like it should panic if it fails */
822 	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0) {
823 		if (VOP_ISLOCKED(vp, td))
824 			panic("vcanrecycle: locked vnode");
825 		return (EWOULDBLOCK);
826 	}
827 
828 	/*
829 	 * Don't recycle if its filesystem is being suspended.
830 	 */
831 	if (vn_start_write(vp, vnmpp, V_NOWAIT) != 0) {
832 		error = EBUSY;
833 		goto done;
834 	}
835 
836 	/*
837 	 * Don't recycle if we still have cached pages.
838 	 */
839 	if (VOP_GETVOBJECT(vp, &object) == 0 &&
840 	     (object->resident_page_count ||
841 	      object->ref_count)) {
842 		error = EBUSY;
843 		goto done;
844 	}
845 	if (LIST_FIRST(&vp->v_cache_src)) {
846 		/*
847 		 * note: nameileafonly sysctl is temporary,
848 		 * for debugging only, and will eventually be
849 		 * removed.
850 		 */
851 		if (nameileafonly > 0) {
852 			/*
853 			 * Do not reuse namei-cached directory
854 			 * vnodes that have cached
855 			 * subdirectories.
856 			 */
857 			if (cache_leaf_test(vp) < 0) {
858 				error = EISDIR;
859 				goto done;
860 			}
861 		} else if (nameileafonly < 0 ||
862 			    vmiodirenable == 0) {
863 			/*
864 			 * Do not reuse namei-cached directory
865 			 * vnodes if nameileafonly is -1 or
866 			 * if VMIO backing for directories is
867 			 * turned off (otherwise we reuse them
868 			 * too quickly).
869 			 */
870 			error = EBUSY;
871 			goto done;
872 		}
873 	}
874 	return (0);
875 done:
876 	VOP_UNLOCK(vp, 0, td);
877 	return (error);
878 }
879 
880 /*
881  * Return the next vnode from the free list.
882  */
883 int
884 getnewvnode(tag, mp, vops, vpp)
885 	const char *tag;
886 	struct mount *mp;
887 	vop_t **vops;
888 	struct vnode **vpp;
889 {
890 	int s;
891 	struct thread *td = curthread;	/* XXX */
892 	struct vnode *vp = NULL;
893 	struct vpollinfo *pollinfo = NULL;
894 	struct mount *vnmp;
895 
896 	s = splbio();
897 	mtx_lock(&vnode_free_list_mtx);
898 
899 	/*
900 	 * Try to reuse vnodes if we hit the max.  This situation only
901 	 * occurs on certain large-memory (2G+) machines.  We cannot
902 	 * attempt to directly reclaim vnodes due to nasty recursion
903 	 * problems.
904 	 */
905 	while (numvnodes - freevnodes > desiredvnodes) {
906 		if (vnlruproc_sig == 0) {
907 			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
908 			wakeup(vnlruproc);
909 		}
910 		mtx_unlock(&vnode_free_list_mtx);
911 		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
912 		mtx_lock(&vnode_free_list_mtx);
913 	}
914 
915 	/*
916 	 * Attempt to reuse a vnode already on the free list, allocating
917 	 * a new vnode if we can't find one or if we have not yet reached
918 	 * the minimum needed for good LRU performance.
919 	 */
920 
921 	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
922 		int error;
923 		int count;
924 
925 		for (count = 0; count < freevnodes; count++) {
926 			vp = TAILQ_FIRST(&vnode_free_list);
927 
928 			KASSERT(vp->v_usecount == 0 &&
929 			    (vp->v_iflag & VI_DOINGINACT) == 0,
930 			    ("getnewvnode: free vnode isn't"));
931 
932 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
933 			/*
934 			 * We have to drop the free list mtx to avoid lock
935 			 * order reversals with interlock.
936 			 */
937 			mtx_unlock(&vnode_free_list_mtx);
938 			error = vcanrecycle(vp, &vnmp);
939 			mtx_lock(&vnode_free_list_mtx);
940 			if (error == 0)
941 				break;
942 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
943 			vp = NULL;
944 		}
945 	}
946 	if (vp) {
947 		freevnodes--;
948 		mtx_unlock(&vnode_free_list_mtx);
949 
950 		cache_purge(vp);
951 		VI_LOCK(vp);
952 		vp->v_iflag |= VI_DOOMED;
953 		vp->v_iflag &= ~VI_FREE;
954 		if (vp->v_type != VBAD) {
955 			VOP_UNLOCK(vp, 0, td);
956 			vgonel(vp, td);
957 			VI_LOCK(vp);
958 		} else {
959 			VOP_UNLOCK(vp, 0, td);
960 		}
961 		vn_finished_write(vnmp);
962 
963 #ifdef INVARIANTS
964 		{
965 			if (vp->v_data)
966 				panic("cleaned vnode isn't");
967 			if (vp->v_numoutput)
968 				panic("Clean vnode has pending I/O's");
969 			if (vp->v_writecount != 0)
970 				panic("Non-zero write count");
971 		}
972 #endif
973 		if ((pollinfo = vp->v_pollinfo) != NULL) {
974 			/*
975 			 * To avoid lock order reversals, the call to
976 			 * uma_zfree() must be delayed until the vnode
977 			 * interlock is released.
978 			 */
979 			vp->v_pollinfo = NULL;
980 		}
981 #ifdef MAC
982 		mac_destroy_vnode(vp);
983 #endif
984 		vp->v_iflag = 0;
985 		vp->v_vflag = 0;
986 		vp->v_lastw = 0;
987 		vp->v_lasta = 0;
988 		vp->v_cstart = 0;
989 		vp->v_clen = 0;
990 		vp->v_socket = 0;
991 		lockdestroy(vp->v_vnlock);
992 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
993 		KASSERT(vp->v_cleanbufcnt == 0, ("cleanbufcnt not 0"));
994 		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
995 		KASSERT(vp->v_dirtybufcnt == 0, ("dirtybufcnt not 0"));
996 		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
997 	} else {
998 		numvnodes++;
999 		mtx_unlock(&vnode_free_list_mtx);
1000 
1001 		vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
1002 		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
1003 		VI_LOCK(vp);
1004 		vp->v_dd = vp;
1005 		vp->v_vnlock = &vp->v_lock;
1006 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
1007 		cache_purge(vp);
1008 		LIST_INIT(&vp->v_cache_src);
1009 		TAILQ_INIT(&vp->v_cache_dst);
1010 	}
1011 
1012 	TAILQ_INIT(&vp->v_cleanblkhd);
1013 	TAILQ_INIT(&vp->v_dirtyblkhd);
1014 	vp->v_type = VNON;
1015 	vp->v_tag = tag;
1016 	vp->v_op = vops;
1017 	*vpp = vp;
1018 	vp->v_usecount = 1;
1019 	vp->v_data = 0;
1020 	vp->v_cachedid = -1;
1021 	VI_UNLOCK(vp);
1022 	if (pollinfo != NULL) {
1023 		mtx_destroy(&pollinfo->vpi_lock);
1024 		uma_zfree(vnodepoll_zone, pollinfo);
1025 	}
1026 #ifdef MAC
1027 	mac_init_vnode(vp);
1028 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1029 		mac_associate_vnode_singlelabel(mp, vp);
1030 #endif
1031 	insmntque(vp, mp);
1032 
1033 	return (0);
1034 }
1035 
1036 /*
1037  * Move a vnode from one mount queue to another.
1038  */
1039 static void
1040 insmntque(vp, mp)
1041 	register struct vnode *vp;
1042 	register struct mount *mp;
1043 {
1044 
1045 	mtx_lock(&mntvnode_mtx);
1046 	/*
1047 	 * Delete from old mount point vnode list, if on one.
1048 	 */
1049 	if (vp->v_mount != NULL)
1050 		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
1051 	/*
1052 	 * Insert into list of vnodes for the new mount point, if available.
1053 	 */
1054 	if ((vp->v_mount = mp) == NULL) {
1055 		mtx_unlock(&mntvnode_mtx);
1056 		return;
1057 	}
1058 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1059 	mtx_unlock(&mntvnode_mtx);
1060 }
1061 
1062 /*
1063  * Update outstanding I/O count and do wakeup if requested.
1064  */
1065 void
1066 vwakeup(bp)
1067 	register struct buf *bp;
1068 {
1069 	register struct vnode *vp;
1070 
1071 	bp->b_flags &= ~B_WRITEINPROG;
1072 	if ((vp = bp->b_vp)) {
1073 		VI_LOCK(vp);
1074 		vp->v_numoutput--;
1075 		if (vp->v_numoutput < 0)
1076 			panic("vwakeup: neg numoutput");
1077 		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
1078 			vp->v_iflag &= ~VI_BWAIT;
1079 			wakeup(&vp->v_numoutput);
1080 		}
1081 		VI_UNLOCK(vp);
1082 	}
1083 }
1084 
1085 /*
1086  * Flush out and invalidate all buffers associated with a vnode.
1087  * Called with the underlying object locked.
1088  */
1089 int
1090 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
1091 	struct vnode *vp;
1092 	int flags;
1093 	struct ucred *cred;
1094 	struct thread *td;
1095 	int slpflag, slptimeo;
1096 {
1097 	struct buf *blist;
1098 	int s, error;
1099 	vm_object_t object;
1100 
1101 	GIANT_REQUIRED;
1102 
1103 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1104 
1105 	VI_LOCK(vp);
1106 	if (flags & V_SAVE) {
1107 		s = splbio();
1108 		while (vp->v_numoutput) {
1109 			vp->v_iflag |= VI_BWAIT;
1110 			error = msleep(&vp->v_numoutput, VI_MTX(vp),
1111 			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
1112 			if (error) {
1113 				VI_UNLOCK(vp);
1114 				splx(s);
1115 				return (error);
1116 			}
1117 		}
1118 		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1119 			splx(s);
1120 			VI_UNLOCK(vp);
1121 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
1122 				return (error);
1123 			/*
1124 			 * XXX We could save a lock/unlock if this was only
1125 			 * enabled under INVARIANTS
1126 			 */
1127 			VI_LOCK(vp);
1128 			s = splbio();
1129 			if (vp->v_numoutput > 0 ||
1130 			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
1131 				panic("vinvalbuf: dirty bufs");
1132 		}
1133 		splx(s);
1134 	}
1135 	s = splbio();
1136 	/*
1137 	 * If you alter this loop please notice that interlock is dropped and
1138 	 * reacquired in flushbuflist.  Special care is needed to ensure that
1139 	 * no race conditions occur from this.
1140 	 */
1141 	for (error = 0;;) {
1142 		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
1143 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
1144 			if (error)
1145 				break;
1146 			continue;
1147 		}
1148 		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
1149 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
1150 			if (error)
1151 				break;
1152 			continue;
1153 		}
1154 		break;
1155 	}
1156 	if (error) {
1157 		splx(s);
1158 		VI_UNLOCK(vp);
1159 		return (error);
1160 	}
1161 
1162 	/*
1163 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
1164 	 * have write I/O in-progress but if there is a VM object then the
1165 	 * VM object can also have read-I/O in-progress.
1166 	 */
1167 	do {
1168 		while (vp->v_numoutput > 0) {
1169 			vp->v_iflag |= VI_BWAIT;
1170 			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
1171 		}
1172 		VI_UNLOCK(vp);
1173 		if (VOP_GETVOBJECT(vp, &object) == 0) {
1174 			while (object->paging_in_progress)
1175 			vm_object_pip_sleep(object, "vnvlbx");
1176 		}
1177 		VI_LOCK(vp);
1178 	} while (vp->v_numoutput > 0);
1179 	VI_UNLOCK(vp);
1180 
1181 	splx(s);
1182 
1183 	/*
1184 	 * Destroy the copy in the VM cache, too.
1185 	 */
1186 	if (VOP_GETVOBJECT(vp, &object) == 0) {
1187 		vm_object_lock(object);
1188 		vm_object_page_remove(object, 0, 0,
1189 			(flags & V_SAVE) ? TRUE : FALSE);
1190 		vm_object_unlock(object);
1191 	}
1192 
1193 #ifdef INVARIANTS
1194 	VI_LOCK(vp);
1195 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1196 	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
1197 	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
1198 		panic("vinvalbuf: flush failed");
1199 	VI_UNLOCK(vp);
1200 #endif
1201 	return (0);
1202 }
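/*
 * A minimal usage sketch (not compiled): a typical caller writes back and
 * then invalidates all of a vnode's buffers before detaching its data,
 * with the vnode lock already held as asserted above;
 * "example_flush_vnode" is hypothetical.
 */
#if 0
static int
example_flush_vnode(struct vnode *vp, struct thread *td)
{

	/* V_SAVE: write dirty buffers out rather than discarding them. */
	return (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0));
}
#endif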
1203 
1204 /*
1205  * Flush out buffers on the specified list.
1206  *
1207  */
1208 static int
1209 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
1210 	struct buf *blist;
1211 	int flags;
1212 	struct vnode *vp;
1213 	int slpflag, slptimeo;
1214 	int *errorp;
1215 {
1216 	struct buf *bp, *nbp;
1217 	int found, error;
1218 
1219 	ASSERT_VI_LOCKED(vp, "flushbuflist");
1220 
1221 	for (found = 0, bp = blist; bp; bp = nbp) {
1222 		nbp = TAILQ_NEXT(bp, b_vnbufs);
1223 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1224 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1225 			continue;
1226 		}
1227 		found += 1;
1228 		error = BUF_TIMELOCK(bp,
1229 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, VI_MTX(vp),
1230 		    "flushbuf", slpflag, slptimeo);
1231 		if (error) {
1232 			if (error != ENOLCK)
1233 				*errorp = error;
1234 			goto done;
1235 		}
1236 		/*
1237 		 * XXX Since there are no node locks for NFS, I
1238 		 * believe there is a slight chance that a delayed
1239 		 * write will occur while sleeping just above, so
1240 		 * check for it.  Note that vfs_bio_awrite expects
1241 		 * buffers to reside on a queue, while BUF_WRITE and
1242 		 * brelse do not.
1243 		 */
1244 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1245 			(flags & V_SAVE)) {
1246 
1247 			if (bp->b_vp == vp) {
1248 				if (bp->b_flags & B_CLUSTEROK) {
1249 					BUF_UNLOCK(bp);
1250 					vfs_bio_awrite(bp);
1251 				} else {
1252 					bremfree(bp);
1253 					bp->b_flags |= B_ASYNC;
1254 					BUF_WRITE(bp);
1255 				}
1256 			} else {
1257 				bremfree(bp);
1258 				(void) BUF_WRITE(bp);
1259 			}
1260 			goto done;
1261 		}
1262 		bremfree(bp);
1263 		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1264 		bp->b_flags &= ~B_ASYNC;
1265 		brelse(bp);
1266 		VI_LOCK(vp);
1267 	}
1268 	return (found);
1269 done:
1270 	VI_LOCK(vp);
1271 	return (found);
1272 }
1273 
1274 /*
1275  * Truncate a file's buffer and pages to a specified length.  This
1276  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1277  * sync activity.
1278  */
1279 int
1280 vtruncbuf(vp, cred, td, length, blksize)
1281 	register struct vnode *vp;
1282 	struct ucred *cred;
1283 	struct thread *td;
1284 	off_t length;
1285 	int blksize;
1286 {
1287 	register struct buf *bp;
1288 	struct buf *nbp;
1289 	int s, anyfreed;
1290 	int trunclbn;
1291 
1292 	/*
1293 	 * Round up to the *next* lbn.
1294 	 */
1295 	trunclbn = (length + blksize - 1) / blksize;
1296 
1297 	s = splbio();
1298 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1299 restart:
1300 	VI_LOCK(vp);
1301 	anyfreed = 1;
1302 	for (;anyfreed;) {
1303 		anyfreed = 0;
1304 		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
1305 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1306 			if (bp->b_lblkno >= trunclbn) {
1307 				if (BUF_LOCK(bp,
1308 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1309 				    VI_MTX(vp)) == ENOLCK)
1310 					goto restart;
1311 
1312 				bremfree(bp);
1313 				bp->b_flags |= (B_INVAL | B_RELBUF);
1314 				bp->b_flags &= ~B_ASYNC;
1315 				brelse(bp);
1316 				anyfreed = 1;
1317 
1318 				if (nbp &&
1319 				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1320 				    (nbp->b_vp != vp) ||
1321 				    (nbp->b_flags & B_DELWRI))) {
1322 					goto restart;
1323 				}
1324 				VI_LOCK(vp);
1325 			}
1326 		}
1327 
1328 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1329 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1330 			if (bp->b_lblkno >= trunclbn) {
1331 				if (BUF_LOCK(bp,
1332 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1333 				    VI_MTX(vp)) == ENOLCK)
1334 					goto restart;
1335 				bremfree(bp);
1336 				bp->b_flags |= (B_INVAL | B_RELBUF);
1337 				bp->b_flags &= ~B_ASYNC;
1338 				brelse(bp);
1339 				anyfreed = 1;
1340 				if (nbp &&
1341 				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1342 				    (nbp->b_vp != vp) ||
1343 				    (nbp->b_flags & B_DELWRI) == 0)) {
1344 					goto restart;
1345 				}
1346 				VI_LOCK(vp);
1347 			}
1348 		}
1349 	}
1350 
1351 	if (length > 0) {
1352 restartsync:
1353 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1354 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1355 			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
1356 				if (BUF_LOCK(bp,
1357 				    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1358 				    VI_MTX(vp)) == ENOLCK)
1359 					goto restart;
1360 				bremfree(bp);
1361 				if (bp->b_vp == vp)
1362 					bp->b_flags |= B_ASYNC;
1363 				else
1364 					bp->b_flags &= ~B_ASYNC;
1365 
1366 				BUF_WRITE(bp);
1367 				VI_LOCK(vp);
1368 				goto restartsync;
1369 			}
1370 		}
1371 	}
1372 
1373 	while (vp->v_numoutput > 0) {
1374 		vp->v_iflag |= VI_BWAIT;
1375 		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
1376 	}
1377 	VI_UNLOCK(vp);
1378 	splx(s);
1379 
1380 	vnode_pager_setsize(vp, length);
1381 
1382 	return (0);
1383 }
1384 
1385 /*
1386  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1387  * 		 a vnode.
1388  *
1389  *	NOTE: We have to deal with the special case of a background bitmap
1390  *	buffer, a situation where two buffers will have the same logical
1391  *	block offset.  We want (1) only the foreground buffer to be accessed
1392  *	in a lookup and (2) must differentiate between the foreground and
1393  *	background buffer in the splay tree algorithm because the splay
1394  *	tree cannot normally handle multiple entities with the same 'index'.
1395  *	We accomplish this by adding differentiating flags to the splay tree's
1396  *	numerical domain.
1397  */
1398 static
1399 struct buf *
1400 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1401 {
1402 	struct buf dummy;
1403 	struct buf *lefttreemax, *righttreemin, *y;
1404 
1405 	if (root == NULL)
1406 		return (NULL);
1407 	lefttreemax = righttreemin = &dummy;
1408 	for (;;) {
1409 		if (lblkno < root->b_lblkno ||
1410 		    (lblkno == root->b_lblkno &&
1411 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1412 			if ((y = root->b_left) == NULL)
1413 				break;
1414 			if (lblkno < y->b_lblkno) {
1415 				/* Rotate right. */
1416 				root->b_left = y->b_right;
1417 				y->b_right = root;
1418 				root = y;
1419 				if ((y = root->b_left) == NULL)
1420 					break;
1421 			}
1422 			/* Link into the new root's right tree. */
1423 			righttreemin->b_left = root;
1424 			righttreemin = root;
1425 		} else if (lblkno > root->b_lblkno ||
1426 		    (lblkno == root->b_lblkno &&
1427 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1428 			if ((y = root->b_right) == NULL)
1429 				break;
1430 			if (lblkno > y->b_lblkno) {
1431 				/* Rotate left. */
1432 				root->b_right = y->b_left;
1433 				y->b_left = root;
1434 				root = y;
1435 				if ((y = root->b_right) == NULL)
1436 					break;
1437 			}
1438 			/* Link into the new root's left tree. */
1439 			lefttreemax->b_right = root;
1440 			lefttreemax = root;
1441 		} else {
1442 			break;
1443 		}
1444 		root = y;
1445 	}
1446 	/* Assemble the new root. */
1447 	lefttreemax->b_right = root->b_left;
1448 	righttreemin->b_left = root->b_right;
1449 	root->b_left = dummy.b_right;
1450 	root->b_right = dummy.b_left;
1451 	return (root);
1452 }
1453 
1454 static
1455 void
1456 buf_vlist_remove(struct buf *bp)
1457 {
1458 	struct vnode *vp = bp->b_vp;
1459 	struct buf *root;
1460 
1461 	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
1462 	if (bp->b_xflags & BX_VNDIRTY) {
1463 		if (bp != vp->v_dirtyblkroot) {
1464 			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1465 			KASSERT(root == bp, ("splay lookup failed during dirty remove"));
1466 		}
1467 		if (bp->b_left == NULL) {
1468 			root = bp->b_right;
1469 		} else {
1470 			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1471 			root->b_right = bp->b_right;
1472 		}
1473 		vp->v_dirtyblkroot = root;
1474 		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
1475 		vp->v_dirtybufcnt--;
1476 	} else {
1477 		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
1478 		if (bp != vp->v_cleanblkroot) {
1479 			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1480 			KASSERT(root == bp, ("splay lookup failed during clean remove"));
1481 		}
1482 		if (bp->b_left == NULL) {
1483 			root = bp->b_right;
1484 		} else {
1485 			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1486 			root->b_right = bp->b_right;
1487 		}
1488 		vp->v_cleanblkroot = root;
1489 		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
1490 		vp->v_cleanbufcnt--;
1491 	}
1492 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1493 }
1494 
1495 /*
1496  * Add the buffer to the sorted clean or dirty block list using a
1497  * splay tree algorithm.
1498  *
1499  * NOTE: xflags is passed as a constant, optimizing this inline function!
1500  */
1501 static
1502 void
1503 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
1504 {
1505 	struct buf *root;
1506 
1507 	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
1508 	bp->b_xflags |= xflags;
1509 	if (xflags & BX_VNDIRTY) {
1510 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1511 		if (root == NULL) {
1512 			bp->b_left = NULL;
1513 			bp->b_right = NULL;
1514 			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
1515 		} else if (bp->b_lblkno < root->b_lblkno ||
1516 		    (bp->b_lblkno == root->b_lblkno &&
1517 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1518 			bp->b_left = root->b_left;
1519 			bp->b_right = root;
1520 			root->b_left = NULL;
1521 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1522 		} else {
1523 			bp->b_right = root->b_right;
1524 			bp->b_left = root;
1525 			root->b_right = NULL;
1526 			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
1527 			    root, bp, b_vnbufs);
1528 		}
1529 		vp->v_dirtybufcnt++;
1530 		vp->v_dirtyblkroot = bp;
1531 	} else {
1532 		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
1533 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1534 		if (root == NULL) {
1535 			bp->b_left = NULL;
1536 			bp->b_right = NULL;
1537 			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
1538 		} else if (bp->b_lblkno < root->b_lblkno ||
1539 		    (bp->b_lblkno == root->b_lblkno &&
1540 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1541 			bp->b_left = root->b_left;
1542 			bp->b_right = root;
1543 			root->b_left = NULL;
1544 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1545 		} else {
1546 			bp->b_right = root->b_right;
1547 			bp->b_left = root;
1548 			root->b_right = NULL;
1549 			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
1550 			    root, bp, b_vnbufs);
1551 		}
1552 		vp->v_cleanbufcnt++;
1553 		vp->v_cleanblkroot = bp;
1554 	}
1555 }
1556 
1557 /*
1558  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1559  * shadow buffers used in background bitmap writes.
1560  *
1561  * This code isn't quite as efficient as it could be because we are maintaining
1562  * two sorted lists and do not know which list the block resides in.
1563  */
1564 struct buf *
1565 gbincore(struct vnode *vp, daddr_t lblkno)
1566 {
1567 	struct buf *bp;
1568 
1569 	GIANT_REQUIRED;
1570 
1571 	ASSERT_VI_LOCKED(vp, "gbincore");
1572 	bp = vp->v_cleanblkroot = buf_splay(lblkno, 0, vp->v_cleanblkroot);
1573 	if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1574 		return(bp);
1575 	bp = vp->v_dirtyblkroot = buf_splay(lblkno, 0, vp->v_dirtyblkroot);
1576 	if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1577 		return(bp);
1578 	return(NULL);
1579 }
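/*
 * A minimal usage sketch (not compiled): gbincore() must be called with
 * the vnode interlock held, as the assertion above enforces;
 * "example_lookup_buf" is hypothetical.
 */
#if 0
static struct buf *
example_lookup_buf(struct vnode *vp, daddr_t lblkno)
{
	struct buf *bp;

	VI_LOCK(vp);
	bp = gbincore(vp, lblkno);
	VI_UNLOCK(vp);
	return (bp);
}
#endif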
1580 
1581 /*
1582  * Associate a buffer with a vnode.
1583  */
1584 void
1585 bgetvp(vp, bp)
1586 	register struct vnode *vp;
1587 	register struct buf *bp;
1588 {
1589 	int s;
1590 
1591 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1592 
1593 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1594 	    ("bgetvp: bp already attached! %p", bp));
1595 
1596 	ASSERT_VI_LOCKED(vp, "bgetvp");
1597 	vholdl(vp);
1598 	bp->b_vp = vp;
1599 	bp->b_dev = vn_todev(vp);
1600 	/*
1601 	 * Insert onto list for new vnode.
1602 	 */
1603 	s = splbio();
1604 	buf_vlist_add(bp, vp, BX_VNCLEAN);
1605 	splx(s);
1606 }
1607 
1608 /*
1609  * Disassociate a buffer from a vnode.
1610  */
1611 void
1612 brelvp(bp)
1613 	register struct buf *bp;
1614 {
1615 	struct vnode *vp;
1616 	int s;
1617 
1618 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1619 
1620 	/*
1621 	 * Delete from old vnode list, if on one.
1622 	 */
1623 	vp = bp->b_vp;
1624 	s = splbio();
1625 	VI_LOCK(vp);
1626 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1627 		buf_vlist_remove(bp);
1628 	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1629 		vp->v_iflag &= ~VI_ONWORKLST;
1630 		mtx_lock(&sync_mtx);
1631 		LIST_REMOVE(vp, v_synclist);
1632 		mtx_unlock(&sync_mtx);
1633 	}
1634 	vdropl(vp);
1635 	VI_UNLOCK(vp);
1636 	bp->b_vp = (struct vnode *) 0;
1637 	if (bp->b_object)
1638 		bp->b_object = NULL;
1639 	splx(s);
1640 }
1641 
1642 /*
1643  * Add an item to the syncer work queue.
1644  */
1645 static void
1646 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
1647 {
1648 	int s, slot;
1649 
1650 	s = splbio();
1651 	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
1652 
1653 	mtx_lock(&sync_mtx);
1654 	if (vp->v_iflag & VI_ONWORKLST)
1655 		LIST_REMOVE(vp, v_synclist);
1656 	else
1657 		vp->v_iflag |= VI_ONWORKLST;
1658 
1659 	if (delay > syncer_maxdelay - 2)
1660 		delay = syncer_maxdelay - 2;
1661 	slot = (syncer_delayno + delay) & syncer_mask;
1662 
1663 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
1664 	mtx_unlock(&sync_mtx);
1665 
1666 	splx(s);
1667 }
1668 
1669 struct  proc *updateproc;
1670 static void sched_sync(void);
1671 static struct kproc_desc up_kp = {
1672 	"syncer",
1673 	sched_sync,
1674 	&updateproc
1675 };
1676 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1677 
1678 /*
1679  * System filesystem synchronizer daemon.
1680  */
1681 static void
1682 sched_sync(void)
1683 {
1684 	struct synclist *slp;
1685 	struct vnode *vp;
1686 	struct mount *mp;
1687 	long starttime;
1688 	int s;
1689 	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);  /* XXXKSE */
1690 
1691 	mtx_lock(&Giant);
1692 
1693 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
1694 	    SHUTDOWN_PRI_LAST);
1695 
1696 	for (;;) {
1697 		kthread_suspend_check(td->td_proc);
1698 
1699 		starttime = time_second;
1700 
1701 		/*
1702 		 * Push files whose dirty time has expired.  Be careful
1703 		 * of interrupt race on slp queue.
1704 		 */
1705 		s = splbio();
1706 		mtx_lock(&sync_mtx);
1707 		slp = &syncer_workitem_pending[syncer_delayno];
1708 		syncer_delayno += 1;
1709 		if (syncer_delayno == syncer_maxdelay)
1710 			syncer_delayno = 0;
1711 		splx(s);
1712 
1713 		while ((vp = LIST_FIRST(slp)) != NULL) {
1714 			mtx_unlock(&sync_mtx);
1715 			if (VOP_ISLOCKED(vp, NULL) == 0 &&
1716 			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
1717 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1718 				(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1719 				VOP_UNLOCK(vp, 0, td);
1720 				vn_finished_write(mp);
1721 			}
1722 			s = splbio();
1723 			mtx_lock(&sync_mtx);
1724 			if (LIST_FIRST(slp) == vp) {
1725 				mtx_unlock(&sync_mtx);
1726 				/*
1727 				 * Note: VFS vnodes can remain on the
1728 				 * worklist too with no dirty blocks, but
1729 				 * since sync_fsync() moves them to a different
1730 				 * slot we are safe.
1731 				 */
1732 				VI_LOCK(vp);
1733 				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
1734 				    !vn_isdisk(vp, NULL)) {
1735 					panic("sched_sync: fsync failed "
1736 					      "vp %p tag %s", vp, vp->v_tag);
1737 				}
1738 				/*
1739 				 * Put us back on the worklist.  The worklist
1740 				 * routine will remove us from our current
1741 				 * position and then add us back in at a later
1742 				 * position.
1743 				 */
1744 				vn_syncer_add_to_worklist(vp, syncdelay);
1745 				VI_UNLOCK(vp);
1746 				mtx_lock(&sync_mtx);
1747 			}
1748 			splx(s);
1749 		}
1750 		mtx_unlock(&sync_mtx);
1751 
1752 		/*
1753 		 * Do soft update processing.
1754 		 */
1755 		if (softdep_process_worklist_hook != NULL)
1756 			(*softdep_process_worklist_hook)(NULL);
1757 
1758 		/*
1759 		 * The variable rushjob allows the kernel to speed up the
1760 		 * processing of the filesystem syncer process. A rushjob
1761 		 * value of N tells the filesystem syncer to process the next
1762 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1763 		 * is used by the soft update code to speed up the filesystem
1764 		 * syncer process when the incore state is getting so far
1765 		 * ahead of the disk that the kernel memory pool is being
1766 		 * threatened with exhaustion.
1767 		 */
1768 		mtx_lock(&sync_mtx);
1769 		if (rushjob > 0) {
1770 			rushjob -= 1;
1771 			mtx_unlock(&sync_mtx);
1772 			continue;
1773 		}
1774 		mtx_unlock(&sync_mtx);
1775 		/*
1776 		 * If it has taken us less than a second to process the
1777 		 * current work, then wait. Otherwise start right over
1778 		 * again. We can still lose time if any single round
1779 		 * takes more than two seconds, but it does not really
1780 		 * matter as we are just trying to generally pace the
1781 		 * filesystem activity.
1782 		 */
1783 		if (time_second == starttime)
1784 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1785 	}
1786 }
1787 
1788 /*
1789  * Request the syncer daemon to speed up its work.
1790  * We never push it to speed up more than half of its
1791  * normal turn time, otherwise it could take over the cpu.
1792  * XXXKSE  only one update?
1793  */
1794 int
1795 speedup_syncer()
1796 {
1797 	struct thread *td;
1798 	int ret = 0;
1799 
1800 	td = FIRST_THREAD_IN_PROC(updateproc);
1801 	mtx_lock_spin(&sched_lock);
1802 	if (td->td_wchan == &lbolt) {
1803 		unsleep(td);
1804 		TD_CLR_SLEEPING(td);
1805 		setrunnable(td);
1806 	}
1807 	mtx_unlock_spin(&sched_lock);
1808 	mtx_lock(&sync_mtx);
1809 	if (rushjob < syncdelay / 2) {
1810 		rushjob += 1;
1811 		stat_rush_requests += 1;
1812 		ret = 1;
1813 	}
1814 	mtx_unlock(&sync_mtx);
1815 	return (ret);
1816 }
1817 
1818 /*
1819  * Associate a p-buffer with a vnode.
1820  *
1821  * Also sets B_PAGING flag to indicate that vnode is not fully associated
1822  * with the buffer.  i.e. the bp has not been linked into the vnode or
1823  * ref-counted.
1824  */
1825 void
1826 pbgetvp(vp, bp)
1827 	register struct vnode *vp;
1828 	register struct buf *bp;
1829 {
1830 
1831 	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1832 
1833 	bp->b_vp = vp;
1834 	bp->b_flags |= B_PAGING;
1835 	bp->b_dev = vn_todev(vp);
1836 }
1837 
1838 /*
1839  * Disassociate a p-buffer from a vnode.
1840  */
1841 void
1842 pbrelvp(bp)
1843 	register struct buf *bp;
1844 {
1845 
1846 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1847 
1848 	/* XXX REMOVE ME */
1849 	VI_LOCK(bp->b_vp);
1850 	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1851 		panic(
1852 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1853 		    bp,
1854 		    (int)bp->b_flags
1855 		);
1856 	}
1857 	VI_UNLOCK(bp->b_vp);
1858 	bp->b_vp = (struct vnode *) 0;
1859 	bp->b_flags &= ~B_PAGING;
1860 }
1861 
1862 /*
1863  * Reassign a buffer from one vnode to another.
1864  * Used to assign file specific control information
1865  * (indirect blocks) to the vnode to which they belong.
1866  */
1867 void
1868 reassignbuf(bp, newvp)
1869 	register struct buf *bp;
1870 	register struct vnode *newvp;
1871 {
1872 	int delay;
1873 	int s;
1874 
1875 	if (newvp == NULL) {
1876 		printf("reassignbuf: NULL");
1877 		return;
1878 	}
1879 	++reassignbufcalls;
1880 
1881 	/*
1882 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1883 	 * is not fully linked in.
1884 	 */
1885 	if (bp->b_flags & B_PAGING)
1886 		panic("cannot reassign paging buffer");
1887 
1888 	s = splbio();
1889 	/*
1890 	 * Delete from old vnode list, if on one.
1891 	 */
1892 	VI_LOCK(bp->b_vp);
1893 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1894 		buf_vlist_remove(bp);
1895 		if (bp->b_vp != newvp) {
1896 			vdropl(bp->b_vp);
1897 			bp->b_vp = NULL;	/* for clarification */
1898 		}
1899 	}
1900 	VI_UNLOCK(bp->b_vp);
1901 	/*
1902 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1903 	 * of clean buffers.
1904 	 */
1905 	VI_LOCK(newvp);
1906 	if (bp->b_flags & B_DELWRI) {
1907 		if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
1908 			switch (newvp->v_type) {
1909 			case VDIR:
1910 				delay = dirdelay;
1911 				break;
1912 			case VCHR:
1913 				if (newvp->v_rdev->si_mountpoint != NULL) {
1914 					delay = metadelay;
1915 					break;
1916 				}
1917 				/* FALLTHROUGH */
1918 			default:
1919 				delay = filedelay;
1920 			}
1921 			vn_syncer_add_to_worklist(newvp, delay);
1922 		}
1923 		buf_vlist_add(bp, newvp, BX_VNDIRTY);
1924 	} else {
1925 		buf_vlist_add(bp, newvp, BX_VNCLEAN);
1926 
1927 		if ((newvp->v_iflag & VI_ONWORKLST) &&
1928 		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1929 			mtx_lock(&sync_mtx);
1930 			LIST_REMOVE(newvp, v_synclist);
1931 			mtx_unlock(&sync_mtx);
1932 			newvp->v_iflag &= ~VI_ONWORKLST;
1933 		}
1934 	}
1935 	if (bp->b_vp != newvp) {
1936 		bp->b_vp = newvp;
1937 		vholdl(bp->b_vp);
1938 	}
1939 	VI_UNLOCK(newvp);
1940 	splx(s);
1941 }
1942 
1943 /*
1944  * Create a vnode for a device.
1945  * Used for mounting the root filesystem.
1946  */
1947 int
1948 bdevvp(dev, vpp)
1949 	dev_t dev;
1950 	struct vnode **vpp;
1951 {
1952 	register struct vnode *vp;
1953 	struct vnode *nvp;
1954 	int error;
1955 
1956 	if (dev == NODEV) {
1957 		*vpp = NULLVP;
1958 		return (ENXIO);
1959 	}
1960 	if (vfinddev(dev, VCHR, vpp))
1961 		return (0);
1962 	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1963 	if (error) {
1964 		*vpp = NULLVP;
1965 		return (error);
1966 	}
1967 	vp = nvp;
1968 	vp->v_type = VCHR;
1969 	addalias(vp, dev);
1970 	*vpp = vp;
1971 	return (0);
1972 }
1973 
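/*
 * Adjust the usecount of a vnode by the given delta and keep the
 * usecount of the underlying special device (if any) in sync.
 * The caller must hold the vnode interlock.
 */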
1974 static void
1975 v_incr_usecount(struct vnode *vp, int delta)
1976 {
1977 	vp->v_usecount += delta;
1978 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1979 		mtx_lock(&spechash_mtx);
1980 		vp->v_rdev->si_usecount += delta;
1981 		mtx_unlock(&spechash_mtx);
1982 	}
1983 }
1984 
1985 /*
1986  * Add vnode to the alias list hung off the dev_t.
1987  *
1988  * The reason for this gunk is that multiple vnodes can reference
1989  * the same physical device, so checking vp->v_usecount to see
1990  * how many users there are is inadequate; the v_usecount for
1991  * the vnodes need to be accumulated.  vcount() does that.
1992  */
1993 struct vnode *
1994 addaliasu(nvp, nvp_rdev)
1995 	struct vnode *nvp;
1996 	udev_t nvp_rdev;
1997 {
1998 	struct vnode *ovp;
1999 	vop_t **ops;
2000 	dev_t dev;
2001 
2002 	if (nvp->v_type == VBLK)
2003 		return (nvp);
2004 	if (nvp->v_type != VCHR)
2005 		panic("addaliasu on non-special vnode");
2006 	dev = udev2dev(nvp_rdev, 0);
2007 	/*
2008 	 * Check to see if we have a bdevvp vnode with no associated
2009 	 * filesystem. If so, we want to associate the filesystem of
2010 	 * the newly instigated vnode with the bdevvp vnode and
2011 	 * discard the newly created vnode rather than leaving the
2012 	 * bdevvp vnode lying around with no associated filesystem.
2013 	 */
2014 	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
2015 		addalias(nvp, dev);
2016 		return (nvp);
2017 	}
2018 	/*
2019 	 * Discard unneeded vnode, but save its node specific data.
2020 	 * Note that if there is a lock, it is carried over in the
2021 	 * node specific data to the replacement vnode.
2022 	 */
2023 	vref(ovp);
2024 	ovp->v_data = nvp->v_data;
2025 	ovp->v_tag = nvp->v_tag;
2026 	nvp->v_data = NULL;
2027 	lockdestroy(ovp->v_vnlock);
2028 	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
2029 	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
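	/*
	 * Swap the vnode operation vectors and, if the new vnode was
	 * locked, carry that lock over to the vnode we are keeping.
	 */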
2030 	ops = ovp->v_op;
2031 	ovp->v_op = nvp->v_op;
2032 	if (VOP_ISLOCKED(nvp, curthread)) {
2033 		VOP_UNLOCK(nvp, 0, curthread);
2034 		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
2035 	}
2036 	nvp->v_op = ops;
2037 	insmntque(ovp, nvp->v_mount);
2038 	vrele(nvp);
2039 	vgone(nvp);
2040 	return (ovp);
2041 }
2042 
2043 /* This is a local helper function that does the same as addaliasu, but for a
2044  * dev_t instead of a udev_t. */
2045 static void
2046 addalias(nvp, dev)
2047 	struct vnode *nvp;
2048 	dev_t dev;
2049 {
2050 
2051 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
2052 	nvp->v_rdev = dev;
2053 	VI_LOCK(nvp);
2054 	mtx_lock(&spechash_mtx);
2055 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
2056 	dev->si_usecount += nvp->v_usecount;
2057 	mtx_unlock(&spechash_mtx);
2058 	VI_UNLOCK(nvp);
2059 }
2060 
2061 /*
2062  * Grab a particular vnode from the free list, increment its
2063  * reference count and lock it. The vnode lock bit is set if the
2064  * vnode is being eliminated in vgone. The process is awakened
2065  * when the transition is completed, and an error returned to
2066  * indicate that the vnode is no longer usable (possibly having
2067  * been changed to a new filesystem type).
2068  */
2069 int
2070 vget(vp, flags, td)
2071 	register struct vnode *vp;
2072 	int flags;
2073 	struct thread *td;
2074 {
2075 	int error;
2076 
2077 	/*
2078 	 * If the vnode is in the process of being cleaned out for
2079 	 * another use, we wait for the cleaning to finish and then
2080 	 * return failure. Cleaning is determined by checking that
2081 	 * the VI_XLOCK flag is set.
2082 	 */
2083 	if ((flags & LK_INTERLOCK) == 0)
2084 		VI_LOCK(vp);
2085 	if (vp->v_iflag & VI_XLOCK && vp->v_vxproc != curthread) {
2086 		vp->v_iflag |= VI_XWANT;
2087 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
2088 		return (ENOENT);
2089 	}
2090 
2091 	v_incr_usecount(vp, 1);
2092 
2093 	if (VSHOULDBUSY(vp))
2094 		vbusy(vp);
2095 	if (flags & LK_TYPE_MASK) {
2096 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2097 			/*
2098 			 * must expand vrele here because we do not want
2099 			 * to call VOP_INACTIVE if the reference count
2100 			 * drops back to zero since it was never really
2101 			 * active. We must remove it from the free list
2102 			 * before sleeping so that multiple processes do
2103 			 * not try to recycle it.
2104 			 */
2105 			VI_LOCK(vp);
2106 			v_incr_usecount(vp, -1);
2107 			if (VSHOULDFREE(vp))
2108 				vfree(vp);
2109 			else
2110 				vlruvp(vp);
2111 			VI_UNLOCK(vp);
2112 		}
2113 		return (error);
2114 	}
2115 	VI_UNLOCK(vp);
2116 	return (0);
2117 }
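
/*
 * A typical consumer (a sketch, not code taken from this file) obtains
 * and releases a reference in the following way:
 *
 *	if (vget(vp, LK_EXCLUSIVE, td) == 0) {
 *		... operate on the locked, referenced vnode ...
 *		vput(vp);
 *	}
 */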
2118 
2119 /*
2120  * Increase the reference count of a vnode.
2121  */
2122 void
2123 vref(struct vnode *vp)
2124 {
2125 	VI_LOCK(vp);
2126 	v_incr_usecount(vp, 1);
2127 	VI_UNLOCK(vp);
2128 }
2129 
2130 /*
2131  * Return reference count of a vnode.
2132  *
2133  * The results of this call are only guaranteed when some mechanism other
2134  * than the VI lock is used to stop other processes from gaining references
2135  * to the vnode.  This may be the case if the caller holds the only reference.
2136  * This is also useful when stale data is acceptable as race conditions may
2137  * be accounted for by some other means.
2138  */
2139 int
2140 vrefcnt(struct vnode *vp)
2141 {
2142 	int usecnt;
2143 
2144 	VI_LOCK(vp);
2145 	usecnt = vp->v_usecount;
2146 	VI_UNLOCK(vp);
2147 
2148 	return (usecnt);
2149 }
2150 
2151 
2152 /*
2153  * Vnode put/release.
2154  * If count drops to zero, call inactive routine and return to freelist.
2155  */
2156 void
2157 vrele(vp)
2158 	struct vnode *vp;
2159 {
2160 	struct thread *td = curthread;	/* XXX */
2161 
2162 	KASSERT(vp != NULL, ("vrele: null vp"));
2163 
2164 	VI_LOCK(vp);
2165 
2166 	/* Skip this v_writecount check if we're going to panic below. */
2167 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2168 	    ("vrele: missed vn_close"));
2169 
2170 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2171 	    vp->v_usecount == 1)) {
2172 		v_incr_usecount(vp, -1);
2173 		VI_UNLOCK(vp);
2174 
2175 		return;
2176 	}
2177 
2178 	if (vp->v_usecount == 1) {
2179 		v_incr_usecount(vp, -1);
2180 		/*
2181 		 * We must call VOP_INACTIVE with the node locked. Mark
2182 		 * as VI_DOINGINACT to avoid recursion.
2183 		 */
2184 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2185 			VI_LOCK(vp);
2186 			vp->v_iflag |= VI_DOINGINACT;
2187 			VI_UNLOCK(vp);
2188 			VOP_INACTIVE(vp, td);
2189 			VI_LOCK(vp);
2190 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2191 			    ("vrele: lost VI_DOINGINACT"));
2192 			vp->v_iflag &= ~VI_DOINGINACT;
2193 			VI_UNLOCK(vp);
2194 		}
2195 		VI_LOCK(vp);
2196 		if (VSHOULDFREE(vp))
2197 			vfree(vp);
2198 		else
2199 			vlruvp(vp);
2200 		VI_UNLOCK(vp);
2201 
2202 	} else {
2203 #ifdef DIAGNOSTIC
2204 		vprint("vrele: negative ref count", vp);
2205 #endif
2206 		VI_UNLOCK(vp);
2207 		panic("vrele: negative ref cnt");
2208 	}
2209 }
2210 
2211 /*
2212  * Release an already locked vnode.  This gives the same effect as
2213  * unlock+vrele(), but takes less time and avoids releasing and
2214  * re-acquiring the lock (as vrele() acquires the lock internally).
2215  */
2216 void
2217 vput(vp)
2218 	struct vnode *vp;
2219 {
2220 	struct thread *td = curthread;	/* XXX */
2221 
2222 	GIANT_REQUIRED;
2223 
2224 	KASSERT(vp != NULL, ("vput: null vp"));
2225 	VI_LOCK(vp);
2226 	/* Skip this v_writecount check if we're going to panic below. */
2227 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2228 	    ("vput: missed vn_close"));
2229 
2230 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2231 	    vp->v_usecount == 1)) {
2232 		v_incr_usecount(vp, -1);
2233 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2234 		return;
2235 	}
2236 
2237 	if (vp->v_usecount == 1) {
2238 		v_incr_usecount(vp, -1);
2239 		/*
2240 		 * We must call VOP_INACTIVE with the node locked, so
2241 		 * we just need to release the vnode mutex. Mark it
2242 		 * as VI_DOINGINACT to avoid recursion.
2243 		 */
2244 		vp->v_iflag |= VI_DOINGINACT;
2245 		VI_UNLOCK(vp);
2246 		VOP_INACTIVE(vp, td);
2247 		VI_LOCK(vp);
2248 		KASSERT(vp->v_iflag & VI_DOINGINACT,
2249 		    ("vput: lost VI_DOINGINACT"));
2250 		vp->v_iflag &= ~VI_DOINGINACT;
2251 		if (VSHOULDFREE(vp))
2252 			vfree(vp);
2253 		else
2254 			vlruvp(vp);
2255 		VI_UNLOCK(vp);
2256 
2257 	} else {
2258 #ifdef DIAGNOSTIC
2259 		vprint("vput: negative ref count", vp);
2260 #endif
2261 		panic("vput: negative ref cnt");
2262 	}
2263 }
2264 
2265 /*
2266  * Somebody doesn't want the vnode recycled.
2267  */
2268 void
2269 vhold(struct vnode *vp)
2270 {
2271 	VI_LOCK(vp);
2272 	vholdl(vp);
2273 	VI_UNLOCK(vp);
2274 }
2275 
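/*
 * vhold(), but the vnode interlock is already held by the caller.
 */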
2276 void
2277 vholdl(vp)
2278 	register struct vnode *vp;
2279 {
2280 	int s;
2281 
2282 	s = splbio();
2283 	vp->v_holdcnt++;
2284 	if (VSHOULDBUSY(vp))
2285 		vbusy(vp);
2286 	splx(s);
2287 }
2288 
2289 /*
2290  * Note that there is one less holder who cares about this vnode.  vdrop() is the
2291  * opposite of vhold().
2292  */
2293 void
2294 vdrop(struct vnode *vp)
2295 {
2296 	VI_LOCK(vp);
2297 	vdropl(vp);
2298 	VI_UNLOCK(vp);
2299 }
2300 
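/*
 * vdrop(), but the vnode interlock is already held by the caller.
 */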
2301 void
2302 vdropl(vp)
2303 	register struct vnode *vp;
2304 {
2305 	int s;
2306 
2307 	s = splbio();
2308 	if (vp->v_holdcnt <= 0)
2309 		panic("vdrop: holdcnt");
2310 	vp->v_holdcnt--;
2311 	if (VSHOULDFREE(vp))
2312 		vfree(vp);
2313 	else
2314 		vlruvp(vp);
2315 	splx(s);
2316 }
2317 
2318 /*
2319  * Remove any vnodes in the vnode table belonging to mount point mp.
2320  *
2321  * If FORCECLOSE is not specified, there should not be any active ones,
2322  * return error if any are found (nb: this is a user error, not a
2323  * system error). If FORCECLOSE is specified, detach any active vnodes
2324  * that are found.
2325  *
2326  * If WRITECLOSE is set, only flush out regular file vnodes open for
2327  * writing.
2328  *
2329  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2330  *
2331  * `rootrefs' specifies the base reference count for the root vnode
2332  * of this filesystem. The root vnode is considered busy if its
2333  * v_usecount exceeds this value. On a successful return, vflush()
2334  * will call vrele() on the root vnode exactly rootrefs times.
2335  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2336  * be zero.
2337  */
2338 #ifdef DIAGNOSTIC
2339 static int busyprt = 0;		/* print out busy vnodes */
2340 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2341 #endif
2342 
2343 int
2344 vflush(mp, rootrefs, flags)
2345 	struct mount *mp;
2346 	int rootrefs;
2347 	int flags;
2348 {
2349 	struct thread *td = curthread;	/* XXX */
2350 	struct vnode *vp, *nvp, *rootvp = NULL;
2351 	struct vattr vattr;
2352 	int busy = 0, error;
2353 
2354 	if (rootrefs > 0) {
2355 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2356 		    ("vflush: bad args"));
2357 		/*
2358 		 * Get the filesystem root vnode. We can vput() it
2359 		 * immediately, since with rootrefs > 0, it won't go away.
2360 		 */
2361 		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
2362 			return (error);
2363 		vput(rootvp);
2364 
2365 	}
2366 	mtx_lock(&mntvnode_mtx);
2367 loop:
2368 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
2369 		/*
2370 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
2371 		 * Start over if it was (it won't be on the list anymore).
2372 		 */
2373 		if (vp->v_mount != mp)
2374 			goto loop;
2375 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
2376 
2377 		VI_LOCK(vp);
2378 		mtx_unlock(&mntvnode_mtx);
2379 		vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, td);
2380 		/*
2381 		 * Skip over vnodes marked VV_SYSTEM.
2382 		 */
2383 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2384 			VOP_UNLOCK(vp, 0, td);
2385 			mtx_lock(&mntvnode_mtx);
2386 			continue;
2387 		}
2388 		/*
2389 		 * If WRITECLOSE is set, flush out unlinked but still open
2390 		 * files (even if open only for reading) and regular file
2391 		 * vnodes open for writing.
2392 		 */
2393 		if (flags & WRITECLOSE) {
2394 			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2395 			VI_LOCK(vp);
2396 
2397 			if ((vp->v_type == VNON ||
2398 			    (error == 0 && vattr.va_nlink > 0)) &&
2399 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2400 				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2401 				mtx_lock(&mntvnode_mtx);
2402 				continue;
2403 			}
2404 		} else
2405 			VI_LOCK(vp);
2406 
2407 		VOP_UNLOCK(vp, 0, td);
2408 
2409 		/*
2410 		 * With v_usecount == 0, all we need to do is clear out the
2411 		 * vnode data structures and we are done.
2412 		 */
2413 		if (vp->v_usecount == 0) {
2414 			vgonel(vp, td);
2415 			mtx_lock(&mntvnode_mtx);
2416 			continue;
2417 		}
2418 
2419 		/*
2420 		 * If FORCECLOSE is set, forcibly close the vnode. For block
2421 		 * or character devices, revert to an anonymous device. For
2422 		 * all other files, just kill them.
2423 		 */
2424 		if (flags & FORCECLOSE) {
2425 			if (vp->v_type != VCHR) {
2426 				vgonel(vp, td);
2427 			} else {
2428 				vclean(vp, 0, td);
2429 				VI_UNLOCK(vp);
2430 				vp->v_op = spec_vnodeop_p;
2431 				insmntque(vp, (struct mount *) 0);
2432 			}
2433 			mtx_lock(&mntvnode_mtx);
2434 			continue;
2435 		}
2436 #ifdef DIAGNOSTIC
2437 		if (busyprt)
2438 			vprint("vflush: busy vnode", vp);
2439 #endif
2440 		VI_UNLOCK(vp);
2441 		mtx_lock(&mntvnode_mtx);
2442 		busy++;
2443 	}
2444 	mtx_unlock(&mntvnode_mtx);
2445 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2446 		/*
2447 		 * If just the root vnode is busy, and if its refcount
2448 		 * is equal to `rootrefs', then go ahead and kill it.
2449 		 */
2450 		VI_LOCK(rootvp);
2451 		KASSERT(busy > 0, ("vflush: not busy"));
2452 		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
2453 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2454 			vgonel(rootvp, td);
2455 			busy = 0;
2456 		} else
2457 			VI_UNLOCK(rootvp);
2458 	}
2459 	if (busy)
2460 		return (EBUSY);
2461 	for (; rootrefs > 0; rootrefs--)
2462 		vrele(rootvp);
2463 	return (0);
2464 }
2465 
2466 /*
2467  * This moves a now (likely recyclable) vnode to the end of the
2468  * mountlist.  XXX However, it is temporarily disabled until we
2469  * can clean up ffs_sync() and friends, which have loop restart
2470  * conditions which this code causes to operate O(N^2).
2471  */
2472 static void
2473 vlruvp(struct vnode *vp)
2474 {
2475 #if 0
2476 	struct mount *mp;
2477 
2478 	if ((mp = vp->v_mount) != NULL) {
2479 		mtx_lock(&mntvnode_mtx);
2480 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2481 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2482 		mtx_unlock(&mntvnode_mtx);
2483 	}
2484 #endif
2485 }
2486 
2487 /*
2488  * Disassociate the underlying filesystem from a vnode.
2489  */
2490 static void
2491 vclean(vp, flags, td)
2492 	struct vnode *vp;
2493 	int flags;
2494 	struct thread *td;
2495 {
2496 	int active;
2497 
2498 	ASSERT_VI_LOCKED(vp, "vclean");
2499 	/*
2500 	 * Check to see if the vnode is in use. If so we have to reference it
2501 	 * before we clean it out so that its count cannot fall to zero and
2502 	 * generate a race against ourselves to recycle it.
2503 	 */
2504 	if ((active = vp->v_usecount))
2505 		v_incr_usecount(vp, 1);
2506 
2507 	/*
2508 	 * Prevent the vnode from being recycled or brought into use while we
2509 	 * clean it out.
2510 	 */
2511 	if (vp->v_iflag & VI_XLOCK)
2512 		panic("vclean: deadlock");
2513 	vp->v_iflag |= VI_XLOCK;
2514 	vp->v_vxproc = curthread;
2515 	/*
2516 	 * Even if the count is zero, the VOP_INACTIVE routine may still
2517 	 * have the object locked while it cleans it out. The VOP_LOCK
2518 	 * ensures that the VOP_INACTIVE routine is done with its work.
2519 	 * For active vnodes, it ensures that no other activity can
2520 	 * occur while the underlying object is being cleaned out.
2521 	 */
2522 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2523 
2524 	/*
2525 	 * Clean out any buffers associated with the vnode.
2526 	 * If the flush fails, just toss the buffers.
2527 	 */
2528 	if (flags & DOCLOSE) {
2529 		struct buf *bp;
2530 		VI_LOCK(vp);
2531 		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2532 		VI_UNLOCK(vp);
2533 		if (bp != NULL)
2534 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2535 		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2536 			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2537 	}
2538 
2539 	VOP_DESTROYVOBJECT(vp);
2540 
2541 	/*
2542 	 * Any other processes trying to obtain this lock must first
2543 	 * wait for VI_XLOCK to clear, then call the new lock operation.
2544 	 */
2545 	VOP_UNLOCK(vp, 0, td);
2546 
2547 	/*
2548 	 * If purging an active vnode, it must be closed and
2549 	 * deactivated before being reclaimed. Note that the
2550 	 * VOP_INACTIVE will unlock the vnode.
2551 	 */
2552 	if (active) {
2553 		if (flags & DOCLOSE)
2554 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2555 		VI_LOCK(vp);
2556 		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2557 			vp->v_iflag |= VI_DOINGINACT;
2558 			VI_UNLOCK(vp);
2559 			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2560 				panic("vclean: cannot relock.");
2561 			VOP_INACTIVE(vp, td);
2562 			VI_LOCK(vp);
2563 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2564 			    ("vclean: lost VI_DOINGINACT"));
2565 			vp->v_iflag &= ~VI_DOINGINACT;
2566 		}
2567 		VI_UNLOCK(vp);
2568 	}
2569 
2570 	/*
2571 	 * Reclaim the vnode.
2572 	 */
2573 	if (VOP_RECLAIM(vp, td))
2574 		panic("vclean: cannot reclaim");
2575 
2576 	if (active) {
2577 		/*
2578 		 * Inline copy of vrele() since VOP_INACTIVE
2579 		 * has already been called.
2580 		 */
2581 		VI_LOCK(vp);
2582 		v_incr_usecount(vp, -1);
2583 		if (vp->v_usecount <= 0) {
2584 #ifdef DIAGNOSTIC
2585 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2586 				vprint("vclean: bad ref count", vp);
2587 				panic("vclean: ref cnt");
2588 			}
2589 #endif
2590 			vfree(vp);
2591 		}
2592 		VI_UNLOCK(vp);
2593 	}
2594 
2595 	cache_purge(vp);
2596 	VI_LOCK(vp);
2597 	if (VSHOULDFREE(vp))
2598 		vfree(vp);
2599 
2600 	/*
2601 	 * Done with purge, reset to the standard lock and
2602 	 * notify sleepers of the grim news.
2603 	 */
2604 	vp->v_vnlock = &vp->v_lock;
2605 	vp->v_op = dead_vnodeop_p;
2606 	if (vp->v_pollinfo != NULL)
2607 		vn_pollgone(vp);
2608 	vp->v_tag = "none";
2609 	vp->v_iflag &= ~VI_XLOCK;
2610 	vp->v_vxproc = NULL;
2611 	if (vp->v_iflag & VI_XWANT) {
2612 		vp->v_iflag &= ~VI_XWANT;
2613 		wakeup(vp);
2614 	}
2615 }
2616 
2617 /*
2618  * Eliminate all activity associated with the requested vnode
2619  * and with all vnodes aliased to the requested vnode.
2620  */
2621 int
2622 vop_revoke(ap)
2623 	struct vop_revoke_args /* {
2624 		struct vnode *a_vp;
2625 		int a_flags;
2626 	} */ *ap;
2627 {
2628 	struct vnode *vp, *vq;
2629 	dev_t dev;
2630 
2631 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2632 	vp = ap->a_vp;
2633 	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2634 
2635 	VI_LOCK(vp);
2636 	/*
2637 	 * If a vgone (or vclean) is already in progress,
2638 	 * wait until it is done and return.
2639 	 */
2640 	if (vp->v_iflag & VI_XLOCK) {
2641 		vp->v_iflag |= VI_XWANT;
2642 		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2643 		    "vop_revokeall", 0);
2644 		return (0);
2645 	}
2646 	VI_UNLOCK(vp);
2647 	dev = vp->v_rdev;
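	/*
	 * Revoke every alias of the device one at a time; each call to
	 * vgone() removes that vnode from si_hlist, so we keep taking
	 * the new head until the list is empty.
	 */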
2648 	for (;;) {
2649 		mtx_lock(&spechash_mtx);
2650 		vq = SLIST_FIRST(&dev->si_hlist);
2651 		mtx_unlock(&spechash_mtx);
2652 		if (!vq)
2653 			break;
2654 		vgone(vq);
2655 	}
2656 	return (0);
2657 }
2658 
2659 /*
2660  * Recycle an unused vnode to the front of the free list.
2661  * Release the passed interlock if the vnode will be recycled.
2662  */
2663 int
2664 vrecycle(vp, inter_lkp, td)
2665 	struct vnode *vp;
2666 	struct mtx *inter_lkp;
2667 	struct thread *td;
2668 {
2669 
2670 	VI_LOCK(vp);
2671 	if (vp->v_usecount == 0) {
2672 		if (inter_lkp) {
2673 			mtx_unlock(inter_lkp);
2674 		}
2675 		vgonel(vp, td);
2676 		return (1);
2677 	}
2678 	VI_UNLOCK(vp);
2679 	return (0);
2680 }
2681 
2682 /*
2683  * Eliminate all activity associated with a vnode
2684  * in preparation for reuse.
2685  */
2686 void
2687 vgone(vp)
2688 	register struct vnode *vp;
2689 {
2690 	struct thread *td = curthread;	/* XXX */
2691 
2692 	VI_LOCK(vp);
2693 	vgonel(vp, td);
2694 }
2695 
2696 /*
2697  * vgone, with the vp interlock held.
2698  */
2699 void
2700 vgonel(vp, td)
2701 	struct vnode *vp;
2702 	struct thread *td;
2703 {
2704 	int s;
2705 
2706 	/*
2707 	 * If a vgone (or vclean) is already in progress,
2708 	 * wait until it is done and return.
2709 	 */
2710 	ASSERT_VI_LOCKED(vp, "vgonel");
2711 	if (vp->v_iflag & VI_XLOCK) {
2712 		vp->v_iflag |= VI_XWANT;
2713 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2714 		return;
2715 	}
2716 
2717 	/*
2718 	 * Clean out the filesystem specific data.
2719 	 */
2720 	vclean(vp, DOCLOSE, td);
2721 	VI_UNLOCK(vp);
2722 
2723 	/*
2724 	 * Delete from old mount point vnode list, if on one.
2725 	 */
2726 	if (vp->v_mount != NULL)
2727 		insmntque(vp, (struct mount *)0);
2728 	/*
2729 	 * If special device, remove it from special device alias list
2730 	 * if it is on one.
2731 	 */
2732 	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
2733 		VI_LOCK(vp);
2734 		mtx_lock(&spechash_mtx);
2735 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2736 		vp->v_rdev->si_usecount -= vp->v_usecount;
2737 		mtx_unlock(&spechash_mtx);
2738 		VI_UNLOCK(vp);
2739 		vp->v_rdev = NULL;
2740 	}
2741 
2742 	/*
2743 	 * If it is on the freelist and not already at the head,
2744 	 * move it to the head of the list. The test of the
2745 	 * VI_DOOMED flag and the reference count of zero is because
2746 	 * it will be removed from the free list by getnewvnode,
2747 	 * but will not have its reference count incremented until
2748 	 * after calling vgone. If the reference count were
2749 	 * incremented first, vgone would (incorrectly) try to
2750 	 * close the previous instance of the underlying object.
2751 	 */
2752 	VI_LOCK(vp);
2753 	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2754 		s = splbio();
2755 		mtx_lock(&vnode_free_list_mtx);
2756 		if (vp->v_iflag & VI_FREE) {
2757 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2758 		} else {
2759 			vp->v_iflag |= VI_FREE;
2760 			freevnodes++;
2761 		}
2762 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2763 		mtx_unlock(&vnode_free_list_mtx);
2764 		splx(s);
2765 	}
2766 
2767 	vp->v_type = VBAD;
2768 	VI_UNLOCK(vp);
2769 }
2770 
2771 /*
2772  * Lookup a vnode by device number.
2773  */
2774 int
2775 vfinddev(dev, type, vpp)
2776 	dev_t dev;
2777 	enum vtype type;
2778 	struct vnode **vpp;
2779 {
2780 	struct vnode *vp;
2781 
2782 	mtx_lock(&spechash_mtx);
2783 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2784 		if (type == vp->v_type) {
2785 			*vpp = vp;
2786 			mtx_unlock(&spechash_mtx);
2787 			return (1);
2788 		}
2789 	}
2790 	mtx_unlock(&spechash_mtx);
2791 	return (0);
2792 }
2793 
2794 /*
2795  * Calculate the total number of references to a special device.
2796  */
2797 int
2798 vcount(vp)
2799 	struct vnode *vp;
2800 {
2801 	int count;
2802 
2803 	mtx_lock(&spechash_mtx);
2804 	count = vp->v_rdev->si_usecount;
2805 	mtx_unlock(&spechash_mtx);
2806 	return (count);
2807 }
2808 
2809 /*
2810  * Same as above, but using the dev_t as argument
2811  */
2812 int
2813 count_dev(dev)
2814 	dev_t dev;
2815 {
2816 	struct vnode *vp;
2817 
2818 	vp = SLIST_FIRST(&dev->si_hlist);
2819 	if (vp == NULL)
2820 		return (0);
2821 	return(vcount(vp));
2822 }
2823 
2824 /*
2825  * Print out a description of a vnode.
2826  */
2827 static char *typename[] =
2828 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2829 
2830 void
2831 vprint(label, vp)
2832 	char *label;
2833 	struct vnode *vp;
2834 {
2835 	char buf[96];
2836 
2837 	if (label != NULL)
2838 		printf("%s: %p: ", label, (void *)vp);
2839 	else
2840 		printf("%p: ", (void *)vp);
2841 	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2842 	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2843 	    vp->v_writecount, vp->v_holdcnt);
2844 	buf[0] = '\0';
2845 	if (vp->v_vflag & VV_ROOT)
2846 		strcat(buf, "|VV_ROOT");
2847 	if (vp->v_vflag & VV_TEXT)
2848 		strcat(buf, "|VV_TEXT");
2849 	if (vp->v_vflag & VV_SYSTEM)
2850 		strcat(buf, "|VV_SYSTEM");
2851 	if (vp->v_iflag & VI_XLOCK)
2852 		strcat(buf, "|VI_XLOCK");
2853 	if (vp->v_iflag & VI_XWANT)
2854 		strcat(buf, "|VI_XWANT");
2855 	if (vp->v_iflag & VI_BWAIT)
2856 		strcat(buf, "|VI_BWAIT");
2857 	if (vp->v_iflag & VI_DOOMED)
2858 		strcat(buf, "|VI_DOOMED");
2859 	if (vp->v_iflag & VI_FREE)
2860 		strcat(buf, "|VI_FREE");
2861 	if (vp->v_vflag & VV_OBJBUF)
2862 		strcat(buf, "|VV_OBJBUF");
2863 	if (buf[0] != '\0')
2864 		printf(" flags (%s),", &buf[1]);
2865 	lockmgr_printinfo(vp->v_vnlock);
2866 	printf("\n");
2867 	if (vp->v_data != NULL) {
2868 		printf("\t");
2869 		VOP_PRINT(vp);
2870 	}
2871 }
2872 
2873 #ifdef DDB
2874 #include <ddb/ddb.h>
2875 /*
2876  * List all of the locked vnodes in the system.
2877  * Called when debugging the kernel.
2878  */
2879 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2880 {
2881 	struct thread *td = curthread;	/* XXX */
2882 	struct mount *mp, *nmp;
2883 	struct vnode *vp;
2884 
2885 	printf("Locked vnodes\n");
2886 	mtx_lock(&mountlist_mtx);
2887 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2888 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
2889 			nmp = TAILQ_NEXT(mp, mnt_list);
2890 			continue;
2891 		}
2892 		mtx_lock(&mntvnode_mtx);
2893 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2894 			if (VOP_ISLOCKED(vp, NULL))
2895 				vprint((char *)0, vp);
2896 		}
2897 		mtx_unlock(&mntvnode_mtx);
2898 		mtx_lock(&mountlist_mtx);
2899 		nmp = TAILQ_NEXT(mp, mnt_list);
2900 		vfs_unbusy(mp, td);
2901 	}
2902 	mtx_unlock(&mountlist_mtx);
2903 }
2904 #endif
2905 
2906 /*
2907  * Fill in a struct xvfsconf based on a struct vfsconf.
2908  */
2909 static void
2910 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2911 {
2912 
2913 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2914 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2915 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2916 	xvfsp->vfc_flags = vfsp->vfc_flags;
2917 	/*
2918 	 * These are unused in userland, we keep them
2919 	 * to not break binary compatibility.
2920 	 */
2921 	xvfsp->vfc_vfsops = NULL;
2922 	xvfsp->vfc_next = NULL;
2923 }
2924 
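/*
 * Sysctl handler that reports the configured filesystems as an
 * array of struct xvfsconf entries.
 */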
2925 static int
2926 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2927 {
2928 	struct vfsconf *vfsp;
2929 	struct xvfsconf *xvfsp;
2930 	int cnt, error, i;
2931 
2932 	cnt = 0;
2933 	for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
2934 		cnt++;
2935 	xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, M_WAITOK);
2936 	/*
2937 	 * Handle the race that we will have here when struct vfsconf
2938 	 * will be locked down by using both cnt and checking vfc_next
2939 	 * against NULL to determine the end of the loop.  The race will
2940 	 * happen because we will have to unlock before calling malloc().
2941 	 * We are protected by Giant for now.
2942 	 */
2943 	i = 0;
2944 	for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
2945 		vfsconf2x(vfsp, xvfsp + i);
2946 		i++;
2947 	}
2948 	error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
2949 	free(xvfsp, M_TEMP);
2950 	return (error);
2951 }
2952 
2953 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2954     "S,xvfsconf", "List of all configured filesystems");
2955 
2956 /*
2957  * Top level filesystem related information gathering.
2958  */
2959 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2960 
2961 static int
2962 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2963 {
2964 	int *name = (int *)arg1 - 1;	/* XXX */
2965 	u_int namelen = arg2 + 1;	/* XXX */
2966 	struct vfsconf *vfsp;
2967 	struct xvfsconf xvfsp;
2968 
2969 	printf("WARNING: userland calling deprecated sysctl, "
2970 	    "please rebuild world\n");
2971 
2972 #if 1 || defined(COMPAT_PRELITE2)
2973 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2974 	if (namelen == 1)
2975 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2976 #endif
2977 
2978 	switch (name[1]) {
2979 	case VFS_MAXTYPENUM:
2980 		if (namelen != 2)
2981 			return (ENOTDIR);
2982 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2983 	case VFS_CONF:
2984 		if (namelen != 3)
2985 			return (ENOTDIR);	/* overloaded */
2986 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2987 			if (vfsp->vfc_typenum == name[2])
2988 				break;
2989 		if (vfsp == NULL)
2990 			return (EOPNOTSUPP);
2991 		vfsconf2x(vfsp, &xvfsp);
2992 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2993 	}
2994 	return (EOPNOTSUPP);
2995 }
2996 
2997 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
2998 	"Generic filesystem");
2999 
3000 #if 1 || defined(COMPAT_PRELITE2)
3001 
3002 static int
3003 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3004 {
3005 	int error;
3006 	struct vfsconf *vfsp;
3007 	struct ovfsconf ovfs;
3008 
3009 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
3010 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3011 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3012 		ovfs.vfc_index = vfsp->vfc_typenum;
3013 		ovfs.vfc_refcount = vfsp->vfc_refcount;
3014 		ovfs.vfc_flags = vfsp->vfc_flags;
3015 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3016 		if (error)
3017 			return error;
3018 	}
3019 	return 0;
3020 }
3021 
3022 #endif /* 1 || COMPAT_PRELITE2 */
3023 
3024 #define KINFO_VNODESLOP		10
3025 #ifdef notyet
3026 /*
3027  * Dump vnode list (via sysctl).
3028  */
3029 /* ARGSUSED */
3030 static int
3031 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3032 {
3033 	struct xvnode *xvn;
3034 	struct thread *td = req->td;
3035 	struct mount *mp;
3036 	struct vnode *vp;
3037 	int error, len, n;
3038 
3039 	/*
3040 	 * Stale numvnodes access is not fatal here.
3041 	 */
3042 	req->lock = 0;
3043 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3044 	if (!req->oldptr)
3045 		/* Make an estimate */
3046 		return (SYSCTL_OUT(req, 0, len));
3047 
3048 	sysctl_wire_old_buffer(req, 0);
3049 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3050 	n = 0;
3051 	mtx_lock(&mountlist_mtx);
3052 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3053 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3054 			continue;
3055 		mtx_lock(&mntvnode_mtx);
3056 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3057 			if (n == len)
3058 				break;
3059 			vref(vp);
3060 			xvn[n].xv_size = sizeof *xvn;
3061 			xvn[n].xv_vnode = vp;
3062 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3063 			XV_COPY(usecount);
3064 			XV_COPY(writecount);
3065 			XV_COPY(holdcnt);
3066 			XV_COPY(id);
3067 			XV_COPY(mount);
3068 			XV_COPY(numoutput);
3069 			XV_COPY(type);
3070 #undef XV_COPY
3071 			xvn[n].xv_flag = vp->v_vflag;
3072 
3073 			switch (vp->v_type) {
3074 			case VREG:
3075 			case VDIR:
3076 			case VLNK:
3077 				xvn[n].xv_dev = vp->v_cachedfs;
3078 				xvn[n].xv_ino = vp->v_cachedid;
3079 				break;
3080 			case VBLK:
3081 			case VCHR:
3082 				if (vp->v_rdev == NULL) {
3083 					vrele(vp);
3084 					continue;
3085 				}
3086 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3087 				break;
3088 			case VSOCK:
3089 				xvn[n].xv_socket = vp->v_socket;
3090 				break;
3091 			case VFIFO:
3092 				xvn[n].xv_fifo = vp->v_fifoinfo;
3093 				break;
3094 			case VNON:
3095 			case VBAD:
3096 			default:
3097 				/* shouldn't happen? */
3098 				vrele(vp);
3099 				continue;
3100 			}
3101 			vrele(vp);
3102 			++n;
3103 		}
3104 		mtx_unlock(&mntvnode_mtx);
3105 		mtx_lock(&mountlist_mtx);
3106 		vfs_unbusy(mp, td);
3107 		if (n == len)
3108 			break;
3109 	}
3110 	mtx_unlock(&mountlist_mtx);
3111 
3112 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3113 	free(xvn, M_TEMP);
3114 	return (error);
3115 }
3116 
3117 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3118 	0, 0, sysctl_vnode, "S,xvnode", "");
3119 #endif
3120 
3121 /*
3122  * Check to see if a filesystem is mounted on a block device.
3123  */
3124 int
3125 vfs_mountedon(vp)
3126 	struct vnode *vp;
3127 {
3128 
3129 	if (vp->v_rdev->si_mountpoint != NULL)
3130 		return (EBUSY);
3131 	return (0);
3132 }
3133 
3134 /*
3135  * Unmount all filesystems. The list is traversed in reverse order
3136  * of mounting to avoid dependencies.
3137  */
3138 void
3139 vfs_unmountall()
3140 {
3141 	struct mount *mp;
3142 	struct thread *td;
3143 	int error;
3144 
3145 	if (curthread != NULL)
3146 		td = curthread;
3147 	else
3148 		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3149 	/*
3150 	 * Since this only runs when rebooting, it is not interlocked.
3151 	 */
3152 	while (!TAILQ_EMPTY(&mountlist)) {
3153 		mp = TAILQ_LAST(&mountlist, mntlist);
3154 		error = dounmount(mp, MNT_FORCE, td);
3155 		if (error) {
3156 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3157 			printf("unmount of %s failed (",
3158 			    mp->mnt_stat.f_mntonname);
3159 			if (error == EBUSY)
3160 				printf("BUSY)\n");
3161 			else
3162 				printf("%d)\n", error);
3163 		} else {
3164 			/* The unmount has removed mp from the mountlist */
3165 		}
3166 	}
3167 }
3168 
3169 /*
3170  * Perform msync on all vnodes under a mount point.
3171  * The mount point must be locked.
3172  */
3173 void
3174 vfs_msync(struct mount *mp, int flags)
3175 {
3176 	struct vnode *vp, *nvp;
3177 	struct vm_object *obj;
3178 	int tries;
3179 
3180 	GIANT_REQUIRED;
3181 
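	/*
	 * The mount's vnode list can change while mntvnode_mtx is
	 * dropped below; "tries" bounds how many times we are willing
	 * to restart the scan before giving up.
	 */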
3182 	tries = 5;
3183 	mtx_lock(&mntvnode_mtx);
3184 loop:
3185 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
3186 		if (vp->v_mount != mp) {
3187 			if (--tries > 0)
3188 				goto loop;
3189 			break;
3190 		}
3191 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
3192 
3193 		VI_LOCK(vp);
3194 		if (vp->v_iflag & VI_XLOCK) {	/* XXX: what if MNT_WAIT? */
3195 			VI_UNLOCK(vp);
3196 			continue;
3197 		}
3198 
3199 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3200 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3201 			mtx_unlock(&mntvnode_mtx);
3202 			if (!vget(vp,
3203 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3204 			    curthread)) {
3205 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3206 					vput(vp);
3207 					mtx_lock(&mntvnode_mtx);
3208 					continue;
3209 				}
3210 
3211 				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3212 					vm_object_page_clean(obj, 0, 0,
3213 					    flags == MNT_WAIT ?
3214 					    OBJPC_SYNC : OBJPC_NOSYNC);
3215 				}
3216 				vput(vp);
3217 			}
3218 			mtx_lock(&mntvnode_mtx);
3219 			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3220 				if (--tries > 0)
3221 					goto loop;
3222 				break;
3223 			}
3224 		} else
3225 			VI_UNLOCK(vp);
3226 	}
3227 	mtx_unlock(&mntvnode_mtx);
3228 }
3229 
3230 /*
3231  * Create the VM object needed for VMIO and mmap support.  This
3232  * is done for all VREG files in the system.  Some filesystems might
3233  * afford the additional metadata buffering capability of the
3234  * VMIO code by making the device node be VMIO mode also.
3235  *
3236  * vp must be locked when vfs_object_create is called.
3237  */
3238 int
3239 vfs_object_create(vp, td, cred)
3240 	struct vnode *vp;
3241 	struct thread *td;
3242 	struct ucred *cred;
3243 {
3244 	GIANT_REQUIRED;
3245 	return (VOP_CREATEVOBJECT(vp, cred, td));
3246 }
3247 
3248 /*
3249  * Mark a vnode as free, putting it up for recycling.
3250  */
3251 void
3252 vfree(vp)
3253 	struct vnode *vp;
3254 {
3255 	int s;
3256 
3257 	ASSERT_VI_LOCKED(vp, "vfree");
3258 	s = splbio();
3259 	mtx_lock(&vnode_free_list_mtx);
3260 	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
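	/*
	 * Vnodes marked VI_AGE go to the head of the free list so that
	 * they become candidates for reuse sooner.
	 */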
3261 	if (vp->v_iflag & VI_AGE) {
3262 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3263 	} else {
3264 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3265 	}
3266 	freevnodes++;
3267 	mtx_unlock(&vnode_free_list_mtx);
3268 	vp->v_iflag &= ~VI_AGE;
3269 	vp->v_iflag |= VI_FREE;
3270 	splx(s);
3271 }
3272 
3273 /*
3274  * Opposite of vfree() - mark a vnode as in use.
3275  */
3276 void
3277 vbusy(vp)
3278 	struct vnode *vp;
3279 {
3280 	int s;
3281 
3282 	s = splbio();
3283 	ASSERT_VI_LOCKED(vp, "vbusy");
3284 	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3285 
3286 	mtx_lock(&vnode_free_list_mtx);
3287 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3288 	freevnodes--;
3289 	mtx_unlock(&vnode_free_list_mtx);
3290 
3291 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3292 	splx(s);
3293 }
3294 
3295 /*
3296  * Record a process's interest in events which might happen to
3297  * a vnode.  Because poll uses the historic select-style interface
3298  * internally, this routine serves as both the ``check for any
3299  * pending events'' and the ``record my interest in future events''
3300  * functions.  (These are done together, while the lock is held,
3301  * to avoid race conditions.)
3302  */
3303 int
3304 vn_pollrecord(vp, td, events)
3305 	struct vnode *vp;
3306 	struct thread *td;
3307 	short events;
3308 {
3309 
3310 	if (vp->v_pollinfo == NULL)
3311 		v_addpollinfo(vp);
3312 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3313 	if (vp->v_pollinfo->vpi_revents & events) {
3314 		/*
3315 		 * This leaves events we are not interested
3316 		 * in available for the other process which
3317 		 * presumably had requested them
3318 		 * (otherwise they would never have been
3319 		 * recorded).
3320 		 */
3321 		events &= vp->v_pollinfo->vpi_revents;
3322 		vp->v_pollinfo->vpi_revents &= ~events;
3323 
3324 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3325 		return events;
3326 	}
3327 	vp->v_pollinfo->vpi_events |= events;
3328 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3329 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3330 	return 0;
3331 }
3332 
3333 /*
3334  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3335  * it is possible for us to miss an event due to race conditions, but
3336  * that condition is expected to be rare, so for the moment it is the
3337  * preferred interface.
3338  */
3339 void
3340 vn_pollevent(vp, events)
3341 	struct vnode *vp;
3342 	short events;
3343 {
3344 
3345 	if (vp->v_pollinfo == NULL)
3346 		v_addpollinfo(vp);
3347 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3348 	if (vp->v_pollinfo->vpi_events & events) {
3349 		/*
3350 		 * We clear vpi_events so that we don't
3351 		 * call selwakeup() twice if two events are
3352 		 * posted before the polling process(es) is
3353 		 * awakened.  This also ensures that we take at
3354 		 * most one selwakeup() if the polling process
3355 		 * is no longer interested.  However, it does
3356 		 * mean that only one event can be noticed at
3357 		 * a time.  (Perhaps we should only clear those
3358 		 * event bits which we note?) XXX
3359 		 */
3360 		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3361 		vp->v_pollinfo->vpi_revents |= events;
3362 		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3363 	}
3364 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3365 }
3366 
3367 /*
3368  * Wake up anyone polling on vp because it is being revoked.
3369  * This depends on dead_poll() returning POLLHUP for correct
3370  * behavior.
3371  */
3372 void
3373 vn_pollgone(vp)
3374 	struct vnode *vp;
3375 {
3376 
3377 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3378 	VN_KNOTE(vp, NOTE_REVOKE);
3379 	if (vp->v_pollinfo->vpi_events) {
3380 		vp->v_pollinfo->vpi_events = 0;
3381 		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3382 	}
3383 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3384 }
3385 
3386 
3387 
3388 /*
3389  * Routine to create and manage a filesystem syncer vnode.
3390  */
3391 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3392 static int	sync_fsync(struct  vop_fsync_args *);
3393 static int	sync_inactive(struct  vop_inactive_args *);
3394 static int	sync_reclaim(struct  vop_reclaim_args *);
3395 static int	sync_print(struct vop_print_args *);
3396 
3397 static vop_t **sync_vnodeop_p;
3398 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3399 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3400 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3401 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3402 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3403 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3404 	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3405 	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3406 	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
3407 	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3408 	{ NULL, NULL }
3409 };
3410 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3411 	{ &sync_vnodeop_p, sync_vnodeop_entries };
3412 
3413 VNODEOP_SET(sync_vnodeop_opv_desc);
3414 
3415 /*
3416  * Create a new filesystem syncer vnode for the specified mount point.
3417  */
3418 int
3419 vfs_allocate_syncvnode(mp)
3420 	struct mount *mp;
3421 {
3422 	struct vnode *vp;
3423 	static long start, incr, next;
3424 	int error;
3425 
3426 	/* Allocate a new vnode */
3427 	if ((error = getnewvnode("vfs", mp, sync_vnodeop_p, &vp)) != 0) {
3428 		mp->mnt_syncer = NULL;
3429 		return (error);
3430 	}
3431 	vp->v_type = VNON;
3432 	/*
3433 	 * Place the vnode onto the syncer worklist. We attempt to
3434 	 * scatter them about on the list so that they will go off
3435 	 * at evenly distributed times even if all the filesystems
3436 	 * are mounted at once.
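	 * Each time "next" runs past syncer_maxdelay the step is halved,
	 * so later mounts land between the slots already in use.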
3437 	 */
3438 	next += incr;
3439 	if (next == 0 || next > syncer_maxdelay) {
3440 		start /= 2;
3441 		incr /= 2;
3442 		if (start == 0) {
3443 			start = syncer_maxdelay / 2;
3444 			incr = syncer_maxdelay;
3445 		}
3446 		next = start;
3447 	}
3448 	VI_LOCK(vp);
3449 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3450 	VI_UNLOCK(vp);
3451 	mp->mnt_syncer = vp;
3452 	return (0);
3453 }
3454 
3455 /*
3456  * Do a lazy sync of the filesystem.
3457  */
3458 static int
3459 sync_fsync(ap)
3460 	struct vop_fsync_args /* {
3461 		struct vnode *a_vp;
3462 		struct ucred *a_cred;
3463 		int a_waitfor;
3464 		struct thread *a_td;
3465 	} */ *ap;
3466 {
3467 	struct vnode *syncvp = ap->a_vp;
3468 	struct mount *mp = syncvp->v_mount;
3469 	struct thread *td = ap->a_td;
3470 	int error, asyncflag;
3471 
3472 	/*
3473 	 * We only need to do something if this is a lazy evaluation.
3474 	 */
3475 	if (ap->a_waitfor != MNT_LAZY)
3476 		return (0);
3477 
3478 	/*
3479 	 * Move ourselves to the back of the sync list.
3480 	 */
3481 	VI_LOCK(syncvp);
3482 	vn_syncer_add_to_worklist(syncvp, syncdelay);
3483 	VI_UNLOCK(syncvp);
3484 
3485 	/*
3486 	 * Walk the list of vnodes pushing all that are dirty and
3487 	 * not already on the sync list.
3488 	 */
3489 	mtx_lock(&mountlist_mtx);
3490 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3491 		mtx_unlock(&mountlist_mtx);
3492 		return (0);
3493 	}
3494 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3495 		vfs_unbusy(mp, td);
3496 		return (0);
3497 	}
3498 	asyncflag = mp->mnt_flag & MNT_ASYNC;
3499 	mp->mnt_flag &= ~MNT_ASYNC;
3500 	vfs_msync(mp, MNT_NOWAIT);
3501 	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3502 	if (asyncflag)
3503 		mp->mnt_flag |= MNT_ASYNC;
3504 	vn_finished_write(mp);
3505 	vfs_unbusy(mp, td);
3506 	return (error);
3507 }
3508 
3509 /*
3510  * The syncer vnode is no longer referenced.
3511  */
3512 static int
3513 sync_inactive(ap)
3514 	struct vop_inactive_args /* {
3515 		struct vnode *a_vp;
3516 		struct thread *a_td;
3517 	} */ *ap;
3518 {
3519 
3520 	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3521 	vgone(ap->a_vp);
3522 	return (0);
3523 }
3524 
3525 /*
3526  * The syncer vnode is no longer needed and is being decommissioned.
3527  *
3528  * Modifications to the worklist must be protected at splbio().
3529  */
3530 static int
3531 sync_reclaim(ap)
3532 	struct vop_reclaim_args /* {
3533 		struct vnode *a_vp;
3534 	} */ *ap;
3535 {
3536 	struct vnode *vp = ap->a_vp;
3537 	int s;
3538 
3539 	s = splbio();
3540 	vp->v_mount->mnt_syncer = NULL;
3541 	VI_LOCK(vp);
3542 	if (vp->v_iflag & VI_ONWORKLST) {
3543 		mtx_lock(&sync_mtx);
3544 		LIST_REMOVE(vp, v_synclist);
3545 		mtx_unlock(&sync_mtx);
3546 		vp->v_iflag &= ~VI_ONWORKLST;
3547 	}
3548 	VI_UNLOCK(vp);
3549 	splx(s);
3550 
3551 	return (0);
3552 }
3553 
3554 /*
3555  * Print out a syncer vnode.
3556  */
3557 static int
3558 sync_print(ap)
3559 	struct vop_print_args /* {
3560 		struct vnode *a_vp;
3561 	} */ *ap;
3562 {
3563 	struct vnode *vp = ap->a_vp;
3564 
3565 	printf("syncer vnode");
3566 	if (vp->v_vnlock != NULL)
3567 		lockmgr_printinfo(vp->v_vnlock);
3568 	printf("\n");
3569 	return (0);
3570 }
3571 
3572 /*
3573  * Extract the dev_t from a VCHR vnode.
3574  */
3575 dev_t
3576 vn_todev(vp)
3577 	struct vnode *vp;
3578 {
3579 	if (vp->v_type != VCHR)
3580 		return (NODEV);
3581 	return (vp->v_rdev);
3582 }
3583 
3584 /*
3585  * Check whether a vnode represents a disk device.
3586  */
3587 int
3588 vn_isdisk(vp, errp)
3589 	struct vnode *vp;
3590 	int *errp;
3591 {
3592 	struct cdevsw *cdevsw;
3593 
3594 	if (vp->v_type != VCHR) {
3595 		if (errp != NULL)
3596 			*errp = ENOTBLK;
3597 		return (0);
3598 	}
3599 	if (vp->v_rdev == NULL) {
3600 		if (errp != NULL)
3601 			*errp = ENXIO;
3602 		return (0);
3603 	}
3604 	cdevsw = devsw(vp->v_rdev);
3605 	if (cdevsw == NULL) {
3606 		if (errp != NULL)
3607 			*errp = ENXIO;
3608 		return (0);
3609 	}
3610 	if (!(cdevsw->d_flags & D_DISK)) {
3611 		if (errp != NULL)
3612 			*errp = ENOTBLK;
3613 		return (0);
3614 	}
3615 	if (errp != NULL)
3616 		*errp = 0;
3617 	return (1);
3618 }
3619 
3620 /*
3621  * Free data allocated by namei(); see namei(9) for details.
3622  */
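 * Each NDF_NO_* bit in "flags" suppresses the corresponding piece of
 * cleanup, allowing callers to keep buffers or references they still need.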
3623 void
3624 NDFREE(ndp, flags)
3625      struct nameidata *ndp;
3626      const uint flags;
3627 {
3628 	if (!(flags & NDF_NO_FREE_PNBUF) &&
3629 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3630 		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3631 		ndp->ni_cnd.cn_flags &= ~HASBUF;
3632 	}
3633 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3634 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3635 	    ndp->ni_dvp != ndp->ni_vp)
3636 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3637 	if (!(flags & NDF_NO_DVP_RELE) &&
3638 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3639 		vrele(ndp->ni_dvp);
3640 		ndp->ni_dvp = NULL;
3641 	}
3642 	if (!(flags & NDF_NO_VP_UNLOCK) &&
3643 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3644 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3645 	if (!(flags & NDF_NO_VP_RELE) &&
3646 	    ndp->ni_vp) {
3647 		vrele(ndp->ni_vp);
3648 		ndp->ni_vp = NULL;
3649 	}
3650 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3651 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3652 		vrele(ndp->ni_startdir);
3653 		ndp->ni_startdir = NULL;
3654 	}
3655 }
3656 
3657 /*
3658  * Common filesystem object access control check routine.  Accepts a
3659  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3660  * and optional call-by-reference privused argument allowing vaccess()
3661  * to indicate to the caller whether privilege was used to satisfy the
3662  * request (obsoleted).  Returns 0 on success, or an errno on failure.
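 *
 * For example, with file_mode 0640 a caller that is a member of
 * file_gid but is not the owner and asks only for VREAD is granted
 * access by the group clause below, while a VWRITE request from the
 * same caller falls through to the privilege checks.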
3663  */
3664 int
3665 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3666 	enum vtype type;
3667 	mode_t file_mode;
3668 	uid_t file_uid;
3669 	gid_t file_gid;
3670 	mode_t acc_mode;
3671 	struct ucred *cred;
3672 	int *privused;
3673 {
3674 	mode_t dac_granted;
3675 #ifdef CAPABILITIES
3676 	mode_t cap_granted;
3677 #endif
3678 
3679 	/*
3680 	 * Look for a normal, non-privileged way to access the file/directory
3681 	 * as requested.  If it exists, go with that.
3682 	 */
3683 
3684 	if (privused != NULL)
3685 		*privused = 0;
3686 
3687 	dac_granted = 0;
3688 
3689 	/* Check the owner. */
3690 	if (cred->cr_uid == file_uid) {
3691 		dac_granted |= VADMIN;
3692 		if (file_mode & S_IXUSR)
3693 			dac_granted |= VEXEC;
3694 		if (file_mode & S_IRUSR)
3695 			dac_granted |= VREAD;
3696 		if (file_mode & S_IWUSR)
3697 			dac_granted |= (VWRITE | VAPPEND);
3698 
3699 		if ((acc_mode & dac_granted) == acc_mode)
3700 			return (0);
3701 
3702 		goto privcheck;
3703 	}
3704 
3705 	/* Otherwise, check the groups (first match) */
3706 	if (groupmember(file_gid, cred)) {
3707 		if (file_mode & S_IXGRP)
3708 			dac_granted |= VEXEC;
3709 		if (file_mode & S_IRGRP)
3710 			dac_granted |= VREAD;
3711 		if (file_mode & S_IWGRP)
3712 			dac_granted |= (VWRITE | VAPPEND);
3713 
3714 		if ((acc_mode & dac_granted) == acc_mode)
3715 			return (0);
3716 
3717 		goto privcheck;
3718 	}
3719 
3720 	/* Otherwise, check everyone else. */
3721 	if (file_mode & S_IXOTH)
3722 		dac_granted |= VEXEC;
3723 	if (file_mode & S_IROTH)
3724 		dac_granted |= VREAD;
3725 	if (file_mode & S_IWOTH)
3726 		dac_granted |= (VWRITE | VAPPEND);
3727 	if ((acc_mode & dac_granted) == acc_mode)
3728 		return (0);
3729 
3730 privcheck:
3731 	if (!suser_cred(cred, PRISON_ROOT)) {
3732 		/* XXX audit: privilege used */
3733 		if (privused != NULL)
3734 			*privused = 1;
3735 		return (0);
3736 	}
3737 
3738 #ifdef CAPABILITIES
3739 	/*
3740 	 * Build a capability mask to determine if the set of capabilities
3741 	 * satisfies the requirements when combined with the granted mask
3742 	 * from above.
3743 	 * For each capability, if the capability is required, bitwise
3744 	 * or the request type onto the cap_granted mask.
3745 	 */
3746 	cap_granted = 0;
3747 
3748 	if (type == VDIR) {
3749 		/*
3750 		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3751 		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3752 		 */
3753 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3754 		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3755 			cap_granted |= VEXEC;
3756 	} else {
3757 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3758 		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
3759 			cap_granted |= VEXEC;
3760 	}
3761 
3762 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3763 	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3764 		cap_granted |= VREAD;
3765 
3766 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3767 	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
3768 		cap_granted |= (VWRITE | VAPPEND);
3769 
3770 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3771 	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
3772 		cap_granted |= VADMIN;
3773 
3774 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3775 		/* XXX audit: privilege used */
3776 		if (privused != NULL)
3777 			*privused = 1;
3778 		return (0);
3779 	}
3780 #endif
3781 
3782 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3783 }
3784 
3785 /*
3786  * Credential check based on process requesting service, and per-attribute
3787  * permissions.
3788  */
3789 int
3790 extattr_check_cred(struct vnode *vp, int attrnamespace,
3791     struct ucred *cred, struct thread *td, int access)
3792 {
3793 
3794 	/*
3795 	 * Kernel-invoked requests always succeed.
3796 	 */
3797 	if (cred == NOCRED)
3798 		return (0);
3799 
3800 	/*
3801 	 * Do not allow privileged processes in jail to directly
3802 	 * manipulate system attributes.
3803 	 *
3804 	 * XXX What capability should apply here?
3805 	 * Probably CAP_SYS_SETFFLAG.
3806 	 */
3807 	switch (attrnamespace) {
3808 	case EXTATTR_NAMESPACE_SYSTEM:
3809 		/* Potentially should be: return (EPERM); */
3810 		return (suser_cred(cred, 0));
3811 	case EXTATTR_NAMESPACE_USER:
3812 		return (VOP_ACCESS(vp, access, cred, td));
3813 	default:
3814 		return (EPERM);
3815 	}
3816 }
3817