xref: /freebsd/sys/kern/vfs_subr.c (revision 4b2eaea43fec8e8792be611dea204071a10b655a)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
39  * $FreeBSD$
40  */
41 
42 /*
43  * External virtual filesystem routines
44  */
45 #include "opt_ddb.h"
46 #include "opt_mac.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/conf.h>
53 #include <sys/eventhandler.h>
54 #include <sys/extattr.h>
55 #include <sys/fcntl.h>
56 #include <sys/kernel.h>
57 #include <sys/kthread.h>
58 #include <sys/mac.h>
59 #include <sys/malloc.h>
60 #include <sys/mount.h>
61 #include <sys/namei.h>
62 #include <sys/stat.h>
63 #include <sys/sysctl.h>
64 #include <sys/syslog.h>
65 #include <sys/vmmeter.h>
66 #include <sys/vnode.h>
67 
68 #include <vm/vm.h>
69 #include <vm/vm_object.h>
70 #include <vm/vm_extern.h>
71 #include <vm/pmap.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_page.h>
74 #include <vm/uma.h>
75 
76 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
77 
78 static void	addalias(struct vnode *vp, dev_t nvp_rdev);
79 static void	insmntque(struct vnode *vp, struct mount *mp);
80 static void	vclean(struct vnode *vp, int flags, struct thread *td);
81 static void	vlruvp(struct vnode *vp);
82 static int	flushbuflist(struct buf *blist, int flags, struct vnode *vp,
83 		    int slpflag, int slptimeo, int *errorp);
84 static int	vcanrecycle(struct vnode *vp, struct mount **vnmpp);
85 
86 
87 /*
88  * Number of vnodes in existence.  Increased whenever getnewvnode()
89  * allocates a new vnode, never decreased.
90  */
91 static unsigned long	numvnodes;
92 
93 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
94 
95 /*
96  * Conversion tables for conversion from vnode types to inode formats
97  * and back.
98  */
99 enum vtype iftovt_tab[16] = {
100 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
101 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
102 };
103 int vttoif_tab[9] = {
104 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
105 	S_IFSOCK, S_IFIFO, S_IFMT,
106 };
107 
108 /*
109  * List of vnodes that are ready for recycling.
110  */
111 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
112 
113 /*
114  * Minimum number of free vnodes.  If there are fewer free vnodes than this,
115  * getnewvnode() will return a newly allocated vnode.
116  */
117 static u_long wantfreevnodes = 25;
118 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
119 /* Number of vnodes in the free list. */
120 static u_long freevnodes;
121 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
122 
123 /*
124  * Various variables used for debugging the new implementation of
125  * reassignbuf().
126  * XXX these are probably of (very) limited utility now.
127  */
128 static int reassignbufcalls;
129 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
130 static int nameileafonly;
131 SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");
132 
133 #ifdef ENABLE_VFS_IOOPT
134 /* See NOTES for a description of this setting. */
135 int vfs_ioopt;
136 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
137 #endif
138 
139 /*
140  * Cache for the mount type id assigned to NFS.  This is used for
141  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
142  */
143 int	nfs_mount_type = -1;
144 
145 /* To keep more than one thread at a time from running vfs_getnewfsid */
146 static struct mtx mntid_mtx;
147 
148 /*
149  * Lock for any access to the following:
150  *	vnode_free_list
151  *	numvnodes
152  *	freevnodes
153  */
154 static struct mtx vnode_free_list_mtx;
155 
156 /*
157  * For any iteration/modification of dev->si_hlist (linked through
158  * v_specnext)
159  */
160 static struct mtx spechash_mtx;
161 
162 /* Publicly exported FS */
163 struct nfs_public nfs_pub;
164 
165 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
166 static uma_zone_t vnode_zone;
167 static uma_zone_t vnodepoll_zone;
168 
169 /* Set to 1 to print out reclaim of active vnodes */
170 int	prtactive;
171 
172 /*
173  * The workitem queue.
174  *
175  * It is useful to delay writes of file data and filesystem metadata
176  * for tens of seconds so that quickly created and deleted files need
177  * not waste disk bandwidth being created and removed. To realize this,
178  * we append vnodes to a "workitem" queue. When running with a soft
179  * updates implementation, most pending metadata dependencies should
180  * not wait for more than a few seconds. Thus, mounted on block devices
181  * are delayed only about a half the time that file data is delayed.
182  * Similarly, directory updates are more critical, so are only delayed
183  * about a third the time that file data is delayed. Thus, there are
184  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
185  * one each second (driven off the filesystem syncer process). The
186  * syncer_delayno variable indicates the next queue that is to be processed.
187  * Items that need to be processed soon are placed in this queue:
188  *
189  *	syncer_workitem_pending[syncer_delayno]
190  *
191  * A delay of fifteen seconds is done by placing the request fifteen
192  * entries later in the queue:
193  *
194  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
195  *
196  */
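/*
 * A worked example (a sketch only; it assumes the default SYNCER_MAXDELAY
 * of 32, so syncer_mask is 31): with syncer_delayno == 20 and a requested
 * delay of fifteen seconds, vn_syncer_add_to_worklist() computes
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;	so (20 + 15) & 31 == 3
 *
 * and the syncer, advancing one queue per second, reaches slot 3 again
 * fifteen seconds from now.
 */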
197 static int syncer_delayno;
198 static long syncer_mask;
199 LIST_HEAD(synclist, vnode);
200 static struct synclist *syncer_workitem_pending;
201 /*
202  * The sync_mtx protects:
203  *	vp->v_synclist
204  *	syncer_delayno
205  *	syncer_workitem_pending
206  *	rushjob
207  */
208 static struct mtx sync_mtx;
209 
210 #define SYNCER_MAXDELAY		32
211 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
212 static int syncdelay = 30;		/* max time to delay syncing data */
213 static int filedelay = 30;		/* time to delay syncing files */
214 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
215 static int dirdelay = 29;		/* time to delay syncing directories */
216 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
217 static int metadelay = 28;		/* time to delay syncing metadata */
218 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
219 static int rushjob;		/* number of slots to run ASAP */
220 static int stat_rush_requests;	/* number of times I/O speeded up */
221 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
222 
223 /*
224  * Number of vnodes we want to exist at any one time.  This is mostly used
225  * to size hash tables in vnode-related code.  It is normally not used in
226  * getnewvnode(), as wantfreevnodes is normally nonzero.
227  *
228  * XXX desiredvnodes is historical cruft and should not exist.
229  */
230 int desiredvnodes;
231 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
232     &desiredvnodes, 0, "Maximum number of vnodes");
233 static int minvnodes;
234 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
235     &minvnodes, 0, "Minimum number of vnodes");
236 static int vnlru_nowhere;
237 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
238     "Number of times the vnlru process ran without success");
239 
240 /* Hook for calling soft updates */
241 int (*softdep_process_worklist_hook)(struct mount *);
242 
243 /*
244  * This only exists to suppress warnings from unlocked specfs accesses.  It is
245  * no longer ok to have an unlocked VFS.
246  */
247 #define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
248 
249 /* Print lock violations */
250 int vfs_badlock_print = 1;
251 
252 /* Panic on violation */
253 int vfs_badlock_panic = 1;
254 
255 /* Check for interlock across VOPs */
256 int vfs_badlock_mutex = 1;
257 
258 static void
259 vfs_badlock(char *msg, char *str, struct vnode *vp)
260 {
261 	if (vfs_badlock_print)
262 		printf("%s: %p %s\n", str, vp, msg);
263 	if (vfs_badlock_panic)
264 		Debugger("Lock violation.\n");
265 }
266 
267 void
268 assert_vi_unlocked(struct vnode *vp, char *str)
269 {
270 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
271 		vfs_badlock("interlock is locked but should not be", str, vp);
272 }
273 
274 void
275 assert_vi_locked(struct vnode *vp, char *str)
276 {
277 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
278 		vfs_badlock("interlock is not locked but should be", str, vp);
279 }
280 
281 void
282 assert_vop_locked(struct vnode *vp, char *str)
283 {
284 	if (vp && !IGNORE_LOCK(vp) && !VOP_ISLOCKED(vp, NULL))
285 		vfs_badlock("is not locked but should be", str, vp);
286 }
287 
288 void
289 assert_vop_unlocked(struct vnode *vp, char *str)
290 {
291 	if (vp && !IGNORE_LOCK(vp) &&
292 	    VOP_ISLOCKED(vp, curthread) == LK_EXCLUSIVE)
293 		vfs_badlock("is locked but should not be", str, vp);
294 }
295 
296 void
297 assert_vop_elocked(struct vnode *vp, char *str)
298 {
299 	if (vp && !IGNORE_LOCK(vp) &&
300 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLUSIVE)
301 		vfs_badlock("is not exclusive locked but should be", str, vp);
302 }
303 
304 void
305 assert_vop_elocked_other(struct vnode *vp, char *str)
306 {
307 	if (vp && !IGNORE_LOCK(vp) &&
308 	    VOP_ISLOCKED(vp, curthread) != LK_EXCLOTHER)
309 		vfs_badlock("is not exclusive locked by another thread",
310 		    str, vp);
311 }
312 
313 void
314 assert_vop_slocked(struct vnode *vp, char *str)
315 {
316 	if (vp && !IGNORE_LOCK(vp) &&
317 	    VOP_ISLOCKED(vp, curthread) != LK_SHARED)
318 		vfs_badlock("is not locked shared but should be", str, vp);
319 }
320 
321 void
322 vop_rename_pre(void *ap)
323 {
324 	struct vop_rename_args *a = ap;
325 
326 	if (a->a_tvp)
327 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
328 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
329 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
330 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
331 
332 	/* Check the source (from) */
333 	if (a->a_tdvp != a->a_fdvp)
334 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked.\n");
335 	if (a->a_tvp != a->a_fvp)
336 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked.\n");
337 
338 	/* Check the target */
339 	if (a->a_tvp)
340 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked.\n");
341 
342 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked.\n");
343 }
344 
345 void
346 vop_strategy_pre(void *ap)
347 {
348 	struct vop_strategy_args *a = ap;
349 	struct buf *bp;
350 
351 	bp = a->a_bp;
352 
353 	/*
354 	 * Cluster ops lock their component buffers but not the IO container.
355 	 */
356 	if ((bp->b_flags & B_CLUSTER) != 0)
357 		return;
358 
359 	if (BUF_REFCNT(bp) < 1) {
360 		if (vfs_badlock_print)
361 			printf("VOP_STRATEGY: bp is not locked but should be.\n");
362 		if (vfs_badlock_panic)
363 			Debugger("Lock violation.\n");
364 	}
365 }
366 
367 void
368 vop_lookup_pre(void *ap)
369 {
370 	struct vop_lookup_args *a = ap;
371 	struct vnode *dvp;
372 
373 	dvp = a->a_dvp;
374 
375 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
376 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
377 }
378 
379 void
380 vop_lookup_post(void *ap, int rc)
381 {
382 	struct vop_lookup_args *a = ap;
383 	struct componentname *cnp;
384 	struct vnode *dvp;
385 	struct vnode *vp;
386 	int flags;
387 
388 	dvp = a->a_dvp;
389 	cnp = a->a_cnp;
390 	vp = *(a->a_vpp);
391 	flags = cnp->cn_flags;
392 
393 
394 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
395 	/*
396 	 * If this is the last path component for this lookup and LOCKPARENT
397 	 * is set, or if there is an error, the directory has to be locked.
398 	 */
399 	if ((flags & LOCKPARENT) && (flags & ISLASTCN))
400 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (LOCKPARENT)");
401 	else if (rc != 0)
402 		ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP (error)");
403 	else if (dvp != vp)
404 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (dvp)");
405 
406 	if (flags & PDIRUNLOCK)
407 		ASSERT_VOP_UNLOCKED(dvp, "VOP_LOOKUP (PDIRUNLOCK)");
408 }
409 
410 void
411 vop_unlock_pre(void *ap)
412 {
413 	struct vop_unlock_args *a = ap;
414 
415 	if (a->a_flags & LK_INTERLOCK)
416 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
417 
418 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
419 }
420 
421 void
422 vop_unlock_post(void *ap, int rc)
423 {
424 	struct vop_unlock_args *a = ap;
425 
426 	if (a->a_flags & LK_INTERLOCK)
427 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
428 }
429 
430 void
431 vop_lock_pre(void *ap)
432 {
433 	struct vop_lock_args *a = ap;
434 
435 	if ((a->a_flags & LK_INTERLOCK) == 0)
436 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
437 	else
438 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
439 }
440 
441 void
442 vop_lock_post(void *ap, int rc)
443 {
444 	struct vop_lock_args *a;
445 
446 	a = ap;
447 
448 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
449 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
450 }
451 
452 void
453 v_addpollinfo(struct vnode *vp)
454 {
455 	vp->v_pollinfo = uma_zalloc(vnodepoll_zone, 0);
456 	mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
457 }
458 
459 /*
460  * Initialize the vnode management data structures.
461  */
462 static void
463 vntblinit(void *dummy __unused)
464 {
465 
466 	desiredvnodes = maxproc + cnt.v_page_count / 4;
467 	minvnodes = desiredvnodes / 4;
468 	mtx_init(&mountlist_mtx, "mountlist", NULL, MTX_DEF);
469 	mtx_init(&mntvnode_mtx, "mntvnode", NULL, MTX_DEF);
470 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
471 	mtx_init(&spechash_mtx, "spechash", NULL, MTX_DEF);
472 	TAILQ_INIT(&vnode_free_list);
473 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
474 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
475 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
476 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
477 	      NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
478 	/*
479 	 * Initialize the filesystem syncer.
480 	 */
481 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
482 		&syncer_mask);
483 	syncer_maxdelay = syncer_mask + 1;
484 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
485 }
486 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
487 
488 
489 /*
490  * Mark a mount point as busy. Used to synchronize access and to delay
491  * unmounting. Interlock is not released on failure.
492  */
493 int
494 vfs_busy(mp, flags, interlkp, td)
495 	struct mount *mp;
496 	int flags;
497 	struct mtx *interlkp;
498 	struct thread *td;
499 {
500 	int lkflags;
501 
502 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
503 		if (flags & LK_NOWAIT)
504 			return (ENOENT);
505 		mp->mnt_kern_flag |= MNTK_MWAIT;
506 		/*
507 		 * Since all busy locks are shared except the exclusive
508 		 * lock granted when unmounting, the only place that a
509 		 * wakeup needs to be done is at the release of the
510 		 * exclusive lock at the end of dounmount.
511 		 */
512 		msleep(mp, interlkp, PVFS, "vfs_busy", 0);
513 		return (ENOENT);
514 	}
515 	lkflags = LK_SHARED | LK_NOPAUSE;
516 	if (interlkp)
517 		lkflags |= LK_INTERLOCK;
518 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, td))
519 		panic("vfs_busy: unexpected lock failure");
520 	return (0);
521 }
522 
523 /*
524  * Free a busy filesystem.
525  */
526 void
527 vfs_unbusy(mp, td)
528 	struct mount *mp;
529 	struct thread *td;
530 {
531 
532 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
533 }
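/*
 * A typical traversal of the mount list pairs vfs_busy() and vfs_unbusy()
 * around each mount point; the sketch below mirrors the loop used by
 * vnlru_proc() below and is illustrative only:
 *
 *	mtx_lock(&mountlist_mtx);
 *	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 *		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
 *			nmp = TAILQ_NEXT(mp, mnt_list);
 *			continue;
 *		}
 *		... operate on mp with the mount busied ...
 *		mtx_lock(&mountlist_mtx);
 *		nmp = TAILQ_NEXT(mp, mnt_list);
 *		vfs_unbusy(mp, td);
 *	}
 *	mtx_unlock(&mountlist_mtx);
 */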
534 
535 /*
536  * Lookup a mount point by filesystem identifier.
537  */
538 struct mount *
539 vfs_getvfs(fsid)
540 	fsid_t *fsid;
541 {
542 	register struct mount *mp;
543 
544 	mtx_lock(&mountlist_mtx);
545 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
546 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
547 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
548 			mtx_unlock(&mountlist_mtx);
549 			return (mp);
550 		}
551 	}
552 	mtx_unlock(&mountlist_mtx);
553 	return ((struct mount *) 0);
554 }
555 
556 /*
557  * Get a new unique fsid.  Try to make its val[0] unique, since this value
558  * will be used to create fake device numbers for stat().  Also try (but
559  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
560  * support 16-bit device numbers.  We end up with unique val[0]'s for the
561  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
562  *
563  * Keep in mind that several mounts may be running in parallel.  Starting
564  * the search one past where the previous search terminated is both a
565  * micro-optimization and a defense against returning the same fsid to
566  * different mounts.
567  */
568 void
569 vfs_getnewfsid(mp)
570 	struct mount *mp;
571 {
572 	static u_int16_t mntid_base;
573 	fsid_t tfsid;
574 	int mtype;
575 
576 	mtx_lock(&mntid_mtx);
577 	mtype = mp->mnt_vfc->vfc_typenum;
578 	tfsid.val[1] = mtype;
579 	mtype = (mtype & 0xFF) << 24;
580 	for (;;) {
581 		tfsid.val[0] = makeudev(255,
582 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
583 		mntid_base++;
584 		if (vfs_getvfs(&tfsid) == NULL)
585 			break;
586 	}
587 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
588 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
589 	mtx_unlock(&mntid_mtx);
590 }
591 
592 /*
593  * Knob to control the precision of file timestamps:
594  *
595  *   0 = seconds only; nanoseconds zeroed.
596  *   1 = seconds and nanoseconds, accurate within 1/HZ.
597  *   2 = seconds and nanoseconds, truncated to microseconds.
598  * >=3 = seconds and nanoseconds, maximum precision.
599  */
600 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
601 
602 static int timestamp_precision = TSP_SEC;
603 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
604     &timestamp_precision, 0, "");
605 
606 /*
607  * Get a current timestamp.
608  */
609 void
610 vfs_timestamp(tsp)
611 	struct timespec *tsp;
612 {
613 	struct timeval tv;
614 
615 	switch (timestamp_precision) {
616 	case TSP_SEC:
617 		tsp->tv_sec = time_second;
618 		tsp->tv_nsec = 0;
619 		break;
620 	case TSP_HZ:
621 		getnanotime(tsp);
622 		break;
623 	case TSP_USEC:
624 		microtime(&tv);
625 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
626 		break;
627 	case TSP_NSEC:
628 	default:
629 		nanotime(tsp);
630 		break;
631 	}
632 }
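/*
 * Sketch of typical use by a filesystem updating a file's modification
 * time (the inode field names here are only illustrative):
 *
 *	struct timespec ts;
 *
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts.tv_sec;
 *	ip->i_mtimensec = ts.tv_nsec;
 */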
633 
634 /*
635  * Set vnode attributes to VNOVAL
636  */
637 void
638 vattr_null(vap)
639 	register struct vattr *vap;
640 {
641 
642 	vap->va_type = VNON;
643 	vap->va_size = VNOVAL;
644 	vap->va_bytes = VNOVAL;
645 	vap->va_mode = VNOVAL;
646 	vap->va_nlink = VNOVAL;
647 	vap->va_uid = VNOVAL;
648 	vap->va_gid = VNOVAL;
649 	vap->va_fsid = VNOVAL;
650 	vap->va_fileid = VNOVAL;
651 	vap->va_blocksize = VNOVAL;
652 	vap->va_rdev = VNOVAL;
653 	vap->va_atime.tv_sec = VNOVAL;
654 	vap->va_atime.tv_nsec = VNOVAL;
655 	vap->va_mtime.tv_sec = VNOVAL;
656 	vap->va_mtime.tv_nsec = VNOVAL;
657 	vap->va_ctime.tv_sec = VNOVAL;
658 	vap->va_ctime.tv_nsec = VNOVAL;
659 	vap->va_birthtime.tv_sec = VNOVAL;
660 	vap->va_birthtime.tv_nsec = VNOVAL;
661 	vap->va_flags = VNOVAL;
662 	vap->va_gen = VNOVAL;
663 	vap->va_vaflags = 0;
664 }
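/*
 * Sketch of a typical caller: initialize the structure with vattr_null()
 * so that VNOVAL marks every field the caller does not intend to change,
 * then set just the fields of interest before calling VOP_SETATTR().  The
 * truncate-style usage below is illustrative:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = length;
 *	error = VOP_SETATTR(vp, &va, cred, td);
 */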
665 
666 /*
667  * This routine is called when we have too many vnodes.  It attempts
668  * to free <count> vnodes and will potentially free vnodes that still
669  * have VM backing store (VM backing store is typically the cause
670  * of a vnode blowout so we want to do this).  Therefore, this operation
671  * is not considered cheap.
672  *
673  * A number of conditions may prevent a vnode from being reclaimed:
674  * the buffer cache may have references on the vnode, a directory
675  * vnode may still have references due to the namei cache representing
676  * underlying files, or the vnode may be in active use.  It is not
677  * desirable to reuse such vnodes.  These conditions may cause the
678  * number of vnodes to reach some minimum value regardless of what
679  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
680  */
681 static int
682 vlrureclaim(struct mount *mp, int count)
683 {
684 	struct vnode *vp;
685 	int done;
686 	int trigger;
687 	int usevnodes;
688 
689 	/*
690 	 * Calculate the trigger point; don't allow user
691 	 * screwups to blow us up.  This prevents us from
692 	 * recycling vnodes with lots of resident pages.  We
693 	 * aren't trying to free memory, we are trying to
694 	 * free vnodes.
695 	 */
696 	usevnodes = desiredvnodes;
697 	if (usevnodes <= 0)
698 		usevnodes = 1;
699 	trigger = cnt.v_page_count * 2 / usevnodes;
700 
701 	done = 0;
702 	mtx_lock(&mntvnode_mtx);
703 	while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
704 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
705 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
706 
707 		if (vp->v_type != VNON &&
708 		    vp->v_type != VBAD &&
709 		    VI_TRYLOCK(vp)) {
710 			if (VMIGHTFREE(vp) &&           /* critical path opt */
711 			    (vp->v_object == NULL ||
712 			    vp->v_object->resident_page_count < trigger)) {
713 				mtx_unlock(&mntvnode_mtx);
714 				vgonel(vp, curthread);
715 				done++;
716 				mtx_lock(&mntvnode_mtx);
717 			} else
718 				VI_UNLOCK(vp);
719 		}
720 		--count;
721 	}
722 	mtx_unlock(&mntvnode_mtx);
723 	return done;
724 }
725 
726 /*
727  * Attempt to recycle vnodes in a context that is always safe to block.
728  * Calling vlrureclaim() from the bowels of filesystem code has some
729  * interesting deadlock problems.
730  */
731 static struct proc *vnlruproc;
732 static int vnlruproc_sig;
733 
734 static void
735 vnlru_proc(void)
736 {
737 	struct mount *mp, *nmp;
738 	int s;
739 	int done, take;
740 	struct proc *p = vnlruproc;
741 	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
742 
743 	mtx_lock(&Giant);
744 
745 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
746 	    SHUTDOWN_PRI_FIRST);
747 
748 	s = splbio();
749 	for (;;) {
750 		kthread_suspend_check(p);
751 		mtx_lock(&vnode_free_list_mtx);
752 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
753 			mtx_unlock(&vnode_free_list_mtx);
754 			vnlruproc_sig = 0;
755 			wakeup(&vnlruproc_sig);
756 			tsleep(vnlruproc, PVFS, "vlruwt", hz);
757 			continue;
758 		}
759 		mtx_unlock(&vnode_free_list_mtx);
760 		done = 0;
761 		mtx_lock(&mountlist_mtx);
762 		take = 0;
763 		TAILQ_FOREACH(mp, &mountlist, mnt_list)
764 			take++;
765 		take = desiredvnodes / (take * 10);
766 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
767 			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
768 				nmp = TAILQ_NEXT(mp, mnt_list);
769 				continue;
770 			}
771 			done += vlrureclaim(mp, take);
772 			mtx_lock(&mountlist_mtx);
773 			nmp = TAILQ_NEXT(mp, mnt_list);
774 			vfs_unbusy(mp, td);
775 		}
776 		mtx_unlock(&mountlist_mtx);
777 		if (done == 0) {
778 #if 0
779 			/* These messages are temporary debugging aids */
780 			if (vnlru_nowhere < 5)
781 				printf("vnlru process getting nowhere..\n");
782 			else if (vnlru_nowhere == 5)
783 				printf("vnlru process messages stopped.\n");
784 #endif
785 			vnlru_nowhere++;
786 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
787 		}
788 	}
789 	splx(s);
790 }
791 
792 static struct kproc_desc vnlru_kp = {
793 	"vnlru",
794 	vnlru_proc,
795 	&vnlruproc
796 };
797 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
798 
799 
800 /*
801  * Routines having to do with the management of the vnode table.
802  */
803 
804 /*
805  * Check to see if a free vnode can be recycled. If it can,
806  * return it locked with the vnode lock, but not the interlock. Also
807  * get the vn_start_write lock. Otherwise indicate the error.
808  */
809 static int
810 vcanrecycle(struct vnode *vp, struct mount **vnmpp)
811 {
812 	struct thread *td = curthread;
813 	vm_object_t object;
814 	int error;
815 
816 	/* Don't recycle if we can't get the interlock */
817 	if (!VI_TRYLOCK(vp))
818 		return (EWOULDBLOCK);
819 
820 	/* We should be able to immediately acquire this */
821 	/* XXX This looks like it should panic if it fails */
822 	if (vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE, td) != 0) {
823 		if (VOP_ISLOCKED(vp, td))
824 			panic("vcanrecycle: locked vnode");
825 		return (EWOULDBLOCK);
826 	}
827 
828 	/*
829 	 * Don't recycle if its filesystem is being suspended.
830 	 */
831 	if (vn_start_write(vp, vnmpp, V_NOWAIT) != 0) {
832 		error = EBUSY;
833 		goto done;
834 	}
835 
836 	/*
837 	 * Don't recycle if we still have cached pages.
838 	 */
839 	if (VOP_GETVOBJECT(vp, &object) == 0 &&
840 	     (object->resident_page_count ||
841 	      object->ref_count)) {
842 		error = EBUSY;
843 		goto done;
844 	}
845 	if (LIST_FIRST(&vp->v_cache_src)) {
846 		/*
847 		 * note: nameileafonly sysctl is temporary,
848 		 * for debugging only, and will eventually be
849 		 * removed.
850 		 */
851 		if (nameileafonly > 0) {
852 			/*
853 			 * Do not reuse namei-cached directory
854 			 * vnodes that have cached
855 			 * subdirectories.
856 			 */
857 			if (cache_leaf_test(vp) < 0) {
858 				error = EISDIR;
859 				goto done;
860 			}
861 		} else if (nameileafonly < 0 ||
862 			    vmiodirenable == 0) {
863 			/*
864 			 * Do not reuse namei-cached directory
865 			 * vnodes if nameileafonly is -1 or
866 			 * if VMIO backing for directories is
867 			 * turned off (otherwise we reuse them
868 			 * too quickly).
869 			 */
870 			error = EBUSY;
871 			goto done;
872 		}
873 	}
874 	return (0);
875 done:
876 	VOP_UNLOCK(vp, 0, td);
877 	return (error);
878 }
879 
880 /*
881  * Return the next vnode from the free list.
882  */
883 int
884 getnewvnode(tag, mp, vops, vpp)
885 	const char *tag;
886 	struct mount *mp;
887 	vop_t **vops;
888 	struct vnode **vpp;
889 {
890 	int s;
891 	struct thread *td = curthread;	/* XXX */
892 	struct vnode *vp = NULL;
893 	struct vpollinfo *pollinfo = NULL;
894 	struct mount *vnmp;
895 
896 	s = splbio();
897 	mtx_lock(&vnode_free_list_mtx);
898 
899 	/*
900 	 * Try to reuse vnodes if we hit the max.  This case only
901 	 * occurs in certain large-memory (2G+) situations.  We cannot
902 	 * attempt to directly reclaim vnodes due to nasty recursion
903 	 * problems.
904 	 */
905 	while (numvnodes - freevnodes > desiredvnodes) {
906 		if (vnlruproc_sig == 0) {
907 			vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
908 			wakeup(vnlruproc);
909 		}
910 		mtx_unlock(&vnode_free_list_mtx);
911 		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
912 		mtx_lock(&vnode_free_list_mtx);
913 	}
914 
915 	/*
916 	 * Attempt to reuse a vnode already on the free list, allocating
917 	 * a new vnode if we can't find one or if we have not yet reached
918 	 * the minimum needed for good LRU performance.
919 	 */
920 
921 	if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes) {
922 		int error;
923 		int count;
924 
925 		for (count = 0; count < freevnodes; count++) {
926 			vp = TAILQ_FIRST(&vnode_free_list);
927 
928 			KASSERT(vp->v_usecount == 0 &&
929 			    (vp->v_iflag & VI_DOINGINACT) == 0,
930 			    ("getnewvnode: free vnode isn't"));
931 
932 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
933 			/*
934 			 * We have to drop the free list mtx to avoid lock
935 			 * order reversals with interlock.
936 			 */
937 			mtx_unlock(&vnode_free_list_mtx);
938 			error = vcanrecycle(vp, &vnmp);
939 			mtx_lock(&vnode_free_list_mtx);
940 			if (error == 0)
941 				break;
942 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
943 			vp = NULL;
944 		}
945 	}
946 	if (vp) {
947 		freevnodes--;
948 		mtx_unlock(&vnode_free_list_mtx);
949 
950 		cache_purge(vp);
951 		VI_LOCK(vp);
952 		vp->v_iflag |= VI_DOOMED;
953 		vp->v_iflag &= ~VI_FREE;
954 		if (vp->v_type != VBAD) {
955 			VOP_UNLOCK(vp, 0, td);
956 			vgonel(vp, td);
957 			VI_LOCK(vp);
958 		} else {
959 			VOP_UNLOCK(vp, 0, td);
960 		}
961 		vn_finished_write(vnmp);
962 
963 #ifdef INVARIANTS
964 		{
965 			if (vp->v_data)
966 				panic("cleaned vnode isn't");
967 			if (vp->v_numoutput)
968 				panic("Clean vnode has pending I/O's");
969 			if (vp->v_writecount != 0)
970 				panic("Non-zero write count");
971 		}
972 #endif
973 		if ((pollinfo = vp->v_pollinfo) != NULL) {
974 			/*
975 			 * To avoid lock order reversals, the call to
976 			 * uma_zfree() must be delayed until the vnode
977 			 * interlock is released.
978 			 */
979 			vp->v_pollinfo = NULL;
980 		}
981 #ifdef MAC
982 		mac_destroy_vnode(vp);
983 #endif
984 		vp->v_iflag = 0;
985 		vp->v_vflag = 0;
986 		vp->v_lastw = 0;
987 		vp->v_lasta = 0;
988 		vp->v_cstart = 0;
989 		vp->v_clen = 0;
990 		vp->v_socket = 0;
991 		lockdestroy(vp->v_vnlock);
992 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
993 		KASSERT(vp->v_cleanblkroot == NULL, ("cleanblkroot not NULL"));
994 		KASSERT(vp->v_dirtyblkroot == NULL, ("dirtyblkroot not NULL"));
995 	} else {
996 		numvnodes++;
997 		mtx_unlock(&vnode_free_list_mtx);
998 
999 		vp = (struct vnode *) uma_zalloc(vnode_zone, M_ZERO);
1000 		mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
1001 		VI_LOCK(vp);
1002 		vp->v_dd = vp;
1003 		vp->v_vnlock = &vp->v_lock;
1004 		lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOPAUSE);
1005 		cache_purge(vp);
1006 		LIST_INIT(&vp->v_cache_src);
1007 		TAILQ_INIT(&vp->v_cache_dst);
1008 	}
1009 
1010 	TAILQ_INIT(&vp->v_cleanblkhd);
1011 	TAILQ_INIT(&vp->v_dirtyblkhd);
1012 	vp->v_type = VNON;
1013 	vp->v_tag = tag;
1014 	vp->v_op = vops;
1015 	*vpp = vp;
1016 	vp->v_usecount = 1;
1017 	vp->v_data = 0;
1018 	vp->v_cachedid = -1;
1019 	VI_UNLOCK(vp);
1020 	if (pollinfo != NULL) {
1021 		mtx_destroy(&pollinfo->vpi_lock);
1022 		uma_zfree(vnodepoll_zone, pollinfo);
1023 	}
1024 #ifdef MAC
1025 	mac_init_vnode(vp);
1026 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1027 		mac_associate_vnode_singlelabel(mp, vp);
1028 #endif
1029 	insmntque(vp, mp);
1030 
1031 	return (0);
1032 }
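/*
 * Sketch of a typical allocation, mirroring what a disk filesystem's vget
 * routine does (the tag, vnode-op vector, and per-node data names here are
 * illustrative):
 *
 *	error = getnewvnode("ufs", mp, ffs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = ip;
 */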
1033 
1034 /*
1035  * Move a vnode from one mount queue to another.
1036  */
1037 static void
1038 insmntque(vp, mp)
1039 	register struct vnode *vp;
1040 	register struct mount *mp;
1041 {
1042 
1043 	mtx_lock(&mntvnode_mtx);
1044 	/*
1045 	 * Delete from old mount point vnode list, if on one.
1046 	 */
1047 	if (vp->v_mount != NULL)
1048 		TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
1049 	/*
1050 	 * Insert into list of vnodes for the new mount point, if available.
1051 	 */
1052 	if ((vp->v_mount = mp) == NULL) {
1053 		mtx_unlock(&mntvnode_mtx);
1054 		return;
1055 	}
1056 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1057 	mtx_unlock(&mntvnode_mtx);
1058 }
1059 
1060 /*
1061  * Update outstanding I/O count and do wakeup if requested.
1062  */
1063 void
1064 vwakeup(bp)
1065 	register struct buf *bp;
1066 {
1067 	register struct vnode *vp;
1068 
1069 	bp->b_flags &= ~B_WRITEINPROG;
1070 	if ((vp = bp->b_vp)) {
1071 		VI_LOCK(vp);
1072 		vp->v_numoutput--;
1073 		if (vp->v_numoutput < 0)
1074 			panic("vwakeup: neg numoutput");
1075 		if ((vp->v_numoutput == 0) && (vp->v_iflag & VI_BWAIT)) {
1076 			vp->v_iflag &= ~VI_BWAIT;
1077 			wakeup(&vp->v_numoutput);
1078 		}
1079 		VI_UNLOCK(vp);
1080 	}
1081 }
1082 
1083 /*
1084  * Flush out and invalidate all buffers associated with a vnode.
1085  * Called with the underlying object locked.
1086  */
1087 int
1088 vinvalbuf(vp, flags, cred, td, slpflag, slptimeo)
1089 	struct vnode *vp;
1090 	int flags;
1091 	struct ucred *cred;
1092 	struct thread *td;
1093 	int slpflag, slptimeo;
1094 {
1095 	struct buf *blist;
1096 	int s, error;
1097 	vm_object_t object;
1098 
1099 	GIANT_REQUIRED;
1100 
1101 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1102 
1103 	VI_LOCK(vp);
1104 	if (flags & V_SAVE) {
1105 		s = splbio();
1106 		while (vp->v_numoutput) {
1107 			vp->v_iflag |= VI_BWAIT;
1108 			error = msleep(&vp->v_numoutput, VI_MTX(vp),
1109 			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
1110 			if (error) {
1111 				VI_UNLOCK(vp);
1112 				splx(s);
1113 				return (error);
1114 			}
1115 		}
1116 		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1117 			splx(s);
1118 			VI_UNLOCK(vp);
1119 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, td)) != 0)
1120 				return (error);
1121 			/*
1122 			 * XXX We could save a lock/unlock if this was only
1123 			 * enabled under INVARIANTS
1124 			 */
1125 			VI_LOCK(vp);
1126 			s = splbio();
1127 			if (vp->v_numoutput > 0 ||
1128 			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
1129 				panic("vinvalbuf: dirty bufs");
1130 		}
1131 		splx(s);
1132 	}
1133 	s = splbio();
1134 	/*
1135 	 * If you alter this loop please notice that interlock is dropped and
1136 	 * reacquired in flushbuflist.  Special care is needed to ensure that
1137 	 * no race conditions occur from this.
1138 	 */
1139 	for (error = 0;;) {
1140 		if ((blist = TAILQ_FIRST(&vp->v_cleanblkhd)) != 0 &&
1141 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
1142 			if (error)
1143 				break;
1144 			continue;
1145 		}
1146 		if ((blist = TAILQ_FIRST(&vp->v_dirtyblkhd)) != 0 &&
1147 		    flushbuflist(blist, flags, vp, slpflag, slptimeo, &error)) {
1148 			if (error)
1149 				break;
1150 			continue;
1151 		}
1152 		break;
1153 	}
1154 	if (error) {
1155 		splx(s);
1156 		VI_UNLOCK(vp);
1157 		return (error);
1158 	}
1159 
1160 	/*
1161 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
1162 	 * have write I/O in-progress but if there is a VM object then the
1163 	 * VM object can also have read-I/O in-progress.
1164 	 */
1165 	do {
1166 		while (vp->v_numoutput > 0) {
1167 			vp->v_iflag |= VI_BWAIT;
1168 			msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vnvlbv", 0);
1169 		}
1170 		VI_UNLOCK(vp);
1171 		if (VOP_GETVOBJECT(vp, &object) == 0) {
1172 			while (object->paging_in_progress)
1173 				vm_object_pip_sleep(object, "vnvlbx");
1174 		}
1175 		VI_LOCK(vp);
1176 	} while (vp->v_numoutput > 0);
1177 	VI_UNLOCK(vp);
1178 
1179 	splx(s);
1180 
1181 	/*
1182 	 * Destroy the copy in the VM cache, too.
1183 	 */
1184 	if (VOP_GETVOBJECT(vp, &object) == 0) {
1185 		vm_object_lock(object);
1186 		vm_object_page_remove(object, 0, 0,
1187 			(flags & V_SAVE) ? TRUE : FALSE);
1188 		vm_object_unlock(object);
1189 	}
1190 
1191 #ifdef INVARIANTS
1192 	VI_LOCK(vp);
1193 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1194 	    (!TAILQ_EMPTY(&vp->v_dirtyblkhd) ||
1195 	     !TAILQ_EMPTY(&vp->v_cleanblkhd)))
1196 		panic("vinvalbuf: flush failed");
1197 	VI_UNLOCK(vp);
1198 #endif
1199 	return (0);
1200 }
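/*
 * Sketch of a common call, e.g. when a filesystem reclaims a vnode and
 * wants dirty data written back first (illustrative only):
 *
 *	error = vinvalbuf(vp, V_SAVE, cred, td, 0, 0);
 *
 * Passing 0 for flags instead of V_SAVE discards dirty buffers without
 * writing them.
 */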
1201 
1202 /*
1203  * Flush out buffers on the specified list.
1204  *
1205  */
1206 static int
1207 flushbuflist(blist, flags, vp, slpflag, slptimeo, errorp)
1208 	struct buf *blist;
1209 	int flags;
1210 	struct vnode *vp;
1211 	int slpflag, slptimeo;
1212 	int *errorp;
1213 {
1214 	struct buf *bp, *nbp;
1215 	int found, error;
1216 
1217 	ASSERT_VI_LOCKED(vp, "flushbuflist");
1218 
1219 	for (found = 0, bp = blist; bp; bp = nbp) {
1220 		nbp = TAILQ_NEXT(bp, b_vnbufs);
1221 		VI_UNLOCK(vp);
1222 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1223 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1224 			VI_LOCK(vp);
1225 			continue;
1226 		}
1227 		found += 1;
1228 		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1229 			error = BUF_TIMELOCK(bp,
1230 			    LK_EXCLUSIVE | LK_SLEEPFAIL,
1231 			    "flushbuf", slpflag, slptimeo);
1232 			if (error != ENOLCK)
1233 				*errorp = error;
1234 			goto done;
1235 		}
1236 		/*
1237 		 * XXX Since there are no node locks for NFS, I
1238 		 * believe there is a slight chance that a delayed
1239 		 * write will occur while sleeping just above, so
1240 		 * check for it.  Note that vfs_bio_awrite expects
1241 		 * buffers to reside on a queue, while BUF_WRITE and
1242 		 * brelse do not.
1243 		 */
1244 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1245 			(flags & V_SAVE)) {
1246 
1247 			if (bp->b_vp == vp) {
1248 				if (bp->b_flags & B_CLUSTEROK) {
1249 					BUF_UNLOCK(bp);
1250 					vfs_bio_awrite(bp);
1251 				} else {
1252 					bremfree(bp);
1253 					bp->b_flags |= B_ASYNC;
1254 					BUF_WRITE(bp);
1255 				}
1256 			} else {
1257 				bremfree(bp);
1258 				(void) BUF_WRITE(bp);
1259 			}
1260 			goto done;
1261 		}
1262 		bremfree(bp);
1263 		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
1264 		bp->b_flags &= ~B_ASYNC;
1265 		brelse(bp);
1266 		VI_LOCK(vp);
1267 	}
1268 	return (found);
1269 done:
1270 	VI_LOCK(vp);
1271 	return (found);
1272 }
1273 
1274 /*
1275  * Truncate a file's buffers and pages to a specified length.  This
1276  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1277  * sync activity.
1278  */
1279 int
1280 vtruncbuf(vp, cred, td, length, blksize)
1281 	register struct vnode *vp;
1282 	struct ucred *cred;
1283 	struct thread *td;
1284 	off_t length;
1285 	int blksize;
1286 {
1287 	register struct buf *bp;
1288 	struct buf *nbp;
1289 	int s, anyfreed;
1290 	int trunclbn;
1291 
1292 	/*
1293 	 * Round up to the *next* lbn.
1294 	 */
1295 	trunclbn = (length + blksize - 1) / blksize;
1296 
1297 	s = splbio();
1298 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1299 restart:
1300 	VI_LOCK(vp);
1301 	anyfreed = 1;
1302 	for (;anyfreed;) {
1303 		anyfreed = 0;
1304 		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
1305 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1306 			VI_UNLOCK(vp);
1307 			if (bp->b_lblkno >= trunclbn) {
1308 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1309 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
1310 					goto restart;
1311 				} else {
1312 					bremfree(bp);
1313 					bp->b_flags |= (B_INVAL | B_RELBUF);
1314 					bp->b_flags &= ~B_ASYNC;
1315 					brelse(bp);
1316 					anyfreed = 1;
1317 				}
1318 				if (nbp &&
1319 				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1320 				    (nbp->b_vp != vp) ||
1321 				    (nbp->b_flags & B_DELWRI))) {
1322 					goto restart;
1323 				}
1324 			}
1325 			VI_LOCK(vp);
1326 		}
1327 
1328 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1329 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1330 			VI_UNLOCK(vp);
1331 			if (bp->b_lblkno >= trunclbn) {
1332 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1333 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
1334 					goto restart;
1335 				} else {
1336 					bremfree(bp);
1337 					bp->b_flags |= (B_INVAL | B_RELBUF);
1338 					bp->b_flags &= ~B_ASYNC;
1339 					brelse(bp);
1340 					anyfreed = 1;
1341 				}
1342 				if (nbp &&
1343 				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1344 				    (nbp->b_vp != vp) ||
1345 				    (nbp->b_flags & B_DELWRI) == 0)) {
1346 					goto restart;
1347 				}
1348 			}
1349 			VI_LOCK(vp);
1350 		}
1351 	}
1352 
1353 	if (length > 0) {
1354 restartsync:
1355 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1356 			nbp = TAILQ_NEXT(bp, b_vnbufs);
1357 			VI_UNLOCK(vp);
1358 			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
1359 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
1360 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
1361 					goto restart;
1362 				} else {
1363 					bremfree(bp);
1364 					if (bp->b_vp == vp) {
1365 						bp->b_flags |= B_ASYNC;
1366 					} else {
1367 						bp->b_flags &= ~B_ASYNC;
1368 					}
1369 					BUF_WRITE(bp);
1370 				}
1371 				VI_LOCK(vp);
1372 				goto restartsync;
1373 			}
1374 			VI_LOCK(vp);
1375 		}
1376 	}
1377 
1378 	while (vp->v_numoutput > 0) {
1379 		vp->v_iflag |= VI_BWAIT;
1380 		msleep(&vp->v_numoutput, VI_MTX(vp), PVM, "vbtrunc", 0);
1381 	}
1382 	VI_UNLOCK(vp);
1383 	splx(s);
1384 
1385 	vnode_pager_setsize(vp, length);
1386 
1387 	return (0);
1388 }
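/*
 * Sketch of typical use from a filesystem's truncate path; the block size
 * argument is whatever the filesystem uses for its logical blocks, and the
 * names here are illustrative:
 *
 *	error = vtruncbuf(vp, cred, td, length, fs->fs_bsize);
 */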
1389 
1390 /*
1391  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1392  * 		 a vnode.
1393  *
1394  *	NOTE: We have to deal with the special case of a background bitmap
1395  *	buffer, a situation where two buffers will have the same logical
1396  *	block offset.  We want (1) only the foreground buffer to be accessed
1397  *	in a lookup and (2) to differentiate between the foreground and
1398  *	background buffer in the splay tree algorithm because the splay
1399  *	tree cannot normally handle multiple entities with the same 'index'.
1400  *	We accomplish this by adding differentiating flags to the splay tree's
1401  *	numerical domain.
1402  */
1403 static
1404 struct buf *
1405 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1406 {
1407 	struct buf dummy;
1408 	struct buf *lefttreemax, *righttreemin, *y;
1409 
1410 	if (root == NULL)
1411 		return (NULL);
1412 	lefttreemax = righttreemin = &dummy;
1413 	for (;;) {
1414 		if (lblkno < root->b_lblkno ||
1415 		    (lblkno == root->b_lblkno &&
1416 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1417 			if ((y = root->b_left) == NULL)
1418 				break;
1419 			if (lblkno < y->b_lblkno) {
1420 				/* Rotate right. */
1421 				root->b_left = y->b_right;
1422 				y->b_right = root;
1423 				root = y;
1424 				if ((y = root->b_left) == NULL)
1425 					break;
1426 			}
1427 			/* Link into the new root's right tree. */
1428 			righttreemin->b_left = root;
1429 			righttreemin = root;
1430 		} else if (lblkno > root->b_lblkno ||
1431 		    (lblkno == root->b_lblkno &&
1432 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1433 			if ((y = root->b_right) == NULL)
1434 				break;
1435 			if (lblkno > y->b_lblkno) {
1436 				/* Rotate left. */
1437 				root->b_right = y->b_left;
1438 				y->b_left = root;
1439 				root = y;
1440 				if ((y = root->b_right) == NULL)
1441 					break;
1442 			}
1443 			/* Link into the new root's left tree. */
1444 			lefttreemax->b_right = root;
1445 			lefttreemax = root;
1446 		} else {
1447 			break;
1448 		}
1449 		root = y;
1450 	}
1451 	/* Assemble the new root. */
1452 	lefttreemax->b_right = root->b_left;
1453 	righttreemin->b_left = root->b_right;
1454 	root->b_left = dummy.b_right;
1455 	root->b_right = dummy.b_left;
1456 	return (root);
1457 }
1458 
1459 static
1460 void
1461 buf_vlist_remove(struct buf *bp)
1462 {
1463 	struct vnode *vp = bp->b_vp;
1464 	struct buf *root;
1465 
1466 	ASSERT_VI_LOCKED(vp, "buf_vlist_remove");
1467 	if (bp->b_xflags & BX_VNDIRTY) {
1468 		if (bp != vp->v_dirtyblkroot) {
1469 			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1470 			KASSERT(root == bp, ("splay lookup failed during dirty remove"));
1471 		}
1472 		if (bp->b_left == NULL) {
1473 			root = bp->b_right;
1474 		} else {
1475 			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1476 			root->b_right = bp->b_right;
1477 		}
1478 		vp->v_dirtyblkroot = root;
1479 		TAILQ_REMOVE(&vp->v_dirtyblkhd, bp, b_vnbufs);
1480 	} else {
1481 		/* KASSERT(bp->b_xflags & BX_VNCLEAN, ("bp wasn't clean")); */
1482 		if (bp != vp->v_cleanblkroot) {
1483 			root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1484 			KASSERT(root == bp, ("splay lookup failed during clean remove"));
1485 		}
1486 		if (bp->b_left == NULL) {
1487 			root = bp->b_right;
1488 		} else {
1489 			root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1490 			root->b_right = bp->b_right;
1491 		}
1492 		vp->v_cleanblkroot = root;
1493 		TAILQ_REMOVE(&vp->v_cleanblkhd, bp, b_vnbufs);
1494 	}
1495 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1496 }
1497 
1498 /*
1499  * Add the buffer to the sorted clean or dirty block list using a
1500  * splay tree algorithm.
1501  *
1502  * NOTE: xflags is passed as a constant, optimizing this inline function!
1503  */
1504 static
1505 void
1506 buf_vlist_add(struct buf *bp, struct vnode *vp, b_xflags_t xflags)
1507 {
1508 	struct buf *root;
1509 
1510 	ASSERT_VI_LOCKED(vp, "buf_vlist_add");
1511 	bp->b_xflags |= xflags;
1512 	if (xflags & BX_VNDIRTY) {
1513 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_dirtyblkroot);
1514 		if (root == NULL) {
1515 			bp->b_left = NULL;
1516 			bp->b_right = NULL;
1517 			TAILQ_INSERT_TAIL(&vp->v_dirtyblkhd, bp, b_vnbufs);
1518 		} else if (bp->b_lblkno < root->b_lblkno ||
1519 		    (bp->b_lblkno == root->b_lblkno &&
1520 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1521 			bp->b_left = root->b_left;
1522 			bp->b_right = root;
1523 			root->b_left = NULL;
1524 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1525 		} else {
1526 			bp->b_right = root->b_right;
1527 			bp->b_left = root;
1528 			root->b_right = NULL;
1529 			TAILQ_INSERT_AFTER(&vp->v_dirtyblkhd,
1530 			    root, bp, b_vnbufs);
1531 		}
1532 		vp->v_dirtyblkroot = bp;
1533 	} else {
1534 		/* KASSERT(xflags & BX_VNCLEAN, ("xflags not clean")); */
1535 		root = buf_splay(bp->b_lblkno, bp->b_xflags, vp->v_cleanblkroot);
1536 		if (root == NULL) {
1537 			bp->b_left = NULL;
1538 			bp->b_right = NULL;
1539 			TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
1540 		} else if (bp->b_lblkno < root->b_lblkno ||
1541 		    (bp->b_lblkno == root->b_lblkno &&
1542 		    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1543 			bp->b_left = root->b_left;
1544 			bp->b_right = root;
1545 			root->b_left = NULL;
1546 			TAILQ_INSERT_BEFORE(root, bp, b_vnbufs);
1547 		} else {
1548 			bp->b_right = root->b_right;
1549 			bp->b_left = root;
1550 			root->b_right = NULL;
1551 			TAILQ_INSERT_AFTER(&vp->v_cleanblkhd,
1552 			    root, bp, b_vnbufs);
1553 		}
1554 		vp->v_cleanblkroot = bp;
1555 	}
1556 }
1557 
1558 #ifndef USE_BUFHASH
1559 
1560 /*
1561  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1562  * shadow buffers used in background bitmap writes.
1563  *
1564  * This code isn't quite as efficient as it could be because we are maintaining
1565  * two sorted lists and do not know which list the block resides in.
1566  */
1567 struct buf *
1568 gbincore(struct vnode *vp, daddr_t lblkno)
1569 {
1570 	struct buf *bp;
1571 
1572 	GIANT_REQUIRED;
1573 
1574 	ASSERT_VI_LOCKED(vp, "gbincore");
1575 	bp = vp->v_cleanblkroot = buf_splay(lblkno, 0, vp->v_cleanblkroot);
1576 	if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1577 		return(bp);
1578 	bp = vp->v_dirtyblkroot = buf_splay(lblkno, 0, vp->v_dirtyblkroot);
1579 	if (bp && bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1580 		return(bp);
1581 	return(NULL);
1582 }
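/*
 * Sketch of how a caller looks up a cached buffer (illustrative; the vnode
 * interlock must be held across the call, as asserted above):
 *
 *	VI_LOCK(vp);
 *	bp = gbincore(vp, lblkno);
 *	VI_UNLOCK(vp);
 *	if (bp != NULL)
 *		... the block is already cached ...
 */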
1583 
1584 #endif
1585 
1586 /*
1587  * Associate a buffer with a vnode.
1588  */
1589 void
1590 bgetvp(vp, bp)
1591 	register struct vnode *vp;
1592 	register struct buf *bp;
1593 {
1594 	int s;
1595 
1596 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
1597 
1598 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1599 	    ("bgetvp: bp already attached! %p", bp));
1600 
1601 	VI_LOCK(vp);
1602 	vholdl(vp);
1603 	bp->b_vp = vp;
1604 	bp->b_dev = vn_todev(vp);
1605 	/*
1606 	 * Insert onto list for new vnode.
1607 	 */
1608 	s = splbio();
1609 	buf_vlist_add(bp, vp, BX_VNCLEAN);
1610 	splx(s);
1611 	VI_UNLOCK(vp);
1612 }
1613 
1614 /*
1615  * Disassociate a buffer from a vnode.
1616  */
1617 void
1618 brelvp(bp)
1619 	register struct buf *bp;
1620 {
1621 	struct vnode *vp;
1622 	int s;
1623 
1624 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1625 
1626 	/*
1627 	 * Delete from old vnode list, if on one.
1628 	 */
1629 	vp = bp->b_vp;
1630 	s = splbio();
1631 	VI_LOCK(vp);
1632 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1633 		buf_vlist_remove(bp);
1634 	if ((vp->v_iflag & VI_ONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
1635 		vp->v_iflag &= ~VI_ONWORKLST;
1636 		mtx_lock(&sync_mtx);
1637 		LIST_REMOVE(vp, v_synclist);
1638 		mtx_unlock(&sync_mtx);
1639 	}
1640 	vdropl(vp);
1641 	VI_UNLOCK(vp);
1642 	bp->b_vp = (struct vnode *) 0;
1643 	if (bp->b_object)
1644 		bp->b_object = NULL;
1645 	splx(s);
1646 }
1647 
1648 /*
1649  * Add an item to the syncer work queue.
1650  */
1651 static void
1652 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
1653 {
1654 	int s, slot;
1655 
1656 	s = splbio();
1657 	ASSERT_VI_LOCKED(vp, "vn_syncer_add_to_worklist");
1658 
1659 	mtx_lock(&sync_mtx);
1660 	if (vp->v_iflag & VI_ONWORKLST)
1661 		LIST_REMOVE(vp, v_synclist);
1662 	else
1663 		vp->v_iflag |= VI_ONWORKLST;
1664 
1665 	if (delay > syncer_maxdelay - 2)
1666 		delay = syncer_maxdelay - 2;
1667 	slot = (syncer_delayno + delay) & syncer_mask;
1668 
1669 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
1670 	mtx_unlock(&sync_mtx);
1671 
1672 	splx(s);
1673 }
1674 
1675 struct  proc *updateproc;
1676 static void sched_sync(void);
1677 static struct kproc_desc up_kp = {
1678 	"syncer",
1679 	sched_sync,
1680 	&updateproc
1681 };
1682 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1683 
1684 /*
1685  * System filesystem synchronizer daemon.
1686  */
1687 static void
1688 sched_sync(void)
1689 {
1690 	struct synclist *slp;
1691 	struct vnode *vp;
1692 	struct mount *mp;
1693 	long starttime;
1694 	int s;
1695 	struct thread *td = FIRST_THREAD_IN_PROC(updateproc);  /* XXXKSE */
1696 
1697 	mtx_lock(&Giant);
1698 
1699 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, td->td_proc,
1700 	    SHUTDOWN_PRI_LAST);
1701 
1702 	for (;;) {
1703 		kthread_suspend_check(td->td_proc);
1704 
1705 		starttime = time_second;
1706 
1707 		/*
1708 		 * Push files whose dirty time has expired.  Be careful
1709 		 * of interrupt race on slp queue.
1710 		 */
1711 		s = splbio();
1712 		mtx_lock(&sync_mtx);
1713 		slp = &syncer_workitem_pending[syncer_delayno];
1714 		syncer_delayno += 1;
1715 		if (syncer_delayno == syncer_maxdelay)
1716 			syncer_delayno = 0;
1717 		splx(s);
1718 
1719 		while ((vp = LIST_FIRST(slp)) != NULL) {
1720 			mtx_unlock(&sync_mtx);
1721 			if (VOP_ISLOCKED(vp, NULL) == 0 &&
1722 			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
1723 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
1724 				(void) VOP_FSYNC(vp, td->td_ucred, MNT_LAZY, td);
1725 				VOP_UNLOCK(vp, 0, td);
1726 				vn_finished_write(mp);
1727 			}
1728 			s = splbio();
1729 			mtx_lock(&sync_mtx);
1730 			if (LIST_FIRST(slp) == vp) {
1731 				mtx_unlock(&sync_mtx);
1732 				/*
1733 				 * Note: VFS vnodes can remain on the
1734 				 * worklist too with no dirty blocks, but
1735 				 * since sync_fsync() moves it to a different
1736 				 * slot we are safe.
1737 				 */
1738 				VI_LOCK(vp);
1739 				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
1740 				    !vn_isdisk(vp, NULL)) {
1741 					panic("sched_sync: fsync failed "
1742 					      "vp %p tag %s", vp, vp->v_tag);
1743 				}
1744 				/*
1745 				 * Put us back on the worklist.  The worklist
1746 				 * routine will remove us from our current
1747 				 * position and then add us back in at a later
1748 				 * position.
1749 				 */
1750 				vn_syncer_add_to_worklist(vp, syncdelay);
1751 				VI_UNLOCK(vp);
1752 				mtx_lock(&sync_mtx);
1753 			}
1754 			splx(s);
1755 		}
1756 		mtx_unlock(&sync_mtx);
1757 
1758 		/*
1759 		 * Do soft update processing.
1760 		 */
1761 		if (softdep_process_worklist_hook != NULL)
1762 			(*softdep_process_worklist_hook)(NULL);
1763 
1764 		/*
1765 		 * The variable rushjob allows the kernel to speed up the
1766 		 * processing of the filesystem syncer process. A rushjob
1767 		 * value of N tells the filesystem syncer to process the next
1768 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1769 		 * is used by the soft update code to speed up the filesystem
1770 		 * syncer process when the incore state is getting so far
1771 		 * ahead of the disk that the kernel memory pool is being
1772 		 * threatened with exhaustion.
1773 		 */
1774 		mtx_lock(&sync_mtx);
1775 		if (rushjob > 0) {
1776 			rushjob -= 1;
1777 			mtx_unlock(&sync_mtx);
1778 			continue;
1779 		}
1780 		mtx_unlock(&sync_mtx);
1781 		/*
1782 		 * If it has taken us less than a second to process the
1783 		 * current work, then wait. Otherwise start right over
1784 		 * again. We can still lose time if any single round
1785 		 * takes more than two seconds, but it does not really
1786 		 * matter as we are just trying to generally pace the
1787 		 * filesystem activity.
1788 		 */
1789 		if (time_second == starttime)
1790 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1791 	}
1792 }
1793 
1794 /*
1795  * Request the syncer daemon to speed up its work.
1796  * We never push it to speed up more than half of its
1797  * normal turn time, otherwise it could take over the cpu.
1798  * XXXKSE  only one update?
1799  */
1800 int
1801 speedup_syncer()
1802 {
1803 	struct thread *td;
1804 	int ret = 0;
1805 
1806 	td = FIRST_THREAD_IN_PROC(updateproc);
1807 	mtx_lock_spin(&sched_lock);
1808 	if (td->td_wchan == &lbolt) {
1809 		unsleep(td);
1810 		TD_CLR_SLEEPING(td);
1811 		setrunnable(td);
1812 	}
1813 	mtx_unlock_spin(&sched_lock);
1814 	mtx_lock(&sync_mtx);
1815 	if (rushjob < syncdelay / 2) {
1816 		rushjob += 1;
1817 		stat_rush_requests += 1;
1818 		ret = 1;
1819 	}
1820 	mtx_unlock(&sync_mtx);
1821 	return (ret);
1822 }
1823 
1824 /*
1825  * Associate a p-buffer with a vnode.
1826  *
1827  * Also sets B_PAGING flag to indicate that vnode is not fully associated
1828  * with the buffer.  i.e. the bp has not been linked into the vnode or
1829  * ref-counted.
1830  */
1831 void
1832 pbgetvp(vp, bp)
1833 	register struct vnode *vp;
1834 	register struct buf *bp;
1835 {
1836 
1837 	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1838 
1839 	bp->b_vp = vp;
1840 	bp->b_flags |= B_PAGING;
1841 	bp->b_dev = vn_todev(vp);
1842 }
1843 
1844 /*
1845  * Disassociate a p-buffer from a vnode.
1846  */
1847 void
1848 pbrelvp(bp)
1849 	register struct buf *bp;
1850 {
1851 
1852 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1853 
1854 	/* XXX REMOVE ME */
1855 	VI_LOCK(bp->b_vp);
1856 	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1857 		panic(
1858 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1859 		    bp,
1860 		    (int)bp->b_flags
1861 		);
1862 	}
1863 	VI_UNLOCK(bp->b_vp);
1864 	bp->b_vp = (struct vnode *) 0;
1865 	bp->b_flags &= ~B_PAGING;
1866 }
1867 
1868 /*
1869  * Reassign a buffer from one vnode to another.
1870  * Used to assign file specific control information
1871  * (indirect blocks) to the vnode to which they belong.
1872  */
1873 void
1874 reassignbuf(bp, newvp)
1875 	register struct buf *bp;
1876 	register struct vnode *newvp;
1877 {
1878 	int delay;
1879 	int s;
1880 
1881 	if (newvp == NULL) {
1882 		printf("reassignbuf: NULL");
1883 		return;
1884 	}
1885 	++reassignbufcalls;
1886 
1887 	/*
1888 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1889 	 * is not fully linked in.
1890 	 */
1891 	if (bp->b_flags & B_PAGING)
1892 		panic("cannot reassign paging buffer");
1893 
1894 	s = splbio();
1895 	/*
1896 	 * Delete from old vnode list, if on one.
1897 	 */
1898 	VI_LOCK(bp->b_vp);
1899 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1900 		buf_vlist_remove(bp);
1901 		if (bp->b_vp != newvp) {
1902 			vdropl(bp->b_vp);
1903 			bp->b_vp = NULL;	/* for clarification */
1904 		}
1905 	}
1906 	VI_UNLOCK(bp->b_vp);
1907 	/*
1908 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1909 	 * of clean buffers.
1910 	 */
1911 	VI_LOCK(newvp);
1912 	if (bp->b_flags & B_DELWRI) {
1913 		if ((newvp->v_iflag & VI_ONWORKLST) == 0) {
1914 			switch (newvp->v_type) {
1915 			case VDIR:
1916 				delay = dirdelay;
1917 				break;
1918 			case VCHR:
1919 				if (newvp->v_rdev->si_mountpoint != NULL) {
1920 					delay = metadelay;
1921 					break;
1922 				}
1923 				/* FALLTHROUGH */
1924 			default:
1925 				delay = filedelay;
1926 			}
1927 			vn_syncer_add_to_worklist(newvp, delay);
1928 		}
1929 		buf_vlist_add(bp, newvp, BX_VNDIRTY);
1930 	} else {
1931 		buf_vlist_add(bp, newvp, BX_VNCLEAN);
1932 
1933 		if ((newvp->v_iflag & VI_ONWORKLST) &&
1934 		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1935 			mtx_lock(&sync_mtx);
1936 			LIST_REMOVE(newvp, v_synclist);
1937 			mtx_unlock(&sync_mtx);
1938 			newvp->v_iflag &= ~VI_ONWORKLST;
1939 		}
1940 	}
1941 	if (bp->b_vp != newvp) {
1942 		bp->b_vp = newvp;
1943 		vholdl(bp->b_vp);
1944 	}
1945 	VI_UNLOCK(newvp);
1946 	splx(s);
1947 }
1948 
1949 /*
1950  * Create a vnode for a device.
1951  * Used for mounting the root filesystem.
1952  */
1953 int
1954 bdevvp(dev, vpp)
1955 	dev_t dev;
1956 	struct vnode **vpp;
1957 {
1958 	register struct vnode *vp;
1959 	struct vnode *nvp;
1960 	int error;
1961 
1962 	if (dev == NODEV) {
1963 		*vpp = NULLVP;
1964 		return (ENXIO);
1965 	}
1966 	if (vfinddev(dev, VCHR, vpp))
1967 		return (0);
1968 	error = getnewvnode("none", (struct mount *)0, spec_vnodeop_p, &nvp);
1969 	if (error) {
1970 		*vpp = NULLVP;
1971 		return (error);
1972 	}
1973 	vp = nvp;
1974 	vp->v_type = VCHR;
1975 	addalias(vp, dev);
1976 	*vpp = vp;
1977 	return (0);
1978 }
1979 
1980 static void
1981 v_incr_usecount(struct vnode *vp, int delta)
1982 {
1983 	vp->v_usecount += delta;
1984 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1985 		mtx_lock(&spechash_mtx);
1986 		vp->v_rdev->si_usecount += delta;
1987 		mtx_unlock(&spechash_mtx);
1988 	}
1989 }
1990 
1991 /*
1992  * Add vnode to the alias list hung off the dev_t.
1993  *
1994  * The reason for this gunk is that multiple vnodes can reference
1995  * the same physical device, so checking vp->v_usecount to see
1996  * how many users there are is inadequate; the v_usecount for
1997  * the vnodes needs to be accumulated.  vcount() does that.
1998  */
1999 struct vnode *
2000 addaliasu(nvp, nvp_rdev)
2001 	struct vnode *nvp;
2002 	udev_t nvp_rdev;
2003 {
2004 	struct vnode *ovp;
2005 	vop_t **ops;
2006 	dev_t dev;
2007 
2008 	if (nvp->v_type == VBLK)
2009 		return (nvp);
2010 	if (nvp->v_type != VCHR)
2011 		panic("addaliasu on non-special vnode");
2012 	dev = udev2dev(nvp_rdev, 0);
2013 	/*
2014 	 * Check to see if we have a bdevvp vnode with no associated
2015 	 * filesystem. If so, we want to associate the filesystem of
2016 	 * the newly created vnode with the bdevvp vnode and
2017 	 * discard the newly created vnode rather than leaving the
2018 	 * bdevvp vnode lying around with no associated filesystem.
2019 	 */
2020 	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
2021 		addalias(nvp, dev);
2022 		return (nvp);
2023 	}
2024 	/*
2025 	 * Discard unneeded vnode, but save its node specific data.
2026 	 * Note that if there is a lock, it is carried over in the
2027 	 * node specific data to the replacement vnode.
2028 	 */
2029 	vref(ovp);
2030 	ovp->v_data = nvp->v_data;
2031 	ovp->v_tag = nvp->v_tag;
2032 	nvp->v_data = NULL;
2033 	lockdestroy(ovp->v_vnlock);
2034 	lockinit(ovp->v_vnlock, PVFS, nvp->v_vnlock->lk_wmesg,
2035 	    nvp->v_vnlock->lk_timo, nvp->v_vnlock->lk_flags & LK_EXTFLG_MASK);
2036 	ops = ovp->v_op;
2037 	ovp->v_op = nvp->v_op;
2038 	if (VOP_ISLOCKED(nvp, curthread)) {
2039 		VOP_UNLOCK(nvp, 0, curthread);
2040 		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curthread);
2041 	}
2042 	nvp->v_op = ops;
2043 	insmntque(ovp, nvp->v_mount);
2044 	vrele(nvp);
2045 	vgone(nvp);
2046 	return (ovp);
2047 }
2048 
2049 /* This is a local helper function that does the same as addaliasu, but for
2050  * a dev_t instead of a udev_t. */
2051 static void
2052 addalias(nvp, dev)
2053 	struct vnode *nvp;
2054 	dev_t dev;
2055 {
2056 
2057 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
2058 	nvp->v_rdev = dev;
2059 	VI_LOCK(nvp);
2060 	mtx_lock(&spechash_mtx);
2061 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
2062 	dev->si_usecount += nvp->v_usecount;
2063 	mtx_unlock(&spechash_mtx);
2064 	VI_UNLOCK(nvp);
2065 }
2066 
2067 /*
2068  * Grab a particular vnode from the free list, increment its
2069  * reference count and lock it. The vnode lock bit is set if the
2070  * vnode is being eliminated in vgone. The process is awakened
2071  * when the transition is completed, and an error returned to
2072  * indicate that the vnode is no longer usable (possibly having
2073  * been changed to a new filesystem type).
2074  */
2075 int
2076 vget(vp, flags, td)
2077 	register struct vnode *vp;
2078 	int flags;
2079 	struct thread *td;
2080 {
2081 	int error;
2082 
2083 	/*
2084 	 * If the vnode is in the process of being cleaned out for
2085 	 * another use, we wait for the cleaning to finish and then
2086 	 * return failure. Cleaning is determined by checking that
2087 	 * the VI_XLOCK flag is set.
2088 	 */
2089 	if ((flags & LK_INTERLOCK) == 0)
2090 		VI_LOCK(vp);
2091 	if (vp->v_iflag & VI_XLOCK && vp->v_vxproc != curthread) {
2092 		vp->v_iflag |= VI_XWANT;
2093 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vget", 0);
2094 		return (ENOENT);
2095 	}
2096 
2097 	v_incr_usecount(vp, 1);
2098 
2099 	if (VSHOULDBUSY(vp))
2100 		vbusy(vp);
2101 	if (flags & LK_TYPE_MASK) {
2102 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, td)) != 0) {
2103 			/*
2104 			 * must expand vrele here because we do not want
2105 			 * to call VOP_INACTIVE if the reference count
2106 			 * drops back to zero since it was never really
2107 			 * active. We must remove it from the free list
2108 			 * before sleeping so that multiple processes do
2109 			 * not try to recycle it.
2110 			 */
2111 			VI_LOCK(vp);
2112 			v_incr_usecount(vp, -1);
2113 			if (VSHOULDFREE(vp))
2114 				vfree(vp);
2115 			else
2116 				vlruvp(vp);
2117 			VI_UNLOCK(vp);
2118 		}
2119 		return (error);
2120 	}
2121 	VI_UNLOCK(vp);
2122 	return (0);
2123 }
2124 
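/*
 * Illustrative sketch, not part of this file: the usual pattern for taking
 * a reference on (and locking) a vnode with vget(), doing some work, and
 * dropping both with vput().  The example_*() name is an assumption.
 */
static void
example_use_vnode(struct vnode *vp, struct thread *td)
{

	if (vget(vp, LK_EXCLUSIVE | LK_RETRY, td) != 0)
		return;			/* vnode was being cleaned out */
	/* ... operate on the locked, referenced vnode here ... */
	vput(vp);			/* unlock and release the reference */
}
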
2125 /*
2126  * Increase the reference count of a vnode.
2127  */
2128 void
2129 vref(struct vnode *vp)
2130 {
2131 	VI_LOCK(vp);
2132 	v_incr_usecount(vp, 1);
2133 	VI_UNLOCK(vp);
2134 }
2135 
2136 /*
2137  * Return reference count of a vnode.
2138  *
2139  * The results of this call are only guaranteed when some mechanism other
2140  * than the VI lock is used to stop other processes from gaining references
2141  * to the vnode.  This may be the case if the caller holds the only reference.
2142  * This is also useful when stale data is acceptable as race conditions may
2143  * be accounted for by some other means.
2144  */
2145 int
2146 vrefcnt(struct vnode *vp)
2147 {
2148 	int usecnt;
2149 
2150 	VI_LOCK(vp);
2151 	usecnt = vp->v_usecount;
2152 	VI_UNLOCK(vp);
2153 
2154 	return (usecnt);
2155 }
2156 
2157 
2158 /*
2159  * Vnode put/release.
2160  * If count drops to zero, call inactive routine and return to freelist.
2161  */
2162 void
2163 vrele(vp)
2164 	struct vnode *vp;
2165 {
2166 	struct thread *td = curthread;	/* XXX */
2167 
2168 	KASSERT(vp != NULL, ("vrele: null vp"));
2169 
2170 	VI_LOCK(vp);
2171 
2172 	/* Skip this v_writecount check if we're going to panic below. */
2173 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2174 	    ("vrele: missed vn_close"));
2175 
2176 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2177 	    vp->v_usecount == 1)) {
2178 		v_incr_usecount(vp, -1);
2179 		VI_UNLOCK(vp);
2180 
2181 		return;
2182 	}
2183 
2184 	if (vp->v_usecount == 1) {
2185 		v_incr_usecount(vp, -1);
2186 		/*
2187 		 * We must call VOP_INACTIVE with the node locked. Mark
2188 		 * as VI_DOINGINACT to avoid recursion.
2189 		 */
2190 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) == 0) {
2191 			VI_LOCK(vp);
2192 			vp->v_iflag |= VI_DOINGINACT;
2193 			VI_UNLOCK(vp);
2194 			VOP_INACTIVE(vp, td);
2195 			VI_LOCK(vp);
2196 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2197 			    ("vrele: lost VI_DOINGINACT"));
2198 			vp->v_iflag &= ~VI_DOINGINACT;
2199 			VI_UNLOCK(vp);
2200 		}
2201 		VI_LOCK(vp);
2202 		if (VSHOULDFREE(vp))
2203 			vfree(vp);
2204 		else
2205 			vlruvp(vp);
2206 		VI_UNLOCK(vp);
2207 
2208 	} else {
2209 #ifdef DIAGNOSTIC
2210 		vprint("vrele: negative ref count", vp);
2211 #endif
2212 		VI_UNLOCK(vp);
2213 		panic("vrele: negative ref cnt");
2214 	}
2215 }
2216 
2217 /*
2218  * Release an already locked vnode.  This gives the same effect as
2219  * unlock+vrele(), but takes less time and avoids releasing and
2220  * re-acquiring the lock (as vrele() acquires the lock internally).
2221  */
2222 void
2223 vput(vp)
2224 	struct vnode *vp;
2225 {
2226 	struct thread *td = curthread;	/* XXX */
2227 
2228 	GIANT_REQUIRED;
2229 
2230 	KASSERT(vp != NULL, ("vput: null vp"));
2231 	VI_LOCK(vp);
2232 	/* Skip this v_writecount check if we're going to panic below. */
2233 	KASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1,
2234 	    ("vput: missed vn_close"));
2235 
2236 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2237 	    vp->v_usecount == 1)) {
2238 		v_incr_usecount(vp, -1);
2239 		VOP_UNLOCK(vp, LK_INTERLOCK, td);
2240 		return;
2241 	}
2242 
2243 	if (vp->v_usecount == 1) {
2244 		v_incr_usecount(vp, -1);
2245 		/*
2246 		 * We must call VOP_INACTIVE with the node locked, so
2247 		 * we just need to release the vnode mutex. Mark it
2248 		 * as VI_DOINGINACT to avoid recursion.
2249 		 */
2250 		vp->v_iflag |= VI_DOINGINACT;
2251 		VI_UNLOCK(vp);
2252 		VOP_INACTIVE(vp, td);
2253 		VI_LOCK(vp);
2254 		KASSERT(vp->v_iflag & VI_DOINGINACT,
2255 		    ("vput: lost VI_DOINGINACT"));
2256 		vp->v_iflag &= ~VI_DOINGINACT;
2257 		if (VSHOULDFREE(vp))
2258 			vfree(vp);
2259 		else
2260 			vlruvp(vp);
2261 		VI_UNLOCK(vp);
2262 
2263 	} else {
2264 #ifdef DIAGNOSTIC
2265 		vprint("vput: negative ref count", vp);
2266 #endif
2267 		panic("vput: negative ref cnt");
2268 	}
2269 }
2270 
2271 /*
2272  * Somebody doesn't want the vnode recycled.
2273  */
2274 void
2275 vhold(struct vnode *vp)
2276 {
2277 	VI_LOCK(vp);
2278 	vholdl(vp);
2279 	VI_UNLOCK(vp);
2280 }
2281 
2282 void
2283 vholdl(vp)
2284 	register struct vnode *vp;
2285 {
2286 	int s;
2287 
2288 	s = splbio();
2289 	vp->v_holdcnt++;
2290 	if (VSHOULDBUSY(vp))
2291 		vbusy(vp);
2292 	splx(s);
2293 }
2294 
2295 /*
2296  * Note that there is one fewer holder who cares about this vnode.  vdrop() is the
2297  * opposite of vhold().
2298  */
2299 void
2300 vdrop(struct vnode *vp)
2301 {
2302 	VI_LOCK(vp);
2303 	vdropl(vp);
2304 	VI_UNLOCK(vp);
2305 }
2306 
2307 void
2308 vdropl(vp)
2309 	register struct vnode *vp;
2310 {
2311 	int s;
2312 
2313 	s = splbio();
2314 	if (vp->v_holdcnt <= 0)
2315 		panic("vdrop: holdcnt");
2316 	vp->v_holdcnt--;
2317 	if (VSHOULDFREE(vp))
2318 		vfree(vp);
2319 	else
2320 		vlruvp(vp);
2321 	splx(s);
2322 }
2323 
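/*
 * Illustrative sketch, not part of this file: vhold()/vdrop() keep a vnode
 * from being recycled across a blocking operation without taking a full
 * use reference.  The example_*() name is an assumption.
 */
static void
example_hold_across_block(struct vnode *vp)
{

	vhold(vp);			/* pin the vnode on its current list */
	/* ... potentially blocking work that must not lose vp ... */
	vdrop(vp);			/* drop the hold; may move vp to the free list */
}
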
2324 /*
2325  * Remove any vnodes in the vnode table belonging to mount point mp.
2326  *
2327  * If FORCECLOSE is not specified, there should not be any active ones,
2328  * return error if any are found (nb: this is a user error, not a
2329  * system error). If FORCECLOSE is specified, detach any active vnodes
2330  * that are found.
2331  *
2332  * If WRITECLOSE is set, only flush out regular file vnodes open for
2333  * writing.
2334  *
2335  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2336  *
2337  * `rootrefs' specifies the base reference count for the root vnode
2338  * of this filesystem. The root vnode is considered busy if its
2339  * v_usecount exceeds this value. On a successful return, vflush()
2340  * will call vrele() on the root vnode exactly rootrefs times.
2341  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2342  * be zero.
2343  */
2344 #ifdef DIAGNOSTIC
2345 static int busyprt = 0;		/* print out busy vnodes */
2346 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2347 #endif
2348 
2349 int
2350 vflush(mp, rootrefs, flags)
2351 	struct mount *mp;
2352 	int rootrefs;
2353 	int flags;
2354 {
2355 	struct thread *td = curthread;	/* XXX */
2356 	struct vnode *vp, *nvp, *rootvp = NULL;
2357 	struct vattr vattr;
2358 	int busy = 0, error;
2359 
2360 	if (rootrefs > 0) {
2361 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2362 		    ("vflush: bad args"));
2363 		/*
2364 		 * Get the filesystem root vnode. We can vput() it
2365 		 * immediately, since with rootrefs > 0, it won't go away.
2366 		 */
2367 		if ((error = VFS_ROOT(mp, &rootvp)) != 0)
2368 			return (error);
2369 		vput(rootvp);
2370 
2371 	}
2372 	mtx_lock(&mntvnode_mtx);
2373 loop:
2374 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp; vp = nvp) {
2375 		/*
2376 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
2377 		 * Start over if it was (it won't be on the list anymore).
2378 		 */
2379 		if (vp->v_mount != mp)
2380 			goto loop;
2381 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
2382 
2383 		VI_LOCK(vp);
2384 		mtx_unlock(&mntvnode_mtx);
2385 		vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, td);
2386 		/*
2387 		 * Skip over a vnodes marked VV_SYSTEM.
2388 		 * Skip over vnodes marked VV_SYSTEM.
2389 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2390 			VOP_UNLOCK(vp, 0, td);
2391 			mtx_lock(&mntvnode_mtx);
2392 			continue;
2393 		}
2394 		/*
2395 		 * If WRITECLOSE is set, flush out unlinked but still open
2396 		 * files (even if open only for reading) and regular file
2397 		 * vnodes open for writing.
2398 		 */
2399 		if (flags & WRITECLOSE) {
2400 			error = VOP_GETATTR(vp, &vattr, td->td_ucred, td);
2401 			VI_LOCK(vp);
2402 
2403 			if ((vp->v_type == VNON ||
2404 			    (error == 0 && vattr.va_nlink > 0)) &&
2405 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2406 				VOP_UNLOCK(vp, LK_INTERLOCK, td);
2407 				mtx_lock(&mntvnode_mtx);
2408 				continue;
2409 			}
2410 		} else
2411 			VI_LOCK(vp);
2412 
2413 		VOP_UNLOCK(vp, 0, td);
2414 
2415 		/*
2416 		 * With v_usecount == 0, all we need to do is clear out the
2417 		 * vnode data structures and we are done.
2418 		 */
2419 		if (vp->v_usecount == 0) {
2420 			vgonel(vp, td);
2421 			mtx_lock(&mntvnode_mtx);
2422 			continue;
2423 		}
2424 
2425 		/*
2426 		 * If FORCECLOSE is set, forcibly close the vnode. For block
2427 		 * or character devices, revert to an anonymous device. For
2428 		 * all other files, just kill them.
2429 		 */
2430 		if (flags & FORCECLOSE) {
2431 			if (vp->v_type != VCHR) {
2432 				vgonel(vp, td);
2433 			} else {
2434 				vclean(vp, 0, td);
2435 				VI_UNLOCK(vp);
2436 				vp->v_op = spec_vnodeop_p;
2437 				insmntque(vp, (struct mount *) 0);
2438 			}
2439 			mtx_lock(&mntvnode_mtx);
2440 			continue;
2441 		}
2442 #ifdef DIAGNOSTIC
2443 		if (busyprt)
2444 			vprint("vflush: busy vnode", vp);
2445 #endif
2446 		VI_UNLOCK(vp);
2447 		mtx_lock(&mntvnode_mtx);
2448 		busy++;
2449 	}
2450 	mtx_unlock(&mntvnode_mtx);
2451 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2452 		/*
2453 		 * If just the root vnode is busy, and if its refcount
2454 		 * is equal to `rootrefs', then go ahead and kill it.
2455 		 */
2456 		VI_LOCK(rootvp);
2457 		KASSERT(busy > 0, ("vflush: not busy"));
2458 		KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
2459 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2460 			vgonel(rootvp, td);
2461 			busy = 0;
2462 		} else
2463 			VI_UNLOCK(rootvp);
2464 	}
2465 	if (busy)
2466 		return (EBUSY);
2467 	for (; rootrefs > 0; rootrefs--)
2468 		vrele(rootvp);
2469 	return (0);
2470 }
2471 
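/*
 * Illustrative sketch, not part of this file: a filesystem unmount routine
 * would typically call vflush() with one reference held on the root vnode,
 * forcing the flush only when MNT_FORCE was requested.  The example_*()
 * name is an assumption.
 */
static int
example_fs_unmount(struct mount *mp, int mntflags)
{
	int flags;

	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
	return (vflush(mp, 1, flags));	/* rootrefs == 1 for the fs root vnode */
}
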
2472 /*
2473  * This moves a now (likely recyclable) vnode to the end of the
2474  * mountlist.  XXX However, it is temporarily disabled until we
2475  * can clean up ffs_sync() and friends, which have loop restart
2476  * conditions which this code causes to operate O(N^2).
2477  */
2478 static void
2479 vlruvp(struct vnode *vp)
2480 {
2481 #if 0
2482 	struct mount *mp;
2483 
2484 	if ((mp = vp->v_mount) != NULL) {
2485 		mtx_lock(&mntvnode_mtx);
2486 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2487 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2488 		mtx_unlock(&mntvnode_mtx);
2489 	}
2490 #endif
2491 }
2492 
2493 /*
2494  * Disassociate the underlying filesystem from a vnode.
2495  */
2496 static void
2497 vclean(vp, flags, td)
2498 	struct vnode *vp;
2499 	int flags;
2500 	struct thread *td;
2501 {
2502 	int active;
2503 
2504 	ASSERT_VI_LOCKED(vp, "vclean");
2505 	/*
2506 	 * Check to see if the vnode is in use. If so we have to reference it
2507 	 * before we clean it out so that its count cannot fall to zero and
2508 	 * generate a race against ourselves to recycle it.
2509 	 */
2510 	if ((active = vp->v_usecount))
2511 		v_incr_usecount(vp, 1);
2512 
2513 	/*
2514 	 * Prevent the vnode from being recycled or brought into use while we
2515 	 * clean it out.
2516 	 */
2517 	if (vp->v_iflag & VI_XLOCK)
2518 		panic("vclean: deadlock");
2519 	vp->v_iflag |= VI_XLOCK;
2520 	vp->v_vxproc = curthread;
2521 	/*
2522 	 * Even if the count is zero, the VOP_INACTIVE routine may still
2523 	 * have the object locked while it cleans it out. The VOP_LOCK
2524 	 * ensures that the VOP_INACTIVE routine is done with its work.
2525 	 * For active vnodes, it ensures that no other activity can
2526 	 * occur while the underlying object is being cleaned out.
2527 	 */
2528 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td);
2529 
2530 	/*
2531 	 * Clean out any buffers associated with the vnode.
2532 	 * If the flush fails, just toss the buffers.
2533 	 */
2534 	if (flags & DOCLOSE) {
2535 		struct buf *bp;
2536 		VI_LOCK(vp);
2537 		bp = TAILQ_FIRST(&vp->v_dirtyblkhd);
2538 		VI_UNLOCK(vp);
2539 		if (bp != NULL)
2540 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
2541 		if (vinvalbuf(vp, V_SAVE, NOCRED, td, 0, 0) != 0)
2542 			vinvalbuf(vp, 0, NOCRED, td, 0, 0);
2543 	}
2544 
2545 	VOP_DESTROYVOBJECT(vp);
2546 
2547 	/*
2548 	 * Any other processes trying to obtain this lock must first
2549 	 * wait for VXLOCK to clear, then call the new lock operation.
2550 	 */
2551 	VOP_UNLOCK(vp, 0, td);
2552 
2553 	/*
2554 	 * If purging an active vnode, it must be closed and
2555 	 * deactivated before being reclaimed. Note that the
2556 	 * VOP_INACTIVE will unlock the vnode.
2557 	 */
2558 	if (active) {
2559 		if (flags & DOCLOSE)
2560 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2561 		VI_LOCK(vp);
2562 		if ((vp->v_iflag & VI_DOINGINACT) == 0) {
2563 			vp->v_iflag |= VI_DOINGINACT;
2564 			VI_UNLOCK(vp);
2565 			if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT, td) != 0)
2566 				panic("vclean: cannot relock.");
2567 			VOP_INACTIVE(vp, td);
2568 			VI_LOCK(vp);
2569 			KASSERT(vp->v_iflag & VI_DOINGINACT,
2570 			    ("vclean: lost VI_DOINGINACT"));
2571 			vp->v_iflag &= ~VI_DOINGINACT;
2572 		}
2573 		VI_UNLOCK(vp);
2574 	}
2575 
2576 	/*
2577 	 * Reclaim the vnode.
2578 	 */
2579 	if (VOP_RECLAIM(vp, td))
2580 		panic("vclean: cannot reclaim");
2581 
2582 	if (active) {
2583 		/*
2584 		 * Inline copy of vrele() since VOP_INACTIVE
2585 		 * has already been called.
2586 		 */
2587 		VI_LOCK(vp);
2588 		v_incr_usecount(vp, -1);
2589 		if (vp->v_usecount <= 0) {
2590 #ifdef DIAGNOSTIC
2591 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
2592 				vprint("vclean: bad ref count", vp);
2593 				panic("vclean: ref cnt");
2594 			}
2595 #endif
2596 			vfree(vp);
2597 		}
2598 		VI_UNLOCK(vp);
2599 	}
2600 
2601 	cache_purge(vp);
2602 	VI_LOCK(vp);
2603 	if (VSHOULDFREE(vp))
2604 		vfree(vp);
2605 
2606 	/*
2607 	 * Done with purge, reset to the standard lock and
2608 	 * notify sleepers of the grim news.
2609 	 */
2610 	vp->v_vnlock = &vp->v_lock;
2611 	vp->v_op = dead_vnodeop_p;
2612 	if (vp->v_pollinfo != NULL)
2613 		vn_pollgone(vp);
2614 	vp->v_tag = "none";
2615 	vp->v_iflag &= ~VI_XLOCK;
2616 	vp->v_vxproc = NULL;
2617 	if (vp->v_iflag & VI_XWANT) {
2618 		vp->v_iflag &= ~VI_XWANT;
2619 		wakeup(vp);
2620 	}
2621 }
2622 
2623 /*
2624  * Eliminate all activity associated with the requested vnode
2625  * and with all vnodes aliased to the requested vnode.
2626  */
2627 int
2628 vop_revoke(ap)
2629 	struct vop_revoke_args /* {
2630 		struct vnode *a_vp;
2631 		int a_flags;
2632 	} */ *ap;
2633 {
2634 	struct vnode *vp, *vq;
2635 	dev_t dev;
2636 
2637 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
2638 	vp = ap->a_vp;
2639 	KASSERT((vp->v_type == VCHR), ("vop_revoke: not VCHR"));
2640 
2641 	VI_LOCK(vp);
2642 	/*
2643 	 * If a vgone (or vclean) is already in progress,
2644 	 * wait until it is done and return.
2645 	 */
2646 	if (vp->v_iflag & VI_XLOCK) {
2647 		vp->v_iflag |= VI_XWANT;
2648 		msleep(vp, VI_MTX(vp), PINOD | PDROP,
2649 		    "vop_revokeall", 0);
2650 		return (0);
2651 	}
2652 	VI_UNLOCK(vp);
2653 	dev = vp->v_rdev;
2654 	for (;;) {
2655 		mtx_lock(&spechash_mtx);
2656 		vq = SLIST_FIRST(&dev->si_hlist);
2657 		mtx_unlock(&spechash_mtx);
2658 		if (!vq)
2659 			break;
2660 		vgone(vq);
2661 	}
2662 	return (0);
2663 }
2664 
2665 /*
2666  * Recycle an unused vnode to the front of the free list.
2667  * Release the passed interlock if the vnode will be recycled.
2668  */
2669 int
2670 vrecycle(vp, inter_lkp, td)
2671 	struct vnode *vp;
2672 	struct mtx *inter_lkp;
2673 	struct thread *td;
2674 {
2675 
2676 	VI_LOCK(vp);
2677 	if (vp->v_usecount == 0) {
2678 		if (inter_lkp) {
2679 			mtx_unlock(inter_lkp);
2680 		}
2681 		vgonel(vp, td);
2682 		return (1);
2683 	}
2684 	VI_UNLOCK(vp);
2685 	return (0);
2686 }
2687 
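/*
 * Illustrative sketch, not part of this file: a filesystem's inactive
 * routine can hand back a vnode whose backing object is gone by calling
 * vrecycle(), which does nothing if the vnode has gained users again.
 * The example_*() name is an assumption.
 */
static void
example_fs_inactive_done(struct vnode *vp, struct thread *td)
{

	(void) vrecycle(vp, NULL, td);	/* recycle only if still unused */
}
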
2688 /*
2689  * Eliminate all activity associated with a vnode
2690  * in preparation for reuse.
2691  */
2692 void
2693 vgone(vp)
2694 	register struct vnode *vp;
2695 {
2696 	struct thread *td = curthread;	/* XXX */
2697 
2698 	VI_LOCK(vp);
2699 	vgonel(vp, td);
2700 }
2701 
2702 /*
2703  * vgone, with the vp interlock held.
2704  */
2705 void
2706 vgonel(vp, td)
2707 	struct vnode *vp;
2708 	struct thread *td;
2709 {
2710 	int s;
2711 
2712 	/*
2713 	 * If a vgone (or vclean) is already in progress,
2714 	 * wait until it is done and return.
2715 	 */
2716 	ASSERT_VI_LOCKED(vp, "vgonel");
2717 	if (vp->v_iflag & VI_XLOCK) {
2718 		vp->v_iflag |= VI_XWANT;
2719 		msleep(vp, VI_MTX(vp), PINOD | PDROP, "vgone", 0);
2720 		return;
2721 	}
2722 
2723 	/*
2724 	 * Clean out the filesystem specific data.
2725 	 */
2726 	vclean(vp, DOCLOSE, td);
2727 	VI_UNLOCK(vp);
2728 
2729 	/*
2730 	 * Delete from old mount point vnode list, if on one.
2731 	 */
2732 	if (vp->v_mount != NULL)
2733 		insmntque(vp, (struct mount *)0);
2734 	/*
2735 	 * If special device, remove it from special device alias list
2736 	 * if it is on one.
2737 	 */
2738 	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
2739 		VI_LOCK(vp);
2740 		mtx_lock(&spechash_mtx);
2741 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
2742 		vp->v_rdev->si_usecount -= vp->v_usecount;
2743 		mtx_unlock(&spechash_mtx);
2744 		VI_UNLOCK(vp);
2745 		vp->v_rdev = NULL;
2746 	}
2747 
2748 	/*
2749 	 * If it is on the freelist and not already at the head,
2750 	 * move it to the head of the list. The test of the
2751 	 * VDOOMED flag and the reference count of zero is because
2752 	 * it will be removed from the free list by getnewvnode,
2753 	 * but will not have its reference count incremented until
2754 	 * after calling vgone. If the reference count were
2755 	 * incremented first, vgone would (incorrectly) try to
2756 	 * close the previous instance of the underlying object.
2757 	 */
2758 	VI_LOCK(vp);
2759 	if (vp->v_usecount == 0 && !(vp->v_iflag & VI_DOOMED)) {
2760 		s = splbio();
2761 		mtx_lock(&vnode_free_list_mtx);
2762 		if (vp->v_iflag & VI_FREE) {
2763 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2764 		} else {
2765 			vp->v_iflag |= VI_FREE;
2766 			freevnodes++;
2767 		}
2768 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2769 		mtx_unlock(&vnode_free_list_mtx);
2770 		splx(s);
2771 	}
2772 
2773 	vp->v_type = VBAD;
2774 	VI_UNLOCK(vp);
2775 }
2776 
2777 /*
2778  * Lookup a vnode by device number.
2779  */
2780 int
2781 vfinddev(dev, type, vpp)
2782 	dev_t dev;
2783 	enum vtype type;
2784 	struct vnode **vpp;
2785 {
2786 	struct vnode *vp;
2787 
2788 	mtx_lock(&spechash_mtx);
2789 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
2790 		if (type == vp->v_type) {
2791 			*vpp = vp;
2792 			mtx_unlock(&spechash_mtx);
2793 			return (1);
2794 		}
2795 	}
2796 	mtx_unlock(&spechash_mtx);
2797 	return (0);
2798 }
2799 
2800 /*
2801  * Calculate the total number of references to a special device.
2802  */
2803 int
2804 vcount(vp)
2805 	struct vnode *vp;
2806 {
2807 	int count;
2808 
2809 	mtx_lock(&spechash_mtx);
2810 	count = vp->v_rdev->si_usecount;
2811 	mtx_unlock(&spechash_mtx);
2812 	return (count);
2813 }
2814 
2815 /*
2816  * Same as above, but taking a dev_t as the argument.
2817  */
2818 int
2819 count_dev(dev)
2820 	dev_t dev;
2821 {
2822 	struct vnode *vp;
2823 
2824 	vp = SLIST_FIRST(&dev->si_hlist);
2825 	if (vp == NULL)
2826 		return (0);
2827 	return(vcount(vp));
2828 }
2829 
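/*
 * Illustrative sketch, not part of this file: because several vnodes may
 * alias the same device, "is anyone else using this device" checks should
 * go through vcount()/count_dev() rather than one vnode's v_usecount.
 * The example_*() name is an assumption.
 */
static int
example_device_has_other_users(struct vnode *devvp)
{

	return (vcount(devvp) > 1);
}
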
2830 /*
2831  * Print out a description of a vnode.
2832  */
2833 static char *typename[] =
2834 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2835 
2836 void
2837 vprint(label, vp)
2838 	char *label;
2839 	struct vnode *vp;
2840 {
2841 	char buf[96];
2842 
2843 	if (label != NULL)
2844 		printf("%s: %p: ", label, (void *)vp);
2845 	else
2846 		printf("%p: ", (void *)vp);
2847 	printf("tag %s, type %s, usecount %d, writecount %d, refcount %d,",
2848 	    vp->v_tag, typename[vp->v_type], vp->v_usecount,
2849 	    vp->v_writecount, vp->v_holdcnt);
2850 	buf[0] = '\0';
2851 	if (vp->v_vflag & VV_ROOT)
2852 		strcat(buf, "|VV_ROOT");
2853 	if (vp->v_vflag & VV_TEXT)
2854 		strcat(buf, "|VV_TEXT");
2855 	if (vp->v_vflag & VV_SYSTEM)
2856 		strcat(buf, "|VV_SYSTEM");
2857 	if (vp->v_iflag & VI_XLOCK)
2858 		strcat(buf, "|VI_XLOCK");
2859 	if (vp->v_iflag & VI_XWANT)
2860 		strcat(buf, "|VI_XWANT");
2861 	if (vp->v_iflag & VI_BWAIT)
2862 		strcat(buf, "|VI_BWAIT");
2863 	if (vp->v_iflag & VI_DOOMED)
2864 		strcat(buf, "|VI_DOOMED");
2865 	if (vp->v_iflag & VI_FREE)
2866 		strcat(buf, "|VI_FREE");
2867 	if (vp->v_vflag & VV_OBJBUF)
2868 		strcat(buf, "|VV_OBJBUF");
2869 	if (buf[0] != '\0')
2870 		printf(" flags (%s),", &buf[1]);
2871 	lockmgr_printinfo(vp->v_vnlock);
2872 	printf("\n");
2873 	if (vp->v_data != NULL) {
2874 		printf("\t");
2875 		VOP_PRINT(vp);
2876 	}
2877 }
2878 
2879 #ifdef DDB
2880 #include <ddb/ddb.h>
2881 /*
2882  * List all of the locked vnodes in the system.
2883  * Called when debugging the kernel.
2884  */
2885 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2886 {
2887 	struct thread *td = curthread;	/* XXX */
2888 	struct mount *mp, *nmp;
2889 	struct vnode *vp;
2890 
2891 	printf("Locked vnodes\n");
2892 	mtx_lock(&mountlist_mtx);
2893 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2894 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
2895 			nmp = TAILQ_NEXT(mp, mnt_list);
2896 			continue;
2897 		}
2898 		mtx_lock(&mntvnode_mtx);
2899 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2900 			if (VOP_ISLOCKED(vp, NULL))
2901 				vprint((char *)0, vp);
2902 		}
2903 		mtx_unlock(&mntvnode_mtx);
2904 		mtx_lock(&mountlist_mtx);
2905 		nmp = TAILQ_NEXT(mp, mnt_list);
2906 		vfs_unbusy(mp, td);
2907 	}
2908 	mtx_unlock(&mountlist_mtx);
2909 }
2910 #endif
2911 
2912 /*
2913  * Fill in a struct xvfsconf based on a struct vfsconf.
2914  */
2915 static void
2916 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2917 {
2918 
2919 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2920 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2921 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2922 	xvfsp->vfc_flags = vfsp->vfc_flags;
2923 	/*
2924 	 * These are unused in userland; we keep them
2925 	 * so as not to break binary compatibility.
2926 	 */
2927 	xvfsp->vfc_vfsops = NULL;
2928 	xvfsp->vfc_next = NULL;
2929 }
2930 
2931 static int
2932 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2933 {
2934 	struct vfsconf *vfsp;
2935 	struct xvfsconf *xvfsp;
2936 	int cnt, error, i;
2937 
2938 	cnt = 0;
2939 	for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next)
2940 		cnt++;
2941 	xvfsp = malloc(sizeof(struct xvfsconf) * cnt, M_TEMP, 0);
2942 	/*
2943 	 * Handle the race we will have here once struct vfsconf is
2944 	 * locked down, by using both cnt and checking vfc_next
2945 	 * against NULL to determine the end of the loop.  The race will
2946 	 * happen because we will have to unlock before calling malloc().
2947 	 * We are protected by Giant for now.
2948 	 */
2949 	i = 0;
2950 	for (vfsp = vfsconf; vfsp != NULL && i < cnt; vfsp = vfsp->vfc_next) {
2951 		vfsconf2x(vfsp, xvfsp + i);
2952 		i++;
2953 	}
2954 	error = SYSCTL_OUT(req, xvfsp, sizeof(struct xvfsconf) * i);
2955 	free(xvfsp, M_TEMP);
2956 	return (error);
2957 }
2958 
2959 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2960     "S,xvfsconf", "List of all configured filesystems");
2961 
2962 /*
2963  * Top level filesystem related information gathering.
2964  */
2965 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2966 
2967 static int
2968 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2969 {
2970 	int *name = (int *)arg1 - 1;	/* XXX */
2971 	u_int namelen = arg2 + 1;	/* XXX */
2972 	struct vfsconf *vfsp;
2973 	struct xvfsconf xvfsp;
2974 
2975 	printf("WARNING: userland calling deprecated sysctl, "
2976 	    "please rebuild world\n");
2977 
2978 #if 1 || defined(COMPAT_PRELITE2)
2979 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2980 	if (namelen == 1)
2981 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2982 #endif
2983 
2984 	switch (name[1]) {
2985 	case VFS_MAXTYPENUM:
2986 		if (namelen != 2)
2987 			return (ENOTDIR);
2988 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2989 	case VFS_CONF:
2990 		if (namelen != 3)
2991 			return (ENOTDIR);	/* overloaded */
2992 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2993 			if (vfsp->vfc_typenum == name[2])
2994 				break;
2995 		if (vfsp == NULL)
2996 			return (EOPNOTSUPP);
2997 		vfsconf2x(vfsp, &xvfsp);
2998 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2999 	}
3000 	return (EOPNOTSUPP);
3001 }
3002 
3003 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP, vfs_sysctl,
3004 	"Generic filesystem");
3005 
3006 #if 1 || defined(COMPAT_PRELITE2)
3007 
3008 static int
3009 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3010 {
3011 	int error;
3012 	struct vfsconf *vfsp;
3013 	struct ovfsconf ovfs;
3014 
3015 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
3016 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3017 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3018 		ovfs.vfc_index = vfsp->vfc_typenum;
3019 		ovfs.vfc_refcount = vfsp->vfc_refcount;
3020 		ovfs.vfc_flags = vfsp->vfc_flags;
3021 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3022 		if (error)
3023 			return error;
3024 	}
3025 	return 0;
3026 }
3027 
3028 #endif /* 1 || COMPAT_PRELITE2 */
3029 
3030 #define KINFO_VNODESLOP		10
3031 /*
3032  * Dump vnode list (via sysctl).
3033  */
3034 /* ARGSUSED */
3035 static int
3036 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3037 {
3038 	struct xvnode *xvn;
3039 	struct thread *td = req->td;
3040 	struct mount *mp;
3041 	struct vnode *vp;
3042 	int error, len, n;
3043 
3044 	/*
3045 	 * Stale numvnodes access is not fatal here.
3046 	 */
3047 	req->lock = 0;
3048 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3049 	if (!req->oldptr)
3050 		/* Make an estimate */
3051 		return (SYSCTL_OUT(req, 0, len));
3052 
3053 	sysctl_wire_old_buffer(req, 0);
3054 	xvn = malloc(len, M_TEMP, M_ZERO | 0);
3055 	n = 0;
3056 	mtx_lock(&mountlist_mtx);
3057 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3058 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
3059 			continue;
3060 		mtx_lock(&mntvnode_mtx);
3061 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3062 			if (n == len)
3063 				break;
3064 			vref(vp);
3065 			xvn[n].xv_size = sizeof *xvn;
3066 			xvn[n].xv_vnode = vp;
3067 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3068 			XV_COPY(usecount);
3069 			XV_COPY(writecount);
3070 			XV_COPY(holdcnt);
3071 			XV_COPY(id);
3072 			XV_COPY(mount);
3073 			XV_COPY(numoutput);
3074 			XV_COPY(type);
3075 #undef XV_COPY
3076 			xvn[n].xv_flag = vp->v_vflag;
3077 
3078 			switch (vp->v_type) {
3079 			case VREG:
3080 			case VDIR:
3081 			case VLNK:
3082 				xvn[n].xv_dev = vp->v_cachedfs;
3083 				xvn[n].xv_ino = vp->v_cachedid;
3084 				break;
3085 			case VBLK:
3086 			case VCHR:
3087 				if (vp->v_rdev == NULL) {
3088 					vrele(vp);
3089 					continue;
3090 				}
3091 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3092 				break;
3093 			case VSOCK:
3094 				xvn[n].xv_socket = vp->v_socket;
3095 				break;
3096 			case VFIFO:
3097 				xvn[n].xv_fifo = vp->v_fifoinfo;
3098 				break;
3099 			case VNON:
3100 			case VBAD:
3101 			default:
3102 				/* shouldn't happen? */
3103 				vrele(vp);
3104 				continue;
3105 			}
3106 			vrele(vp);
3107 			++n;
3108 		}
3109 		mtx_unlock(&mntvnode_mtx);
3110 		mtx_lock(&mountlist_mtx);
3111 		vfs_unbusy(mp, td);
3112 		if (n == len)
3113 			break;
3114 	}
3115 	mtx_unlock(&mountlist_mtx);
3116 
3117 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3118 	free(xvn, M_TEMP);
3119 	return (error);
3120 }
3121 
3122 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3123 	0, 0, sysctl_vnode, "S,xvnode", "");
3124 
3125 /*
3126  * Check to see if a filesystem is mounted on a block device.
3127  */
3128 int
3129 vfs_mountedon(vp)
3130 	struct vnode *vp;
3131 {
3132 
3133 	if (vp->v_rdev->si_mountpoint != NULL)
3134 		return (EBUSY);
3135 	return (0);
3136 }
3137 
3138 /*
3139  * Unmount all filesystems. The list is traversed in reverse order
3140  * of mounting to avoid dependencies.
3141  */
3142 void
3143 vfs_unmountall()
3144 {
3145 	struct mount *mp;
3146 	struct thread *td;
3147 	int error;
3148 
3149 	if (curthread != NULL)
3150 		td = curthread;
3151 	else
3152 		td = FIRST_THREAD_IN_PROC(initproc); /* XXX XXX proc0? */
3153 	/*
3154 	 * Since this only runs when rebooting, it is not interlocked.
3155 	 */
3156 	while(!TAILQ_EMPTY(&mountlist)) {
3157 		mp = TAILQ_LAST(&mountlist, mntlist);
3158 		error = dounmount(mp, MNT_FORCE, td);
3159 		if (error) {
3160 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3161 			printf("unmount of %s failed (",
3162 			    mp->mnt_stat.f_mntonname);
3163 			if (error == EBUSY)
3164 				printf("BUSY)\n");
3165 			else
3166 				printf("%d)\n", error);
3167 		} else {
3168 			/* The unmount has removed mp from the mountlist */
3169 		}
3170 	}
3171 }
3172 
3173 /*
3174  * Perform msync on all vnodes under a mount point.
3175  * The mount point must be locked.
3176  */
3177 void
3178 vfs_msync(struct mount *mp, int flags)
3179 {
3180 	struct vnode *vp, *nvp;
3181 	struct vm_object *obj;
3182 	int tries;
3183 
3184 	GIANT_REQUIRED;
3185 
3186 	tries = 5;
3187 	mtx_lock(&mntvnode_mtx);
3188 loop:
3189 	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
3190 		if (vp->v_mount != mp) {
3191 			if (--tries > 0)
3192 				goto loop;
3193 			break;
3194 		}
3195 		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
3196 
3197 		VI_LOCK(vp);
3198 		if (vp->v_iflag & VI_XLOCK) {	/* XXX: what if MNT_WAIT? */
3199 			VI_UNLOCK(vp);
3200 			continue;
3201 		}
3202 
3203 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3204 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp, NULL) == 0)) {
3205 			mtx_unlock(&mntvnode_mtx);
3206 			if (!vget(vp,
3207 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3208 			    curthread)) {
3209 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3210 					vput(vp);
3211 					mtx_lock(&mntvnode_mtx);
3212 					continue;
3213 				}
3214 
3215 				if (VOP_GETVOBJECT(vp, &obj) == 0) {
3216 					vm_object_page_clean(obj, 0, 0,
3217 					    flags == MNT_WAIT ?
3218 					    OBJPC_SYNC : OBJPC_NOSYNC);
3219 				}
3220 				vput(vp);
3221 			}
3222 			mtx_lock(&mntvnode_mtx);
3223 			if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp) {
3224 				if (--tries > 0)
3225 					goto loop;
3226 				break;
3227 			}
3228 		} else
3229 			VI_UNLOCK(vp);
3230 	}
3231 	mtx_unlock(&mntvnode_mtx);
3232 }
3233 
3234 /*
3235  * Create the VM object needed for VMIO and mmap support.  This
3236  * is done for all VREG files in the system.  Some filesystems might
3237  * take advantage of the additional metadata buffering capability of
3238  * the VMIO code by making the device node be VMIO mode also.
3239  *
3240  * vp must be locked when vfs_object_create is called.
3241  */
3242 int
3243 vfs_object_create(vp, td, cred)
3244 	struct vnode *vp;
3245 	struct thread *td;
3246 	struct ucred *cred;
3247 {
3248 	GIANT_REQUIRED;
3249 	return (VOP_CREATEVOBJECT(vp, cred, td));
3250 }
3251 
3252 /*
3253  * Mark a vnode as free, putting it up for recycling.
3254  */
3255 void
3256 vfree(vp)
3257 	struct vnode *vp;
3258 {
3259 	int s;
3260 
3261 	ASSERT_VI_LOCKED(vp, "vfree");
3262 	s = splbio();
3263 	mtx_lock(&vnode_free_list_mtx);
3264 	KASSERT((vp->v_iflag & VI_FREE) == 0, ("vnode already free"));
3265 	if (vp->v_iflag & VI_AGE) {
3266 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3267 	} else {
3268 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3269 	}
3270 	freevnodes++;
3271 	mtx_unlock(&vnode_free_list_mtx);
3272 	vp->v_iflag &= ~VI_AGE;
3273 	vp->v_iflag |= VI_FREE;
3274 	splx(s);
3275 }
3276 
3277 /*
3278  * Opposite of vfree() - mark a vnode as in use.
3279  */
3280 void
3281 vbusy(vp)
3282 	struct vnode *vp;
3283 {
3284 	int s;
3285 
3286 	s = splbio();
3287 	ASSERT_VI_LOCKED(vp, "vbusy");
3288 	KASSERT((vp->v_iflag & VI_FREE) != 0, ("vnode not free"));
3289 
3290 	mtx_lock(&vnode_free_list_mtx);
3291 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3292 	freevnodes--;
3293 	mtx_unlock(&vnode_free_list_mtx);
3294 
3295 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3296 	splx(s);
3297 }
3298 
3299 /*
3300  * Record a process's interest in events which might happen to
3301  * a vnode.  Because poll uses the historic select-style interface
3302  * internally, this routine serves as both the ``check for any
3303  * pending events'' and the ``record my interest in future events''
3304  * functions.  (These are done together, while the lock is held,
3305  * to avoid race conditions.)
3306  */
3307 int
3308 vn_pollrecord(vp, td, events)
3309 	struct vnode *vp;
3310 	struct thread *td;
3311 	short events;
3312 {
3313 
3314 	if (vp->v_pollinfo == NULL)
3315 		v_addpollinfo(vp);
3316 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3317 	if (vp->v_pollinfo->vpi_revents & events) {
3318 		/*
3319 		 * This leaves events we are not interested
3320 		 * in available for the other process which
3321 		 * presumably had requested them
3322 		 * (otherwise they would never have been
3323 		 * recorded).
3324 		 */
3325 		events &= vp->v_pollinfo->vpi_revents;
3326 		vp->v_pollinfo->vpi_revents &= ~events;
3327 
3328 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3329 		return events;
3330 	}
3331 	vp->v_pollinfo->vpi_events |= events;
3332 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3333 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3334 	return 0;
3335 }
3336 
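/*
 * Illustrative sketch, not part of this file: a minimal poll VOP that
 * reports events it can always satisfy and records interest in the rest
 * via vn_pollrecord().  The example_*() name and the assumption that
 * writes never block are illustrative only.
 */
static int
example_fs_poll(struct vnode *vp, int events, struct thread *td)
{
	int revents;

	revents = events & (POLLOUT | POLLWRNORM);	/* assumed always ready */
	if (revents == 0)
		revents = vn_pollrecord(vp, td, events);
	return (revents);
}
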
3337 /*
3338  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
3339  * it is possible for us to miss an event due to race conditions, but
3340  * that condition is expected to be rare, so for the moment it is the
3341  * preferred interface.
3342  */
3343 void
3344 vn_pollevent(vp, events)
3345 	struct vnode *vp;
3346 	short events;
3347 {
3348 
3349 	if (vp->v_pollinfo == NULL)
3350 		v_addpollinfo(vp);
3351 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3352 	if (vp->v_pollinfo->vpi_events & events) {
3353 		/*
3354 		 * We clear vpi_events so that we don't
3355 		 * call selwakeup() twice if two events are
3356 		 * posted before the polling process(es) is
3357 		 * awakened.  This also ensures that we take at
3358 		 * most one selwakeup() if the polling process
3359 		 * is no longer interested.  However, it does
3360 		 * mean that only one event can be noticed at
3361 		 * a time.  (Perhaps we should only clear those
3362 		 * event bits which we note?) XXX
3363 		 */
3364 		vp->v_pollinfo->vpi_events = 0;	/* &= ~events ??? */
3365 		vp->v_pollinfo->vpi_revents |= events;
3366 		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3367 	}
3368 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3369 }
3370 
3371 /*
3372  * Wake up anyone polling on vp because it is being revoked.
3373  * This depends on dead_poll() returning POLLHUP for correct
3374  * behavior.
3375  */
3376 void
3377 vn_pollgone(vp)
3378 	struct vnode *vp;
3379 {
3380 
3381 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3382 	VN_KNOTE(vp, NOTE_REVOKE);
3383 	if (vp->v_pollinfo->vpi_events) {
3384 		vp->v_pollinfo->vpi_events = 0;
3385 		selwakeup(&vp->v_pollinfo->vpi_selinfo);
3386 	}
3387 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3388 }
3389 
3390 
3391 
3392 /*
3393  * Routine to create and manage a filesystem syncer vnode.
3394  */
3395 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3396 static int	sync_fsync(struct  vop_fsync_args *);
3397 static int	sync_inactive(struct  vop_inactive_args *);
3398 static int	sync_reclaim(struct  vop_reclaim_args *);
3399 static int	sync_print(struct vop_print_args *);
3400 
3401 static vop_t **sync_vnodeop_p;
3402 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
3403 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
3404 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
3405 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
3406 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
3407 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
3408 	{ &vop_lock_desc,	(vop_t *) vop_stdlock },	/* lock */
3409 	{ &vop_unlock_desc,	(vop_t *) vop_stdunlock },	/* unlock */
3410 	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
3411 	{ &vop_islocked_desc,	(vop_t *) vop_stdislocked },	/* islocked */
3412 	{ NULL, NULL }
3413 };
3414 static struct vnodeopv_desc sync_vnodeop_opv_desc =
3415 	{ &sync_vnodeop_p, sync_vnodeop_entries };
3416 
3417 VNODEOP_SET(sync_vnodeop_opv_desc);
3418 
3419 /*
3420  * Create a new filesystem syncer vnode for the specified mount point.
3421  */
3422 int
3423 vfs_allocate_syncvnode(mp)
3424 	struct mount *mp;
3425 {
3426 	struct vnode *vp;
3427 	static long start, incr, next;
3428 	int error;
3429 
3430 	/* Allocate a new vnode */
3431 	if ((error = getnewvnode("vfs", mp, sync_vnodeop_p, &vp)) != 0) {
3432 		mp->mnt_syncer = NULL;
3433 		return (error);
3434 	}
3435 	vp->v_type = VNON;
3436 	/*
3437 	 * Place the vnode onto the syncer worklist. We attempt to
3438 	 * scatter them about on the list so that they will go off
3439 	 * at evenly distributed times even if all the filesystems
3440 	 * are mounted at once.
3441 	 */
3442 	next += incr;
3443 	if (next == 0 || next > syncer_maxdelay) {
3444 		start /= 2;
3445 		incr /= 2;
3446 		if (start == 0) {
3447 			start = syncer_maxdelay / 2;
3448 			incr = syncer_maxdelay;
3449 		}
3450 		next = start;
3451 	}
3452 	VI_LOCK(vp);
3453 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
3454 	VI_UNLOCK(vp);
3455 	mp->mnt_syncer = vp;
3456 	return (0);
3457 }
3458 
3459 /*
3460  * Do a lazy sync of the filesystem.
3461  */
3462 static int
3463 sync_fsync(ap)
3464 	struct vop_fsync_args /* {
3465 		struct vnode *a_vp;
3466 		struct ucred *a_cred;
3467 		int a_waitfor;
3468 		struct thread *a_td;
3469 	} */ *ap;
3470 {
3471 	struct vnode *syncvp = ap->a_vp;
3472 	struct mount *mp = syncvp->v_mount;
3473 	struct thread *td = ap->a_td;
3474 	int error, asyncflag;
3475 
3476 	/*
3477 	 * We only need to do something if this is a lazy evaluation.
3478 	 */
3479 	if (ap->a_waitfor != MNT_LAZY)
3480 		return (0);
3481 
3482 	/*
3483 	 * Move ourselves to the back of the sync list.
3484 	 */
3485 	VI_LOCK(syncvp);
3486 	vn_syncer_add_to_worklist(syncvp, syncdelay);
3487 	VI_UNLOCK(syncvp);
3488 
3489 	/*
3490 	 * Walk the list of vnodes pushing all that are dirty and
3491 	 * not already on the sync list.
3492 	 */
3493 	mtx_lock(&mountlist_mtx);
3494 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, td) != 0) {
3495 		mtx_unlock(&mountlist_mtx);
3496 		return (0);
3497 	}
3498 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3499 		vfs_unbusy(mp, td);
3500 		return (0);
3501 	}
3502 	asyncflag = mp->mnt_flag & MNT_ASYNC;
3503 	mp->mnt_flag &= ~MNT_ASYNC;
3504 	vfs_msync(mp, MNT_NOWAIT);
3505 	error = VFS_SYNC(mp, MNT_LAZY, ap->a_cred, td);
3506 	if (asyncflag)
3507 		mp->mnt_flag |= MNT_ASYNC;
3508 	vn_finished_write(mp);
3509 	vfs_unbusy(mp, td);
3510 	return (error);
3511 }
3512 
3513 /*
3514  * The syncer vnode is no longer referenced.
3515  */
3516 static int
3517 sync_inactive(ap)
3518 	struct vop_inactive_args /* {
3519 		struct vnode *a_vp;
3520 		struct thread *a_td;
3521 	} */ *ap;
3522 {
3523 
3524 	VOP_UNLOCK(ap->a_vp, 0, ap->a_td);
3525 	vgone(ap->a_vp);
3526 	return (0);
3527 }
3528 
3529 /*
3530  * The syncer vnode is no longer needed and is being decommissioned.
3531  *
3532  * Modifications to the worklist must be protected at splbio().
3533  */
3534 static int
3535 sync_reclaim(ap)
3536 	struct vop_reclaim_args /* {
3537 		struct vnode *a_vp;
3538 	} */ *ap;
3539 {
3540 	struct vnode *vp = ap->a_vp;
3541 	int s;
3542 
3543 	s = splbio();
3544 	vp->v_mount->mnt_syncer = NULL;
3545 	VI_LOCK(vp);
3546 	if (vp->v_iflag & VI_ONWORKLST) {
3547 		mtx_lock(&sync_mtx);
3548 		LIST_REMOVE(vp, v_synclist);
3549 		mtx_unlock(&sync_mtx);
3550 		vp->v_iflag &= ~VI_ONWORKLST;
3551 	}
3552 	VI_UNLOCK(vp);
3553 	splx(s);
3554 
3555 	return (0);
3556 }
3557 
3558 /*
3559  * Print out a syncer vnode.
3560  */
3561 static int
3562 sync_print(ap)
3563 	struct vop_print_args /* {
3564 		struct vnode *a_vp;
3565 	} */ *ap;
3566 {
3567 	struct vnode *vp = ap->a_vp;
3568 
3569 	printf("syncer vnode");
3570 	if (vp->v_vnlock != NULL)
3571 		lockmgr_printinfo(vp->v_vnlock);
3572 	printf("\n");
3573 	return (0);
3574 }
3575 
3576 /*
3577  * Extract the dev_t from a VCHR vnode.
3578  */
3579 dev_t
3580 vn_todev(vp)
3581 	struct vnode *vp;
3582 {
3583 	if (vp->v_type != VCHR)
3584 		return (NODEV);
3585 	return (vp->v_rdev);
3586 }
3587 
3588 /*
3589  * Check if a vnode represents a disk device.
3590  */
3591 int
3592 vn_isdisk(vp, errp)
3593 	struct vnode *vp;
3594 	int *errp;
3595 {
3596 	struct cdevsw *cdevsw;
3597 
3598 	if (vp->v_type != VCHR) {
3599 		if (errp != NULL)
3600 			*errp = ENOTBLK;
3601 		return (0);
3602 	}
3603 	if (vp->v_rdev == NULL) {
3604 		if (errp != NULL)
3605 			*errp = ENXIO;
3606 		return (0);
3607 	}
3608 	cdevsw = devsw(vp->v_rdev);
3609 	if (cdevsw == NULL) {
3610 		if (errp != NULL)
3611 			*errp = ENXIO;
3612 		return (0);
3613 	}
3614 	if (!(cdevsw->d_flags & D_DISK)) {
3615 		if (errp != NULL)
3616 			*errp = ENOTBLK;
3617 		return (0);
3618 	}
3619 	if (errp != NULL)
3620 		*errp = 0;
3621 	return (1);
3622 }
3623 
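/*
 * Illustrative sketch, not part of this file: mount code validating that a
 * device vnode really is a disk before using it as backing store.  The
 * example_*() name is an assumption.
 */
static int
example_check_backing_disk(struct vnode *devvp)
{
	int error;

	if (!vn_isdisk(devvp, &error))
		return (error);		/* ENOTBLK or ENXIO, as set above */
	return (0);
}
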
3624 /*
3625  * Free data allocated by namei(); see namei(9) for details.
3626  */
3627 void
3628 NDFREE(ndp, flags)
3629      struct nameidata *ndp;
3630      const uint flags;
3631 {
3632 	if (!(flags & NDF_NO_FREE_PNBUF) &&
3633 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
3634 		uma_zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
3635 		ndp->ni_cnd.cn_flags &= ~HASBUF;
3636 	}
3637 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
3638 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
3639 	    ndp->ni_dvp != ndp->ni_vp)
3640 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_thread);
3641 	if (!(flags & NDF_NO_DVP_RELE) &&
3642 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
3643 		vrele(ndp->ni_dvp);
3644 		ndp->ni_dvp = NULL;
3645 	}
3646 	if (!(flags & NDF_NO_VP_UNLOCK) &&
3647 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
3648 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_thread);
3649 	if (!(flags & NDF_NO_VP_RELE) &&
3650 	    ndp->ni_vp) {
3651 		vrele(ndp->ni_vp);
3652 		ndp->ni_vp = NULL;
3653 	}
3654 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
3655 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
3656 		vrele(ndp->ni_startdir);
3657 		ndp->ni_startdir = NULL;
3658 	}
3659 }
3660 
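/*
 * Illustrative sketch, not part of this file: the usual namei()/NDFREE()
 * pairing.  NDF_ONLY_PNBUF releases just the pathname buffer, leaving the
 * vnode references for the caller to drop.  The example_*() name is an
 * assumption.
 */
static int
example_lookup(char *path, struct thread *td)
{
	struct nameidata nd;
	int error;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, td);
	if ((error = namei(&nd)) != 0)
		return (error);
	NDFREE(&nd, NDF_ONLY_PNBUF);	/* free only the path buffer */
	/* ... use the locked, referenced nd.ni_vp here ... */
	vput(nd.ni_vp);
	return (0);
}
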
3661 /*
3662  * Common filesystem object access control check routine.  Accepts a
3663  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3664  * and optional call-by-reference privused argument allowing vaccess()
3665  * to indicate to the caller whether privilege was used to satisfy the
3666  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3667  */
3668 int
3669 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
3670 	enum vtype type;
3671 	mode_t file_mode;
3672 	uid_t file_uid;
3673 	gid_t file_gid;
3674 	mode_t acc_mode;
3675 	struct ucred *cred;
3676 	int *privused;
3677 {
3678 	mode_t dac_granted;
3679 #ifdef CAPABILITIES
3680 	mode_t cap_granted;
3681 #endif
3682 
3683 	/*
3684 	 * Look for a normal, non-privileged way to access the file/directory
3685 	 * as requested.  If it exists, go with that.
3686 	 */
3687 
3688 	if (privused != NULL)
3689 		*privused = 0;
3690 
3691 	dac_granted = 0;
3692 
3693 	/* Check the owner. */
3694 	if (cred->cr_uid == file_uid) {
3695 		dac_granted |= VADMIN;
3696 		if (file_mode & S_IXUSR)
3697 			dac_granted |= VEXEC;
3698 		if (file_mode & S_IRUSR)
3699 			dac_granted |= VREAD;
3700 		if (file_mode & S_IWUSR)
3701 			dac_granted |= (VWRITE | VAPPEND);
3702 
3703 		if ((acc_mode & dac_granted) == acc_mode)
3704 			return (0);
3705 
3706 		goto privcheck;
3707 	}
3708 
3709 	/* Otherwise, check the groups (first match) */
3710 	if (groupmember(file_gid, cred)) {
3711 		if (file_mode & S_IXGRP)
3712 			dac_granted |= VEXEC;
3713 		if (file_mode & S_IRGRP)
3714 			dac_granted |= VREAD;
3715 		if (file_mode & S_IWGRP)
3716 			dac_granted |= (VWRITE | VAPPEND);
3717 
3718 		if ((acc_mode & dac_granted) == acc_mode)
3719 			return (0);
3720 
3721 		goto privcheck;
3722 	}
3723 
3724 	/* Otherwise, check everyone else. */
3725 	if (file_mode & S_IXOTH)
3726 		dac_granted |= VEXEC;
3727 	if (file_mode & S_IROTH)
3728 		dac_granted |= VREAD;
3729 	if (file_mode & S_IWOTH)
3730 		dac_granted |= (VWRITE | VAPPEND);
3731 	if ((acc_mode & dac_granted) == acc_mode)
3732 		return (0);
3733 
3734 privcheck:
3735 	if (!suser_cred(cred, PRISON_ROOT)) {
3736 		/* XXX audit: privilege used */
3737 		if (privused != NULL)
3738 			*privused = 1;
3739 		return (0);
3740 	}
3741 
3742 #ifdef CAPABILITIES
3743 	/*
3744 	 * Build a capability mask to determine if the set of capabilities
3745 	 * satisfies the requirements when combined with the granted mask
3746 	 * from above.
3747 	 * For each capability, if the capability is required, bitwise
3748 	 * or the request type onto the cap_granted mask.
3749 	 */
3750 	cap_granted = 0;
3751 
3752 	if (type == VDIR) {
3753 		/*
3754 		 * For directories, use CAP_DAC_READ_SEARCH to satisfy
3755 		 * VEXEC requests, instead of CAP_DAC_EXECUTE.
3756 		 */
3757 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3758 		    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3759 			cap_granted |= VEXEC;
3760 	} else {
3761 		if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3762 		    !cap_check(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
3763 			cap_granted |= VEXEC;
3764 	}
3765 
3766 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
3767 	    !cap_check(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
3768 		cap_granted |= VREAD;
3769 
3770 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3771 	    !cap_check(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
3772 		cap_granted |= (VWRITE | VAPPEND);
3773 
3774 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3775 	    !cap_check(cred, NULL, CAP_FOWNER, PRISON_ROOT))
3776 		cap_granted |= VADMIN;
3777 
3778 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
3779 		/* XXX audit: privilege used */
3780 		if (privused != NULL)
3781 			*privused = 1;
3782 		return (0);
3783 	}
3784 #endif
3785 
3786 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
3787 }
3788 
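/*
 * Illustrative sketch, not part of this file: a filesystem's access VOP
 * normally fetches the file's mode/uid/gid from its own inode and lets
 * vaccess() make the policy decision.  The example_*() name and parameters
 * are assumptions.
 */
static int
example_fs_access(struct vnode *vp, mode_t file_mode, uid_t uid, gid_t gid,
    mode_t acc_mode, struct ucred *cred)
{

	return (vaccess(vp->v_type, file_mode, uid, gid, acc_mode, cred, NULL));
}
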
3789 /*
3790  * Credential check based on process requesting service, and per-attribute
3791  * permissions.
3792  */
3793 int
3794 extattr_check_cred(struct vnode *vp, int attrnamespace,
3795     struct ucred *cred, struct thread *td, int access)
3796 {
3797 
3798 	/*
3799 	 * Kernel-invoked requests always succeed.
3800 	 */
3801 	if (cred == NOCRED)
3802 		return (0);
3803 
3804 	/*
3805 	 * Do not allow privileged processes in jail to directly
3806 	 * manipulate system attributes.
3807 	 *
3808 	 * XXX What capability should apply here?
3809 	 * Probably CAP_SYS_SETFFLAG.
3810 	 */
3811 	switch (attrnamespace) {
3812 	case EXTATTR_NAMESPACE_SYSTEM:
3813 		/* Potentially should be: return (EPERM); */
3814 		return (suser_cred(cred, 0));
3815 	case EXTATTR_NAMESPACE_USER:
3816 		return (VOP_ACCESS(vp, access, cred, td));
3817 	default:
3818 		return (EPERM);
3819 	}
3820 }
3821