xref: /freebsd/sys/kern/vfs_subr.c (revision ce4946daa5ce852d28008dac492029500ab2ee95)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
39  * $FreeBSD$
40  */
41 
42 /*
43  * External virtual filesystem routines
44  */
45 #include "opt_ddb.h"
46 #include "opt_ffs.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/conf.h>
53 #include <sys/eventhandler.h>
54 #include <sys/fcntl.h>
55 #include <sys/kernel.h>
56 #include <sys/kthread.h>
57 #include <sys/malloc.h>
58 #include <sys/mount.h>
59 #include <sys/namei.h>
60 #include <sys/stat.h>
61 #include <sys/sysctl.h>
62 #include <sys/vmmeter.h>
63 #include <sys/vnode.h>
64 
65 #include <vm/vm.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_extern.h>
68 #include <vm/pmap.h>
69 #include <vm/vm_map.h>
70 #include <vm/vm_page.h>
71 #include <vm/vm_zone.h>
72 
73 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
74 
75 static void	addalias __P((struct vnode *vp, dev_t nvp_rdev));
76 static void	insmntque __P((struct vnode *vp, struct mount *mp));
77 static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
78 
79 /*
80  * Number of vnodes in existence.  Increased whenever getnewvnode()
81  * allocates a new vnode, never decreased.
82  */
83 static unsigned long	numvnodes;
84 SYSCTL_LONG(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
85 
86 /*
87  * Conversion tables for conversion from vnode types to inode formats
88  * and back.
89  */
90 enum vtype iftovt_tab[16] = {
91 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
92 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
93 };
94 int vttoif_tab[9] = {
95 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
96 	S_IFSOCK, S_IFIFO, S_IFMT,
97 };
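/*
 * For illustration: these tables back the IFTOVT() and VTTOIF() macros
 * in sys/vnode.h, which shift a mode's S_IFMT bits into a table index
 * and back.  E.g. S_IFDIR is 0040000, so IFTOVT(S_IFDIR) is
 * iftovt_tab[0040000 >> 12] == iftovt_tab[4] == VDIR, and VTTOIF(VDIR)
 * is vttoif_tab[2] == S_IFDIR.
 */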
98 
99 /*
100  * List of vnodes that are ready for recycling.
101  */
102 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
103 
104 /*
105  * Minimum number of free vnodes.  If there are fewer free vnodes than this,
106  * getnewvnode() will return a newly allocated vnode.
107  */
108 static u_long wantfreevnodes = 25;
109 SYSCTL_LONG(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
110 /* Number of vnodes in the free list. */
111 static u_long freevnodes = 0;
112 SYSCTL_LONG(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
113 /* Number of vnode allocations. */
114 static u_long vnodeallocs = 0;
115 SYSCTL_LONG(_debug, OID_AUTO, vnodeallocs, CTLFLAG_RD, &vnodeallocs, 0, "");
116 /* How often, measured in vnode allocations, to attempt vnode recycling from the namecache. */
117 static u_long vnoderecycleperiod = 1000;
118 SYSCTL_LONG(_debug, OID_AUTO, vnoderecycleperiod, CTLFLAG_RW, &vnoderecycleperiod, 0, "");
119 /* Minimum number of total vnodes required to invoke vnode recycle from namecache. */
120 static u_long vnoderecyclemintotalvn = 2000;
121 SYSCTL_LONG(_debug, OID_AUTO, vnoderecyclemintotalvn, CTLFLAG_RW, &vnoderecyclemintotalvn, 0, "");
122 /* Minimum number of free vnodes required to invoke vnode recycle from namecache. */
123 static u_long vnoderecycleminfreevn = 2000;
124 SYSCTL_LONG(_debug, OID_AUTO, vnoderecycleminfreevn, CTLFLAG_RW, &vnoderecycleminfreevn, 0, "");
125 /* Number of vnodes to attempt to recycle at a time. */
126 static u_long vnoderecyclenumber = 3000;
127 SYSCTL_LONG(_debug, OID_AUTO, vnoderecyclenumber, CTLFLAG_RW, &vnoderecyclenumber, 0, "");
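/*
 * A sketch of how these knobs combine (see getnewvnode() below): on
 * every vnoderecycleperiod-th allocation, if numvnodes has grown past
 * vnoderecyclemintotalvn while fewer than vnoderecycleminfreevn vnodes
 * remain free, up to vnoderecyclenumber vnodes are recycled out of the
 * namecache via cache_purgeleafdirs().
 */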
128 
129 /*
130  * Various variables used for debugging the new implementation of
131  * reassignbuf().
132  * XXX these are probably of (very) limited utility now.
133  */
134 static int reassignbufcalls;
135 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
136 static int reassignbufloops;
137 SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, "");
138 static int reassignbufsortgood;
139 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, "");
140 static int reassignbufsortbad;
141 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, "");
142 /* Set to 0 for old insertion-sort based reassignbuf, 1 for modern method. */
143 static int reassignbufmethod = 1;
144 SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, "");
145 
146 #ifdef ENABLE_VFS_IOOPT
147 /* See NOTES for a description of this setting. */
148 int vfs_ioopt = 0;
149 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
150 #endif
151 
152 /* List of mounted filesystems. */
153 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
154 
155 /* For any iteration/modification of mountlist */
156 struct mtx mountlist_mtx;
157 
158 /* For any iteration/modification of mnt_vnodelist */
159 struct mtx mntvnode_mtx;
160 
161 /*
162  * Cache for the mount type id assigned to NFS.  This is used for
163  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
164  */
165 int	nfs_mount_type = -1;
166 
167 /* To keep more than one thread at a time from running vfs_getnewfsid */
168 static struct mtx mntid_mtx;
169 
170 /* For any iteration/modification of vnode_free_list */
171 static struct mtx vnode_free_list_mtx;
172 
173 /*
174  * For any iteration/modification of dev->si_hlist (linked through
175  * v_specnext)
176  */
177 static struct mtx spechash_mtx;
178 
179 /* Publicly exported FS */
180 struct nfs_public nfs_pub;
181 
182 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
183 static vm_zone_t vnode_zone;
184 
185 /* Set to 1 to print out reclaim of active vnodes */
186 int	prtactive = 0;
187 
188 /*
189  * The workitem queue.
190  *
191  * It is useful to delay writes of file data and filesystem metadata
192  * for tens of seconds so that quickly created and deleted files need
193  * not waste disk bandwidth being created and removed. To realize this,
194  * we append vnodes to a "workitem" queue. When running with a soft
195  * updates implementation, most pending metadata dependencies should
196  * not wait for more than a few seconds. Thus, mounted block devices
197  * are delayed only about half the time that file data is delayed.
198  * Similarly, directory updates are more critical, so they are delayed
199  * only about a third of the time that file data is delayed. Thus, there are
200  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
201  * one each second (driven off the filesystem syncer process). The
202  * syncer_delayno variable indicates the next queue that is to be processed.
203  * Items that need to be processed soon are placed in this queue:
204  *
205  *	syncer_workitem_pending[syncer_delayno]
206  *
207  * A delay of fifteen seconds is done by placing the request fifteen
208  * entries later in the queue:
209  *
210  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
211  *
212  */
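/*
 * E.g. with the default SYNCER_MAXDELAY of 32, hashinit() leaves
 * syncer_mask at 31, so a vnode queued with a delay of 15 while
 * syncer_delayno is 20 lands in slot (20 + 15) & 31 == 3; the mask
 * makes the queue wrap around instead of indexing past the array.
 */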
213 static int syncer_delayno = 0;
214 static long syncer_mask;
215 LIST_HEAD(synclist, vnode);
216 static struct synclist *syncer_workitem_pending;
217 
218 #define SYNCER_MAXDELAY		32
219 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
220 time_t syncdelay = 30;		/* max time to delay syncing data */
221 time_t filedelay = 30;		/* time to delay syncing files */
222 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
223 time_t dirdelay = 29;		/* time to delay syncing directories */
224 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
225 time_t metadelay = 28;		/* time to delay syncing metadata */
226 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
227 static int rushjob;		/* number of slots to run ASAP */
228 static int stat_rush_requests;	/* number of times I/O speeded up */
229 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
230 
231 /*
232  * Number of vnodes we want to exist at any one time.  This is mostly used
233  * to size hash tables in vnode-related code.  It is normally not used in
234  * getnewvnode(), as wantfreevnodes is normally nonzero.
235  *
236  * XXX desiredvnodes is historical cruft and should not exist.
237  */
238 int desiredvnodes;
239 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
240     &desiredvnodes, 0, "Maximum number of vnodes");
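/*
 * For illustration (see vntblinit() below): with maxproc == 512 on a
 * machine with 32768 4K pages (128MB of RAM), this is initialized to
 * 512 + 32768 / 4 == 8704 vnodes.
 */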
241 
242 /*
243  * Initialize the vnode management data structures.
244  */
245 static void
246 vntblinit(void *dummy __unused)
247 {
248 
249 	desiredvnodes = maxproc + cnt.v_page_count / 4;
250 	mtx_init(&mountlist_mtx, "mountlist", MTX_DEF);
251 	mtx_init(&mntvnode_mtx, "mntvnode", MTX_DEF);
252 	mtx_init(&mntid_mtx, "mntid", MTX_DEF);
253 	mtx_init(&spechash_mtx, "spechash", MTX_DEF);
254 	TAILQ_INIT(&vnode_free_list);
255 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
256 	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
257 	/*
258 	 * Initialize the filesystem syncer.
259 	 */
260 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
261 		&syncer_mask);
262 	syncer_maxdelay = syncer_mask + 1;
263 }
264 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL)
265 
266 
267 /*
268  * Mark a mount point as busy. Used to synchronize access and to delay
269  * unmounting. Interlock is not released on failure.
270  */
271 int
272 vfs_busy(mp, flags, interlkp, p)
273 	struct mount *mp;
274 	int flags;
275 	struct mtx *interlkp;
276 	struct proc *p;
277 {
278 	int lkflags;
279 
280 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
281 		if (flags & LK_NOWAIT)
282 			return (ENOENT);
283 		mp->mnt_kern_flag |= MNTK_MWAIT;
284 		/*
285 		 * Since all busy locks are shared except the exclusive
286 		 * lock granted when unmounting, the only place that a
287 		 * wakeup needs to be done is at the release of the
288 		 * exclusive lock at the end of dounmount.
289 		 */
290 		msleep((caddr_t)mp, interlkp, PVFS, "vfs_busy", 0);
291 		return (ENOENT);
292 	}
293 	lkflags = LK_SHARED | LK_NOPAUSE;
294 	if (interlkp)
295 		lkflags |= LK_INTERLOCK;
296 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
297 		panic("vfs_busy: unexpected lock failure");
298 	return (0);
299 }
300 
301 /*
302  * Free a busy filesystem.
303  */
304 void
305 vfs_unbusy(mp, p)
306 	struct mount *mp;
307 	struct proc *p;
308 {
309 
310 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
311 }
312 
313 /*
314  * Lookup a filesystem type, and if found allocate and initialize
315  * a mount structure for it.
316  *
317  * Devname is usually updated by mount(8) after booting.
318  */
319 int
320 vfs_rootmountalloc(fstypename, devname, mpp)
321 	char *fstypename;
322 	char *devname;
323 	struct mount **mpp;
324 {
325 	struct proc *p = curproc;	/* XXX */
326 	struct vfsconf *vfsp;
327 	struct mount *mp;
328 
329 	if (fstypename == NULL)
330 		return (ENODEV);
331 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
332 		if (!strcmp(vfsp->vfc_name, fstypename))
333 			break;
334 	if (vfsp == NULL)
335 		return (ENODEV);
336 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
337 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
338 	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
339 	LIST_INIT(&mp->mnt_vnodelist);
340 	mp->mnt_vfc = vfsp;
341 	mp->mnt_op = vfsp->vfc_vfsops;
342 	mp->mnt_flag = MNT_RDONLY;
343 	mp->mnt_vnodecovered = NULLVP;
344 	vfsp->vfc_refcount++;
345 	mp->mnt_iosize_max = DFLTPHYS;
346 	mp->mnt_stat.f_type = vfsp->vfc_typenum;
347 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
348 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
349 	mp->mnt_stat.f_mntonname[0] = '/';
350 	mp->mnt_stat.f_mntonname[1] = 0;
351 	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
352 	*mpp = mp;
353 	return (0);
354 }
355 
356 /*
357  * Find an appropriate filesystem to use for the root. If a filesystem
358  * has not been preselected, walk through the list of known filesystems
359  * trying those that have mountroot routines, and try them until one
360  * works or we have tried them all.
361  */
362 #ifdef notdef	/* XXX JH */
363 int
364 lite2_vfs_mountroot()
365 {
366 	struct vfsconf *vfsp;
367 	extern int (*lite2_mountroot) __P((void));
368 	int error;
369 
370 	if (lite2_mountroot != NULL)
371 		return ((*lite2_mountroot)());
372 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
373 		if (vfsp->vfc_mountroot == NULL)
374 			continue;
375 		if ((error = (*vfsp->vfc_mountroot)()) == 0)
376 			return (0);
377 		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
378 	}
379 	return (ENODEV);
380 }
381 #endif
382 
383 /*
384  * Lookup a mount point by filesystem identifier.
385  */
386 struct mount *
387 vfs_getvfs(fsid)
388 	fsid_t *fsid;
389 {
390 	register struct mount *mp;
391 
392 	mtx_lock(&mountlist_mtx);
393 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
394 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
395 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
396 			mtx_unlock(&mountlist_mtx);
397 			return (mp);
398 		}
399 	}
400 	mtx_unlock(&mountlist_mtx);
401 	return ((struct mount *) 0);
402 }
403 
404 /*
405  * Get a new unique fsid.  Try to make its val[0] unique, since this value
406  * will be used to create fake device numbers for stat().  Also try (but
407  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
408  * support 16-bit device numbers.  We end up with unique val[0]'s for the
409  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
410  *
411  * Keep in mind that several mounts may be running in parallel.  Starting
412  * the search one past where the previous search terminated is both a
413  * micro-optimization and a defense against returning the same fsid to
414  * different mounts.
415  */
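/*
 * A sketch of the resulting val[0] layout, assuming the classic
 * makeudev(major, minor) encoding of ((major << 8) | minor):
 *
 *	bits 24-31	low 8 bits of the filesystem's vfc_typenum
 *	bits 16-23	high byte of mntid_base
 *	bits  8-15	the fake major number, 255
 *	bits  0- 7	low byte of mntid_base
 *
 * so the low 16 bits repeat after 2^8 calls and the whole word only
 * after 2^16 calls, matching the guarantees above.
 */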
416 void
417 vfs_getnewfsid(mp)
418 	struct mount *mp;
419 {
420 	static u_int16_t mntid_base;
421 	fsid_t tfsid;
422 	int mtype;
423 
424 	mtx_lock(&mntid_mtx);
425 	mtype = mp->mnt_vfc->vfc_typenum;
426 	tfsid.val[1] = mtype;
427 	mtype = (mtype & 0xFF) << 24;
428 	for (;;) {
429 		tfsid.val[0] = makeudev(255,
430 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
431 		mntid_base++;
432 		if (vfs_getvfs(&tfsid) == NULL)
433 			break;
434 	}
435 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
436 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
437 	mtx_unlock(&mntid_mtx);
438 }
439 
440 /*
441  * Knob to control the precision of file timestamps:
442  *
443  *   0 = seconds only; nanoseconds zeroed.
444  *   1 = seconds and nanoseconds, accurate within 1/HZ.
445  *   2 = seconds and nanoseconds, truncated to microseconds.
446  * >=3 = seconds and nanoseconds, maximum precision.
447  */
448 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
449 
450 static int timestamp_precision = TSP_SEC;
451 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
452     &timestamp_precision, 0, "");
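/*
 * The precision can be changed at runtime, e.g. (usage sketch):
 *
 *	sysctl vfs.timestamp_precision=3	(full nanosecond precision)
 */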
453 
454 /*
455  * Get a current timestamp.
456  */
457 void
458 vfs_timestamp(tsp)
459 	struct timespec *tsp;
460 {
461 	struct timeval tv;
462 
463 	switch (timestamp_precision) {
464 	case TSP_SEC:
465 		tsp->tv_sec = time_second;
466 		tsp->tv_nsec = 0;
467 		break;
468 	case TSP_HZ:
469 		getnanotime(tsp);
470 		break;
471 	case TSP_USEC:
472 		microtime(&tv);
473 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
474 		break;
475 	case TSP_NSEC:
476 	default:
477 		nanotime(tsp);
478 		break;
479 	}
480 }
481 
482 /*
483  * Set vnode attributes to VNOVAL
484  */
485 void
486 vattr_null(vap)
487 	register struct vattr *vap;
488 {
489 
490 	vap->va_type = VNON;
491 	vap->va_size = VNOVAL;
492 	vap->va_bytes = VNOVAL;
493 	vap->va_mode = VNOVAL;
494 	vap->va_nlink = VNOVAL;
495 	vap->va_uid = VNOVAL;
496 	vap->va_gid = VNOVAL;
497 	vap->va_fsid = VNOVAL;
498 	vap->va_fileid = VNOVAL;
499 	vap->va_blocksize = VNOVAL;
500 	vap->va_rdev = VNOVAL;
501 	vap->va_atime.tv_sec = VNOVAL;
502 	vap->va_atime.tv_nsec = VNOVAL;
503 	vap->va_mtime.tv_sec = VNOVAL;
504 	vap->va_mtime.tv_nsec = VNOVAL;
505 	vap->va_ctime.tv_sec = VNOVAL;
506 	vap->va_ctime.tv_nsec = VNOVAL;
507 	vap->va_flags = VNOVAL;
508 	vap->va_gen = VNOVAL;
509 	vap->va_vaflags = 0;
510 }
511 
512 /*
513  * Routines having to do with the management of the vnode table.
514  */
515 
516 /*
517  * Return the next vnode from the free list.
518  */
519 int
520 getnewvnode(tag, mp, vops, vpp)
521 	enum vtagtype tag;
522 	struct mount *mp;
523 	vop_t **vops;
524 	struct vnode **vpp;
525 {
526 	int s, count;
527 	struct proc *p = curproc;	/* XXX */
528 	struct vnode *vp = NULL;
529 	struct mount *vnmp;
530 	vm_object_t object;
531 
532 	/*
533 	 * We take the least recently used vnode from the freelist
534 	 * if we can get it, it has no cached pages, and no
535 	 * namecache entries refer to it.
536 	 * Otherwise we allocate a new vnode.
537 	 */
538 
539 	s = splbio();
540 	mtx_lock(&vnode_free_list_mtx);
541 
542 	if (wantfreevnodes && freevnodes < wantfreevnodes) {
543 		vp = NULL;
544 	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
545 		/*
546 		 * XXX: this is only here to be backwards compatible
547 		 */
548 		vp = NULL;
549 	} else for (count = 0; count < freevnodes; count++) {
550 		vp = TAILQ_FIRST(&vnode_free_list);
551 		if (vp == NULL || vp->v_usecount)
552 			panic("getnewvnode: free vnode isn't");
553 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
554 
555 		/*
556 		 * Don't recycle if active in the namecache or
557 		 * if it still has cached pages or we cannot get
558 		 * its interlock.
559 		 */
560 		if (LIST_FIRST(&vp->v_cache_src) != NULL ||
561 		    (VOP_GETVOBJECT(vp, &object) == 0 &&
562 		     (object->resident_page_count || object->ref_count)) ||
563 		    !mtx_trylock(&vp->v_interlock)) {
564 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
565 			vp = NULL;
566 			continue;
567 		}
568 		/*
569 		 * Skip over it if its filesystem is being suspended.
570 		 */
571 		if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
572 			break;
573 		mtx_unlock(&vp->v_interlock);
574 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
575 		vp = NULL;
576 	}
577 	if (vp) {
578 		vp->v_flag |= VDOOMED;
579 		vp->v_flag &= ~VFREE;
580 		freevnodes--;
581 		mtx_unlock(&vnode_free_list_mtx);
582 		cache_purge(vp);
583 		vp->v_lease = NULL;
584 		if (vp->v_type != VBAD) {
585 			vgonel(vp, p);
586 		} else {
587 			mtx_unlock(&vp->v_interlock);
588 		}
589 		vn_finished_write(vnmp);
590 
591 #ifdef INVARIANTS
592 		{
593 			int s;
594 
595 			if (vp->v_data)
596 				panic("cleaned vnode isn't");
597 			s = splbio();
598 			if (vp->v_numoutput)
599 				panic("Clean vnode has pending I/O's");
600 			splx(s);
601 			if (vp->v_writecount != 0)
602 				panic("Non-zero write count");
603 		}
604 #endif
605 		vp->v_flag = 0;
606 		vp->v_lastw = 0;
607 		vp->v_lasta = 0;
608 		vp->v_cstart = 0;
609 		vp->v_clen = 0;
610 		vp->v_socket = 0;
611 	} else {
612 		mtx_unlock(&vnode_free_list_mtx);
613 		vp = (struct vnode *) zalloc(vnode_zone);
614 		bzero((char *) vp, sizeof *vp);
615 		mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
616 		vp->v_dd = vp;
617 		mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
618 		cache_purge(vp);
619 		LIST_INIT(&vp->v_cache_src);
620 		TAILQ_INIT(&vp->v_cache_dst);
621 		numvnodes++;
622 	}
623 
624 	TAILQ_INIT(&vp->v_cleanblkhd);
625 	TAILQ_INIT(&vp->v_dirtyblkhd);
626 	vp->v_type = VNON;
627 	vp->v_tag = tag;
628 	vp->v_op = vops;
629 	lockinit(&vp->v_lock, PVFS, "vnlock", 0, LK_NOPAUSE);
630 	insmntque(vp, mp);
631 	*vpp = vp;
632 	vp->v_usecount = 1;
633 	vp->v_data = 0;
634 
635 	splx(s);
636 
637 	vfs_object_create(vp, p, p->p_ucred);
638 
639 	vnodeallocs++;
640 	if (vnodeallocs % vnoderecycleperiod == 0 &&
641 	    freevnodes < vnoderecycleminfreevn &&
642 	    vnoderecyclemintotalvn < numvnodes) {
643 		/* Recycle vnodes. */
644 		cache_purgeleafdirs(vnoderecyclenumber);
645 	}
646 
647 	return (0);
648 }
649 
650 /*
651  * Move a vnode from one mount queue to another.
652  */
653 static void
654 insmntque(vp, mp)
655 	register struct vnode *vp;
656 	register struct mount *mp;
657 {
658 
659 	mtx_lock(&mntvnode_mtx);
660 	/*
661 	 * Delete from old mount point vnode list, if on one.
662 	 */
663 	if (vp->v_mount != NULL)
664 		LIST_REMOVE(vp, v_mntvnodes);
665 	/*
666 	 * Insert into list of vnodes for the new mount point, if available.
667 	 */
668 	if ((vp->v_mount = mp) == NULL) {
669 		mtx_unlock(&mntvnode_mtx);
670 		return;
671 	}
672 	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
673 	mtx_unlock(&mntvnode_mtx);
674 }
675 
676 /*
677  * Update outstanding I/O count and do wakeup if requested.
678  */
679 void
680 vwakeup(bp)
681 	register struct buf *bp;
682 {
683 	register struct vnode *vp;
684 
685 	bp->b_flags &= ~B_WRITEINPROG;
686 	if ((vp = bp->b_vp)) {
687 		vp->v_numoutput--;
688 		if (vp->v_numoutput < 0)
689 			panic("vwakeup: neg numoutput");
690 		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
691 			vp->v_flag &= ~VBWAIT;
692 			wakeup((caddr_t) &vp->v_numoutput);
693 		}
694 	}
695 }
696 
697 /*
698  * Flush out and invalidate all buffers associated with a vnode.
699  * Called with the underlying object locked.
700  */
701 int
702 vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
703 	register struct vnode *vp;
704 	int flags;
705 	struct ucred *cred;
706 	struct proc *p;
707 	int slpflag, slptimeo;
708 {
709 	register struct buf *bp;
710 	struct buf *nbp, *blist;
711 	int s, error;
712 	vm_object_t object;
713 
714 	if (flags & V_SAVE) {
715 		s = splbio();
716 		while (vp->v_numoutput) {
717 			vp->v_flag |= VBWAIT;
718 			error = tsleep((caddr_t)&vp->v_numoutput,
719 			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
720 			if (error) {
721 				splx(s);
722 				return (error);
723 			}
724 		}
725 		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
726 			splx(s);
727 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
728 				return (error);
729 			s = splbio();
730 			if (vp->v_numoutput > 0 ||
731 			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
732 				panic("vinvalbuf: dirty bufs");
733 		}
734 		splx(s);
735 	}
736 	s = splbio();
737 	for (;;) {
738 		blist = TAILQ_FIRST(&vp->v_cleanblkhd);
739 		if (!blist)
740 			blist = TAILQ_FIRST(&vp->v_dirtyblkhd);
741 		if (!blist)
742 			break;
743 
744 		for (bp = blist; bp; bp = nbp) {
745 			nbp = TAILQ_NEXT(bp, b_vnbufs);
746 			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
747 				error = BUF_TIMELOCK(bp,
748 				    LK_EXCLUSIVE | LK_SLEEPFAIL,
749 				    "vinvalbuf", slpflag, slptimeo);
750 				if (error == ENOLCK)
751 					break;
752 				splx(s);
753 				return (error);
754 			}
755 			/*
756 			 * XXX Since there are no node locks for NFS, I
757 			 * believe there is a slight chance that a delayed
758 			 * write will occur while sleeping just above, so
759 			 * check for it.  Note that vfs_bio_awrite expects
760 			 * buffers to reside on a queue, while BUF_WRITE and
761 			 * brelse do not.
762 			 */
763 			if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
764 				(flags & V_SAVE)) {
765 
766 				if (bp->b_vp == vp) {
767 					if (bp->b_flags & B_CLUSTEROK) {
768 						BUF_UNLOCK(bp);
769 						vfs_bio_awrite(bp);
770 					} else {
771 						bremfree(bp);
772 						bp->b_flags |= B_ASYNC;
773 						BUF_WRITE(bp);
774 					}
775 				} else {
776 					bremfree(bp);
777 					(void) BUF_WRITE(bp);
778 				}
779 				break;
780 			}
781 			bremfree(bp);
782 			bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
783 			bp->b_flags &= ~B_ASYNC;
784 			brelse(bp);
785 		}
786 	}
787 
788 	while (vp->v_numoutput > 0) {
789 		vp->v_flag |= VBWAIT;
790 		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
791 	}
792 
793 	splx(s);
794 
795 	/*
796 	 * Destroy the copy in the VM cache, too.
797 	 */
798 	mtx_lock(&vp->v_interlock);
799 	if (VOP_GETVOBJECT(vp, &object) == 0) {
800 		vm_object_page_remove(object, 0, 0,
801 			(flags & V_SAVE) ? TRUE : FALSE);
802 	}
803 	mtx_unlock(&vp->v_interlock);
804 
805 	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
806 		panic("vinvalbuf: flush failed");
807 	return (0);
808 }
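/*
 * A usage sketch: callers that need the data to reach disk pass V_SAVE,
 * as in
 *
 *	error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0);
 *
 * while vclean() below retries with flags 0, simply discarding all
 * buffers, if the V_SAVE pass fails.
 */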
809 
810 /*
811  * Truncate a file's buffer and pages to a specified length.  This
812  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
813  * sync activity.
814  */
815 int
816 vtruncbuf(vp, cred, p, length, blksize)
817 	register struct vnode *vp;
818 	struct ucred *cred;
819 	struct proc *p;
820 	off_t length;
821 	int blksize;
822 {
823 	register struct buf *bp;
824 	struct buf *nbp;
825 	int s, anyfreed;
826 	int trunclbn;
827 
828 	/*
829 	 * Round up to the *next* lbn.
830 	 */
831 	trunclbn = (length + blksize - 1) / blksize;
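	/*
	 * E.g. with blksize 512: length 1 yields trunclbn 1, so block 0
	 * (which still holds the last valid byte) survives; length 0
	 * yields trunclbn 0 and every data buffer (b_lblkno >= 0) is
	 * invalidated below.
	 */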
832 
833 	s = splbio();
834 restart:
835 	anyfreed = 1;
836 	for (;anyfreed;) {
837 		anyfreed = 0;
838 		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
839 			nbp = TAILQ_NEXT(bp, b_vnbufs);
840 			if (bp->b_lblkno >= trunclbn) {
841 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
842 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
843 					goto restart;
844 				} else {
845 					bremfree(bp);
846 					bp->b_flags |= (B_INVAL | B_RELBUF);
847 					bp->b_flags &= ~B_ASYNC;
848 					brelse(bp);
849 					anyfreed = 1;
850 				}
851 				if (nbp &&
852 				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
853 				    (nbp->b_vp != vp) ||
854 				    (nbp->b_flags & B_DELWRI))) {
855 					goto restart;
856 				}
857 			}
858 		}
859 
860 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
861 			nbp = TAILQ_NEXT(bp, b_vnbufs);
862 			if (bp->b_lblkno >= trunclbn) {
863 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
864 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
865 					goto restart;
866 				} else {
867 					bremfree(bp);
868 					bp->b_flags |= (B_INVAL | B_RELBUF);
869 					bp->b_flags &= ~B_ASYNC;
870 					brelse(bp);
871 					anyfreed = 1;
872 				}
873 				if (nbp &&
874 				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
875 				    (nbp->b_vp != vp) ||
876 				    (nbp->b_flags & B_DELWRI) == 0)) {
877 					goto restart;
878 				}
879 			}
880 		}
881 	}
882 
883 	if (length > 0) {
884 restartsync:
885 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
886 			nbp = TAILQ_NEXT(bp, b_vnbufs);
887 			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
888 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
889 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
890 					goto restart;
891 				} else {
892 					bremfree(bp);
893 					if (bp->b_vp == vp) {
894 						bp->b_flags |= B_ASYNC;
895 					} else {
896 						bp->b_flags &= ~B_ASYNC;
897 					}
898 					BUF_WRITE(bp);
899 				}
900 				goto restartsync;
901 			}
902 
903 		}
904 	}
905 
906 	while (vp->v_numoutput > 0) {
907 		vp->v_flag |= VBWAIT;
908 		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
909 	}
910 
911 	splx(s);
912 
913 	vnode_pager_setsize(vp, length);
914 
915 	return (0);
916 }
917 
918 /*
919  * Associate a buffer with a vnode.
920  */
921 void
922 bgetvp(vp, bp)
923 	register struct vnode *vp;
924 	register struct buf *bp;
925 {
926 	int s;
927 
928 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
929 
930 	vhold(vp);
931 	bp->b_vp = vp;
932 	bp->b_dev = vn_todev(vp);
933 	/*
934 	 * Insert onto list for new vnode.
935 	 */
936 	s = splbio();
937 	bp->b_xflags |= BX_VNCLEAN;
938 	bp->b_xflags &= ~BX_VNDIRTY;
939 	TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
940 	splx(s);
941 }
942 
943 /*
944  * Disassociate a buffer from a vnode.
945  */
946 void
947 brelvp(bp)
948 	register struct buf *bp;
949 {
950 	struct vnode *vp;
951 	struct buflists *listheadp;
952 	int s;
953 
954 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
955 
956 	/*
957 	 * Delete from old vnode list, if on one.
958 	 */
959 	vp = bp->b_vp;
960 	s = splbio();
961 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
962 		if (bp->b_xflags & BX_VNDIRTY)
963 			listheadp = &vp->v_dirtyblkhd;
964 		else
965 			listheadp = &vp->v_cleanblkhd;
966 		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
967 		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
968 	}
969 	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
970 		vp->v_flag &= ~VONWORKLST;
971 		LIST_REMOVE(vp, v_synclist);
972 	}
973 	splx(s);
974 	bp->b_vp = (struct vnode *) 0;
975 	vdrop(vp);
976 }
977 
978 /*
979  * Add an item to the syncer work queue.
980  */
981 static void
982 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
983 {
984 	int s, slot;
985 
986 	s = splbio();
987 
988 	if (vp->v_flag & VONWORKLST) {
989 		LIST_REMOVE(vp, v_synclist);
990 	}
991 
992 	if (delay > syncer_maxdelay - 2)
993 		delay = syncer_maxdelay - 2;
994 	slot = (syncer_delayno + delay) & syncer_mask;
995 
996 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
997 	vp->v_flag |= VONWORKLST;
998 	splx(s);
999 }
1000 
1001 struct  proc *updateproc;
1002 static void sched_sync __P((void));
1003 static struct kproc_desc up_kp = {
1004 	"syncer",
1005 	sched_sync,
1006 	&updateproc
1007 };
1008 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
1009 
1010 /*
1011  * System filesystem synchronizer daemon.
1012  */
1013 void
1014 sched_sync(void)
1015 {
1016 	struct synclist *slp;
1017 	struct vnode *vp;
1018 	struct mount *mp;
1019 	long starttime;
1020 	int s;
1021 	struct proc *p = updateproc;
1022 
1023 	mtx_lock(&Giant);
1024 
1025 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
1026 	    SHUTDOWN_PRI_LAST);
1027 
1028 	for (;;) {
1029 		kthread_suspend_check(p);
1030 
1031 		starttime = time_second;
1032 
1033 		/*
1034 		 * Push files whose dirty time has expired.  Be careful
1035 		 * of interrupt race on slp queue.
1036 		 */
1037 		s = splbio();
1038 		slp = &syncer_workitem_pending[syncer_delayno];
1039 		syncer_delayno += 1;
1040 		if (syncer_delayno == syncer_maxdelay)
1041 			syncer_delayno = 0;
1042 		splx(s);
1043 
1044 		while ((vp = LIST_FIRST(slp)) != NULL) {
1045 			if (VOP_ISLOCKED(vp, NULL) == 0 &&
1046 			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
1047 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
1048 				(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
1049 				VOP_UNLOCK(vp, 0, p);
1050 				vn_finished_write(mp);
1051 			}
1052 			s = splbio();
1053 			if (LIST_FIRST(slp) == vp) {
1054 				/*
1055 				 * Note: v_tag VT_VFS vps can remain on the
1056 				 * worklist too with no dirty blocks, but
1057 				 * since sync_fsync() moves them to a different
1058 				 * slot we are safe.
1059 				 */
1060 				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
1061 				    !vn_isdisk(vp, NULL))
1062 					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
1063 				/*
1064 				 * Put us back on the worklist.  The worklist
1065 				 * routine will remove us from our current
1066 				 * position and then add us back in at a later
1067 				 * position.
1068 				 */
1069 				vn_syncer_add_to_worklist(vp, syncdelay);
1070 			}
1071 			splx(s);
1072 		}
1073 
1074 		/*
1075 		 * Do soft update processing.
1076 		 */
1077 #ifdef SOFTUPDATES
1078 		softdep_process_worklist(NULL);
1079 #endif
1080 
1081 		/*
1082 		 * The variable rushjob allows the kernel to speed up the
1083 		 * processing of the filesystem syncer process. A rushjob
1084 		 * value of N tells the filesystem syncer to process the next
1085 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1086 		 * is used by the soft update code to speed up the filesystem
1087 		 * syncer process when the incore state is getting so far
1088 		 * ahead of the disk that the kernel memory pool is being
1089 		 * threatened with exhaustion.
1090 		 */
1091 		if (rushjob > 0) {
1092 			rushjob -= 1;
1093 			continue;
1094 		}
1095 		/*
1096 		 * If it has taken us less than a second to process the
1097 		 * current work, then wait. Otherwise start right over
1098 		 * again. We can still lose time if any single round
1099 		 * takes more than two seconds, but it does not really
1100 		 * matter as we are just trying to generally pace the
1101 		 * filesystem activity.
1102 		 */
1103 		if (time_second == starttime)
1104 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1105 	}
1106 }
1107 
1108 /*
1109  * Request the syncer daemon to speed up its work.
1110  * We never push it to speed up more than half of its
1111  * normal turn time, otherwise it could take over the cpu.
1112  */
1113 int
1114 speedup_syncer()
1115 {
1116 
1117 	mtx_lock_spin(&sched_lock);
1118 	if (updateproc->p_wchan == &lbolt)
1119 		setrunnable(updateproc);
1120 	mtx_unlock_spin(&sched_lock);
1121 	if (rushjob < syncdelay / 2) {
1122 		rushjob += 1;
1123 		stat_rush_requests += 1;
1124 		return (1);
1125 	}
1126 	return (0);
1127 }
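/*
 * E.g. with the default syncdelay of 30, rushjob saturates at 15;
 * sched_sync() above then skips its one-second sleep for the next 15
 * iterations, draining 15 queue slots back to back.
 */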
1128 
1129 /*
1130  * Associate a p-buffer with a vnode.
1131  *
1132  * Also sets B_PAGING flag to indicate that vnode is not fully associated
1133  * with the buffer, i.e., the bp has not been linked into the vnode or
1134  * ref-counted.
1135  */
1136 void
1137 pbgetvp(vp, bp)
1138 	register struct vnode *vp;
1139 	register struct buf *bp;
1140 {
1141 
1142 	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1143 
1144 	bp->b_vp = vp;
1145 	bp->b_flags |= B_PAGING;
1146 	bp->b_dev = vn_todev(vp);
1147 }
1148 
1149 /*
1150  * Disassociate a p-buffer from a vnode.
1151  */
1152 void
1153 pbrelvp(bp)
1154 	register struct buf *bp;
1155 {
1156 
1157 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1158 
1159 	/* XXX REMOVE ME */
1160 	if (TAILQ_NEXT(bp, b_vnbufs) != NULL) {
1161 		panic(
1162 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1163 		    bp,
1164 		    (int)bp->b_flags
1165 		);
1166 	}
1167 	bp->b_vp = (struct vnode *) 0;
1168 	bp->b_flags &= ~B_PAGING;
1169 }
1170 
1171 /*
1172  * Change the vnode a pager buffer is associated with.
1173  */
1174 void
1175 pbreassignbuf(bp, newvp)
1176 	struct buf *bp;
1177 	struct vnode *newvp;
1178 {
1179 
1180 	KASSERT(bp->b_flags & B_PAGING,
1181 	    ("pbreassignbuf() on non phys bp %p", bp));
1182 	bp->b_vp = newvp;
1183 }
1184 
1185 /*
1186  * Reassign a buffer from one vnode to another.
1187  * Used to assign file specific control information
1188  * (indirect blocks) to the vnode to which they belong.
1189  */
1190 void
1191 reassignbuf(bp, newvp)
1192 	register struct buf *bp;
1193 	register struct vnode *newvp;
1194 {
1195 	struct buflists *listheadp;
1196 	int delay;
1197 	int s;
1198 
1199 	if (newvp == NULL) {
1200 		printf("reassignbuf: NULL");
1201 		return;
1202 	}
1203 	++reassignbufcalls;
1204 
1205 	/*
1206 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1207 	 * is not fully linked in.
1208 	 */
1209 	if (bp->b_flags & B_PAGING)
1210 		panic("cannot reassign paging buffer");
1211 
1212 	s = splbio();
1213 	/*
1214 	 * Delete from old vnode list, if on one.
1215 	 */
1216 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1217 		if (bp->b_xflags & BX_VNDIRTY)
1218 			listheadp = &bp->b_vp->v_dirtyblkhd;
1219 		else
1220 			listheadp = &bp->b_vp->v_cleanblkhd;
1221 		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
1222 		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1223 		if (bp->b_vp != newvp) {
1224 			vdrop(bp->b_vp);
1225 			bp->b_vp = NULL;	/* for clarification */
1226 		}
1227 	}
1228 	/*
1229 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1230 	 * of clean buffers.
1231 	 */
1232 	if (bp->b_flags & B_DELWRI) {
1233 		struct buf *tbp;
1234 
1235 		listheadp = &newvp->v_dirtyblkhd;
1236 		if ((newvp->v_flag & VONWORKLST) == 0) {
1237 			switch (newvp->v_type) {
1238 			case VDIR:
1239 				delay = dirdelay;
1240 				break;
1241 			case VCHR:
1242 				if (newvp->v_rdev->si_mountpoint != NULL) {
1243 					delay = metadelay;
1244 					break;
1245 				}
1246 				/* fall through */
1247 			default:
1248 				delay = filedelay;
1249 			}
1250 			vn_syncer_add_to_worklist(newvp, delay);
1251 		}
1252 		bp->b_xflags |= BX_VNDIRTY;
1253 		tbp = TAILQ_FIRST(listheadp);
1254 		if (tbp == NULL ||
1255 		    bp->b_lblkno == 0 ||
1256 		    (bp->b_lblkno > 0 && tbp->b_lblkno < 0) ||
1257 		    (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) {
1258 			TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
1259 			++reassignbufsortgood;
1260 		} else if (bp->b_lblkno < 0) {
1261 			TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs);
1262 			++reassignbufsortgood;
1263 		} else if (reassignbufmethod == 1) {
1264 			/*
1265 			 * New sorting algorithm, only handle sequential case,
1266 			 * otherwise append to end (but before metadata)
1267 			 */
1268 			if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL &&
1269 			    (tbp->b_xflags & BX_VNDIRTY)) {
1270 				/*
1271 				 * Found the best place to insert the buffer
1272 				 */
1273 				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1274 				++reassignbufsortgood;
1275 			} else {
1276 				/*
1277 				 * Missed, append to end, but before meta-data.
1278 				 * We know that the head buffer in the list is
1279 				 * not meta-data due to prior conditionals.
1280 				 *
1281 				 * Indirect effects:  NFS second stage write
1282 				 * tends to wind up here, giving maximum
1283 				 * distance between the unstable write and the
1284 				 * commit rpc.
1285 				 */
1286 				tbp = TAILQ_LAST(listheadp, buflists);
1287 				while (tbp && tbp->b_lblkno < 0)
1288 					tbp = TAILQ_PREV(tbp, buflists, b_vnbufs);
1289 				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1290 				++reassignbufsortbad;
1291 			}
1292 		} else {
1293 			/*
1294 			 * Old sorting algorithm, scan queue and insert
1295 			 */
1296 			struct buf *ttbp;
1297 			while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) &&
1298 			    (ttbp->b_lblkno < bp->b_lblkno)) {
1299 				++reassignbufloops;
1300 				tbp = ttbp;
1301 			}
1302 			TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1303 		}
1304 	} else {
1305 		bp->b_xflags |= BX_VNCLEAN;
1306 		TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs);
1307 		if ((newvp->v_flag & VONWORKLST) &&
1308 		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1309 			newvp->v_flag &= ~VONWORKLST;
1310 			LIST_REMOVE(newvp, v_synclist);
1311 		}
1312 	}
1313 	if (bp->b_vp != newvp) {
1314 		bp->b_vp = newvp;
1315 		vhold(bp->b_vp);
1316 	}
1317 	splx(s);
1318 }
1319 
1320 /*
1321  * Create a vnode for a device.
1322  * Used for mounting the root file system.
1323  */
1324 int
1325 bdevvp(dev, vpp)
1326 	dev_t dev;
1327 	struct vnode **vpp;
1328 {
1329 	register struct vnode *vp;
1330 	struct vnode *nvp;
1331 	int error;
1332 
1333 	if (dev == NODEV) {
1334 		*vpp = NULLVP;
1335 		return (ENXIO);
1336 	}
1337 	if (vfinddev(dev, VCHR, vpp))
1338 		return (0);
1339 	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
1340 	if (error) {
1341 		*vpp = NULLVP;
1342 		return (error);
1343 	}
1344 	vp = nvp;
1345 	vp->v_type = VCHR;
1346 	addalias(vp, dev);
1347 	*vpp = vp;
1348 	return (0);
1349 }
1350 
1351 /*
1352  * Add vnode to the alias list hung off the dev_t.
1353  *
1354  * The reason for this gunk is that multiple vnodes can reference
1355  * the same physical device, so checking vp->v_usecount to see
1356  * how many users there are is inadequate; the v_usecount fields
1357  * of all the vnodes need to be accumulated.  vcount() does that.
1358  */
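/*
 * For instance (a sketch): if two vnodes alias the same disk device and
 * each is open once, each vp->v_usecount is 1, but vcount() reports 2,
 * which is what "is this device still in use?" checks need.
 */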
1359 struct vnode *
1360 addaliasu(nvp, nvp_rdev)
1361 	struct vnode *nvp;
1362 	udev_t nvp_rdev;
1363 {
1364 	struct vnode *ovp;
1365 	vop_t **ops;
1366 	dev_t dev;
1367 
1368 	if (nvp->v_type == VBLK)
1369 		return (nvp);
1370 	if (nvp->v_type != VCHR)
1371 		panic("addaliasu on non-special vnode");
1372 	dev = udev2dev(nvp_rdev, 0);
1373 	/*
1374 	 * Check to see if we have a bdevvp vnode with no associated
1375 	 * filesystem. If so, we want to associate the filesystem of
1376  * the newly instigated vnode with the bdevvp vnode and
1377 	 * discard the newly created vnode rather than leaving the
1378 	 * bdevvp vnode lying around with no associated filesystem.
1379 	 */
1380 	if (vfinddev(dev, nvp->v_type, &ovp) == 0 || ovp->v_data != NULL) {
1381 		addalias(nvp, dev);
1382 		return (nvp);
1383 	}
1384 	/*
1385 	 * Discard unneeded vnode, but save its node specific data.
1386 	 * Note that if there is a lock, it is carried over in the
1387 	 * node specific data to the replacement vnode.
1388 	 */
1389 	vref(ovp);
1390 	ovp->v_data = nvp->v_data;
1391 	ovp->v_tag = nvp->v_tag;
1392 	nvp->v_data = NULL;
1393 	lockinit(&ovp->v_lock, PVFS, nvp->v_lock.lk_wmesg,
1394 	    nvp->v_lock.lk_timo, nvp->v_lock.lk_flags & LK_EXTFLG_MASK);
1395 	if (nvp->v_vnlock)
1396 		ovp->v_vnlock = &ovp->v_lock;
1397 	ops = ovp->v_op;
1398 	ovp->v_op = nvp->v_op;
1399 	if (VOP_ISLOCKED(nvp, curproc)) {
1400 		VOP_UNLOCK(nvp, 0, curproc);
1401 		vn_lock(ovp, LK_EXCLUSIVE | LK_RETRY, curproc);
1402 	}
1403 	nvp->v_op = ops;
1404 	insmntque(ovp, nvp->v_mount);
1405 	vrele(nvp);
1406 	vgone(nvp);
1407 	return (ovp);
1408 }
1409 
1410 /* Local helper function that does the same as addaliasu(), but for a
1411  * dev_t instead of a udev_t. */
1412 static void
1413 addalias(nvp, dev)
1414 	struct vnode *nvp;
1415 	dev_t dev;
1416 {
1417 
1418 	KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode"));
1419 	nvp->v_rdev = dev;
1420 	mtx_lock(&spechash_mtx);
1421 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
1422 	mtx_unlock(&spechash_mtx);
1423 }
1424 
1425 /*
1426  * Grab a particular vnode from the free list, increment its
1427  * reference count and lock it. The vnode lock bit is set if the
1428  * vnode is being eliminated in vgone. The process is awakened
1429  * when the transition is completed, and an error returned to
1430  * indicate that the vnode is no longer usable (possibly having
1431  * been changed to a new file system type).
1432  */
1433 int
1434 vget(vp, flags, p)
1435 	register struct vnode *vp;
1436 	int flags;
1437 	struct proc *p;
1438 {
1439 	int error;
1440 
1441 	/*
1442 	 * If the vnode is in the process of being cleaned out for
1443 	 * another use, we wait for the cleaning to finish and then
1444 	 * return failure. Cleaning is determined by checking that
1445 	 * the VXLOCK flag is set.
1446 	 */
1447 	if ((flags & LK_INTERLOCK) == 0)
1448 		mtx_lock(&vp->v_interlock);
1449 	if (vp->v_flag & VXLOCK) {
1450 		if (vp->v_vxproc == curproc) {
1451 			printf("VXLOCK interlock avoided\n");
1452 		} else {
1453 			vp->v_flag |= VXWANT;
1454 			msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
1455 			    "vget", 0);
1456 			return (ENOENT);
1457 		}
1458 	}
1459 
1460 	vp->v_usecount++;
1461 
1462 	if (VSHOULDBUSY(vp))
1463 		vbusy(vp);
1464 	if (flags & LK_TYPE_MASK) {
1465 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
1466 			/*
1467 			 * must expand vrele here because we do not want
1468 			 * to call VOP_INACTIVE if the reference count
1469 			 * drops back to zero since it was never really
1470 			 * active. We must remove it from the free list
1471 			 * before sleeping so that multiple processes do
1472 			 * not try to recycle it.
1473 			 */
1474 			mtx_lock(&vp->v_interlock);
1475 			vp->v_usecount--;
1476 			if (VSHOULDFREE(vp))
1477 				vfree(vp);
1478 			mtx_unlock(&vp->v_interlock);
1479 		}
1480 		return (error);
1481 	}
1482 	mtx_unlock(&vp->v_interlock);
1483 	return (0);
1484 }
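/*
 * Typical usage (a sketch): a caller holding only a soft reference does
 *
 *	if (vget(vp, LK_EXCLUSIVE, p) != 0)
 *		return (ENOENT);
 *
 * where a nonzero return means the vnode was reclaimed for another use
 * while we slept and must not be touched.
 */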
1485 
1486 /*
1487  * Increase the reference count of a vnode.
1488  */
1489 void
1490 vref(struct vnode *vp)
1491 {
1492 	mtx_lock(&vp->v_interlock);
1493 	vp->v_usecount++;
1494 	mtx_unlock(&vp->v_interlock);
1495 }
1496 
1497 /*
1498  * Vnode put/release.
1499  * If count drops to zero, call inactive routine and return to freelist.
1500  */
1501 void
1502 vrele(vp)
1503 	struct vnode *vp;
1504 {
1505 	struct proc *p = curproc;	/* XXX */
1506 
1507 	KASSERT(vp != NULL, ("vrele: null vp"));
1508 
1509 	mtx_lock(&vp->v_interlock);
1510 
1511 	KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
1512 
1513 	if (vp->v_usecount > 1) {
1514 
1515 		vp->v_usecount--;
1516 		mtx_unlock(&vp->v_interlock);
1517 
1518 		return;
1519 	}
1520 
1521 	if (vp->v_usecount == 1) {
1522 
1523 		vp->v_usecount--;
1524 		if (VSHOULDFREE(vp))
1525 			vfree(vp);
1526 	/*
1527 	 * If we are doing a vput, the node is already locked, and we must
1528 	 * call VOP_INACTIVE with the node locked.  So, in the case of
1529 	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
1530 	 */
1531 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
1532 			VOP_INACTIVE(vp, p);
1533 		}
1534 
1535 	} else {
1536 #ifdef DIAGNOSTIC
1537 		vprint("vrele: negative ref count", vp);
1538 		mtx_unlock(&vp->v_interlock);
1539 #endif
1540 		panic("vrele: negative ref cnt");
1541 	}
1542 }
1543 
1544 /*
1545  * Release an already locked vnode.  This gives the same effect as
1546  * unlock+vrele(), but takes less time and avoids releasing and
1547  * re-acquiring the lock (as vrele() acquires the lock internally).
1548  */
1549 void
1550 vput(vp)
1551 	struct vnode *vp;
1552 {
1553 	struct proc *p = curproc;	/* XXX */
1554 
1555 	KASSERT(vp != NULL, ("vput: null vp"));
1556 	mtx_lock(&vp->v_interlock);
1557 	KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
1558 
1559 	if (vp->v_usecount > 1) {
1560 
1561 		vp->v_usecount--;
1562 		VOP_UNLOCK(vp, LK_INTERLOCK, p);
1563 		return;
1564 
1565 	}
1566 
1567 	if (vp->v_usecount == 1) {
1568 
1569 		vp->v_usecount--;
1570 		if (VSHOULDFREE(vp))
1571 			vfree(vp);
1572 	/*
1573 	 * If we are doing a vput, the node is already locked, and we must
1574 	 * call VOP_INACTIVE with the node locked.  So, in the case of
1575 	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
1576 	 */
1577 		mtx_unlock(&vp->v_interlock);
1578 		VOP_INACTIVE(vp, p);
1579 
1580 	} else {
1581 #ifdef DIAGNOSTIC
1582 		vprint("vput: negative ref count", vp);
1583 #endif
1584 		panic("vput: negative ref cnt");
1585 	}
1586 }
1587 
1588 /*
1589  * Somebody doesn't want the vnode recycled.
1590  */
1591 void
1592 vhold(vp)
1593 	register struct vnode *vp;
1594 {
1595 	int s;
1596 
1597 	s = splbio();
1598 	vp->v_holdcnt++;
1599 	if (VSHOULDBUSY(vp))
1600 		vbusy(vp);
1601 	splx(s);
1602 }
1603 
1604 /*
1605  * Note that there is one less who cares about this vnode.  vdrop() is the
1606  * opposite of vhold().
1607  */
1608 void
1609 vdrop(vp)
1610 	register struct vnode *vp;
1611 {
1612 	int s;
1613 
1614 	s = splbio();
1615 	if (vp->v_holdcnt <= 0)
1616 		panic("vdrop: holdcnt");
1617 	vp->v_holdcnt--;
1618 	if (VSHOULDFREE(vp))
1619 		vfree(vp);
1620 	splx(s);
1621 }
1622 
1623 /*
1624  * Remove any vnodes in the vnode table belonging to mount point mp.
1625  *
1626  * If MNT_NOFORCE is specified, there should not be any active ones,
1627  * return error if any are found (nb: this is a user error, not a
1628  * system error). If MNT_FORCE is specified, detach any active vnodes
1629  * that are found.
1630  */
1631 #ifdef DIAGNOSTIC
1632 static int busyprt = 0;		/* print out busy vnodes */
1633 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
1634 #endif
1635 
1636 int
1637 vflush(mp, skipvp, flags)
1638 	struct mount *mp;
1639 	struct vnode *skipvp;
1640 	int flags;
1641 {
1642 	struct proc *p = curproc;	/* XXX */
1643 	struct vnode *vp, *nvp;
1644 	int busy = 0;
1645 
1646 	mtx_lock(&mntvnode_mtx);
1647 loop:
1648 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
1649 		/*
1650 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
1651 		 * Start over if it was (it won't be on the list anymore).
1652 		 */
1653 		if (vp->v_mount != mp)
1654 			goto loop;
1655 		nvp = LIST_NEXT(vp, v_mntvnodes);
1656 		/*
1657 		 * Skip over a selected vnode.
1658 		 */
1659 		if (vp == skipvp)
1660 			continue;
1661 
1662 		mtx_lock(&vp->v_interlock);
1663 		/*
1664 		 * Skip over vnodes marked VSYSTEM.
1665 		 */
1666 		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1667 			mtx_unlock(&vp->v_interlock);
1668 			continue;
1669 		}
1670 		/*
1671 		 * If WRITECLOSE is set, only flush out regular file vnodes
1672 		 * open for writing.
1673 		 */
1674 		if ((flags & WRITECLOSE) &&
1675 		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1676 			mtx_unlock(&vp->v_interlock);
1677 			continue;
1678 		}
1679 
1680 		/*
1681 		 * With v_usecount == 0, all we need to do is clear out the
1682 		 * vnode data structures and we are done.
1683 		 */
1684 		if (vp->v_usecount == 0) {
1685 			mtx_unlock(&mntvnode_mtx);
1686 			vgonel(vp, p);
1687 			mtx_lock(&mntvnode_mtx);
1688 			continue;
1689 		}
1690 
1691 		/*
1692 		 * If FORCECLOSE is set, forcibly close the vnode. For block
1693 		 * or character devices, revert to an anonymous device. For
1694 		 * all other files, just kill them.
1695 		 */
1696 		if (flags & FORCECLOSE) {
1697 			mtx_unlock(&mntvnode_mtx);
1698 			if (vp->v_type != VCHR) {
1699 				vgonel(vp, p);
1700 			} else {
1701 				vclean(vp, 0, p);
1702 				vp->v_op = spec_vnodeop_p;
1703 				insmntque(vp, (struct mount *) 0);
1704 			}
1705 			mtx_lock(&mntvnode_mtx);
1706 			continue;
1707 		}
1708 #ifdef DIAGNOSTIC
1709 		if (busyprt)
1710 			vprint("vflush: busy vnode", vp);
1711 #endif
1712 		mtx_unlock(&vp->v_interlock);
1713 		busy++;
1714 	}
1715 	mtx_unlock(&mntvnode_mtx);
1716 	if (busy)
1717 		return (EBUSY);
1718 	return (0);
1719 }
1720 
1721 /*
1722  * Disassociate the underlying file system from a vnode.
1723  */
1724 static void
1725 vclean(vp, flags, p)
1726 	struct vnode *vp;
1727 	int flags;
1728 	struct proc *p;
1729 {
1730 	int active;
1731 
1732 	/*
1733 	 * Check to see if the vnode is in use. If so we have to reference it
1734 	 * before we clean it out so that its count cannot fall to zero and
1735 	 * generate a race against ourselves to recycle it.
1736 	 */
1737 	if ((active = vp->v_usecount))
1738 		vp->v_usecount++;
1739 
1740 	/*
1741 	 * Prevent the vnode from being recycled or brought into use while we
1742 	 * clean it out.
1743 	 */
1744 	if (vp->v_flag & VXLOCK)
1745 		panic("vclean: deadlock");
1746 	vp->v_flag |= VXLOCK;
1747 	vp->v_vxproc = curproc;
1748 	/*
1749 	 * Even if the count is zero, the VOP_INACTIVE routine may still
1750 	 * have the object locked while it cleans it out. The VOP_LOCK
1751 	 * ensures that the VOP_INACTIVE routine is done with its work.
1752 	 * For active vnodes, it ensures that no other activity can
1753 	 * occur while the underlying object is being cleaned out.
1754 	 */
1755 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
1756 
1757 	/*
1758 	 * Clean out any buffers associated with the vnode.
1759 	 * If the flush fails, just toss the buffers.
1760 	 */
1761 	if (flags & DOCLOSE) {
1762 		if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL)
1763 			(void) vn_write_suspend_wait(vp, NULL, V_WAIT);
1764 		if (vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0) != 0)
1765 			vinvalbuf(vp, 0, NOCRED, p, 0, 0);
1766 	}
1767 
1768 	VOP_DESTROYVOBJECT(vp);
1769 
1770 	/*
1771 	 * If purging an active vnode, it must be closed and
1772 	 * deactivated before being reclaimed. Note that the
1773 	 * VOP_INACTIVE will unlock the vnode.
1774 	 */
1775 	if (active) {
1776 		if (flags & DOCLOSE)
1777 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
1778 		VOP_INACTIVE(vp, p);
1779 	} else {
1780 		/*
1781 		 * Any other processes trying to obtain this lock must first
1782 		 * wait for VXLOCK to clear, then call the new lock operation.
1783 		 */
1784 		VOP_UNLOCK(vp, 0, p);
1785 	}
1786 	/*
1787 	 * Reclaim the vnode.
1788 	 */
1789 	if (VOP_RECLAIM(vp, p))
1790 		panic("vclean: cannot reclaim");
1791 
1792 	if (active) {
1793 		/*
1794 		 * Inline copy of vrele() since VOP_INACTIVE
1795 		 * has already been called.
1796 		 */
1797 		mtx_lock(&vp->v_interlock);
1798 		if (--vp->v_usecount <= 0) {
1799 #ifdef DIAGNOSTIC
1800 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1801 				vprint("vclean: bad ref count", vp);
1802 				panic("vclean: ref cnt");
1803 			}
1804 #endif
1805 			vfree(vp);
1806 		}
1807 		mtx_unlock(&vp->v_interlock);
1808 	}
1809 
1810 	cache_purge(vp);
1811 	vp->v_vnlock = NULL;
1812 	lockdestroy(&vp->v_lock);
1813 
1814 	if (VSHOULDFREE(vp))
1815 		vfree(vp);
1816 
1817 	/*
1818 	 * Done with purge, notify sleepers of the grim news.
1819 	 */
1820 	vp->v_op = dead_vnodeop_p;
1821 	vn_pollgone(vp);
1822 	vp->v_tag = VT_NON;
1823 	vp->v_flag &= ~VXLOCK;
1824 	vp->v_vxproc = NULL;
1825 	if (vp->v_flag & VXWANT) {
1826 		vp->v_flag &= ~VXWANT;
1827 		wakeup((caddr_t) vp);
1828 	}
1829 }
1830 
1831 /*
1832  * Eliminate all activity associated with the requested vnode
1833  * and with all vnodes aliased to the requested vnode.
1834  */
1835 int
1836 vop_revoke(ap)
1837 	struct vop_revoke_args /* {
1838 		struct vnode *a_vp;
1839 		int a_flags;
1840 	} */ *ap;
1841 {
1842 	struct vnode *vp, *vq;
1843 	dev_t dev;
1844 
1845 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
1846 
1847 	vp = ap->a_vp;
1848 	/*
1849 	 * If a vgone (or vclean) is already in progress,
1850 	 * wait until it is done and return.
1851 	 */
1852 	if (vp->v_flag & VXLOCK) {
1853 		vp->v_flag |= VXWANT;
1854 		msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
1855 		    "vop_revokeall", 0);
1856 		return (0);
1857 	}
1858 	dev = vp->v_rdev;
1859 	for (;;) {
1860 		mtx_lock(&spechash_mtx);
1861 		vq = SLIST_FIRST(&dev->si_hlist);
1862 		mtx_unlock(&spechash_mtx);
1863 		if (!vq)
1864 			break;
1865 		vgone(vq);
1866 	}
1867 	return (0);
1868 }
1869 
1870 /*
1871  * Recycle an unused vnode to the front of the free list.
1872  * Release the passed interlock if the vnode will be recycled.
1873  */
1874 int
1875 vrecycle(vp, inter_lkp, p)
1876 	struct vnode *vp;
1877 	struct mtx *inter_lkp;
1878 	struct proc *p;
1879 {
1880 
1881 	mtx_lock(&vp->v_interlock);
1882 	if (vp->v_usecount == 0) {
1883 		if (inter_lkp) {
1884 			mtx_unlock(inter_lkp);
1885 		}
1886 		vgonel(vp, p);
1887 		return (1);
1888 	}
1889 	mtx_unlock(&vp->v_interlock);
1890 	return (0);
1891 }
1892 
1893 /*
1894  * Eliminate all activity associated with a vnode
1895  * in preparation for reuse.
1896  */
1897 void
1898 vgone(vp)
1899 	register struct vnode *vp;
1900 {
1901 	struct proc *p = curproc;	/* XXX */
1902 
1903 	mtx_lock(&vp->v_interlock);
1904 	vgonel(vp, p);
1905 }
1906 
1907 /*
1908  * vgone, with the vp interlock held.
1909  */
1910 void
1911 vgonel(vp, p)
1912 	struct vnode *vp;
1913 	struct proc *p;
1914 {
1915 	int s;
1916 
1917 	/*
1918 	 * If a vgone (or vclean) is already in progress,
1919 	 * wait until it is done and return.
1920 	 */
1921 	if (vp->v_flag & VXLOCK) {
1922 		vp->v_flag |= VXWANT;
1923 		msleep((caddr_t)vp, &vp->v_interlock, PINOD | PDROP,
1924 		    "vgone", 0);
1925 		return;
1926 	}
1927 
1928 	/*
1929 	 * Clean out the filesystem specific data.
1930 	 */
1931 	vclean(vp, DOCLOSE, p);
1932 	mtx_lock(&vp->v_interlock);
1933 
1934 	/*
1935 	 * Delete from old mount point vnode list, if on one.
1936 	 */
1937 	if (vp->v_mount != NULL)
1938 		insmntque(vp, (struct mount *)0);
1939 	/*
1940 	 * If special device, remove it from special device alias list
1941 	 * if it is on one.
1942 	 */
1943 	if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) {
1944 		mtx_lock(&spechash_mtx);
1945 		SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext);
1946 		freedev(vp->v_rdev);
1947 		mtx_unlock(&spechash_mtx);
1948 		vp->v_rdev = NULL;
1949 	}
1950 
1951 	/*
1952 	 * If it is on the freelist and not already at the head,
1953 	 * move it to the head of the list. The test of the
1954 	 * VDOOMED flag and the reference count of zero is because
1955 	 * it will be removed from the free list by getnewvnode,
1956 	 * but will not have its reference count incremented until
1957 	 * after calling vgone. If the reference count were
1958 	 * incremented first, vgone would (incorrectly) try to
1959 	 * close the previous instance of the underlying object.
1960 	 */
1961 	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
1962 		s = splbio();
1963 		mtx_lock(&vnode_free_list_mtx);
1964 		if (vp->v_flag & VFREE)
1965 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1966 		else
1967 			freevnodes++;
1968 		vp->v_flag |= VFREE;
1969 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1970 		mtx_unlock(&vnode_free_list_mtx);
1971 		splx(s);
1972 	}
1973 
1974 	vp->v_type = VBAD;
1975 	mtx_unlock(&vp->v_interlock);
1976 }
1977 
1978 /*
1979  * Lookup a vnode by device number.
1980  */
1981 int
1982 vfinddev(dev, type, vpp)
1983 	dev_t dev;
1984 	enum vtype type;
1985 	struct vnode **vpp;
1986 {
1987 	struct vnode *vp;
1988 
1989 	mtx_lock(&spechash_mtx);
1990 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
1991 		if (type == vp->v_type) {
1992 			*vpp = vp;
1993 			mtx_unlock(&spechash_mtx);
1994 			return (1);
1995 		}
1996 	}
1997 	mtx_unlock(&spechash_mtx);
1998 	return (0);
1999 }
2000 
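/*
 * Illustrative sketch: callers use vfinddev() to map a dev_t back to
 * an alias vnode, for instance to revoke access to a device that has
 * gone away (an assumed consumer, shown for illustration):
 *
 *	struct vnode *vp;
 *
 *	if (vfinddev(dev, VCHR, &vp))
 *		VOP_REVOKE(vp, REVOKEALL);
 */
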
2001 /*
2002  * Calculate the total number of references to a special device.
2003  */
2004 int
2005 vcount(vp)
2006 	struct vnode *vp;
2007 {
2008 	struct vnode *vq;
2009 	int count;
2010 
2011 	count = 0;
2012 	mtx_lock(&spechash_mtx);
2013 	SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext)
2014 		count += vq->v_usecount;
2015 	mtx_unlock(&spechash_mtx);
2016 	return (count);
2017 }
2018 
2019 /*
2020  * Same as vcount(), but takes the dev_t as its argument.
2021  */
2022 int
2023 count_dev(dev)
2024 	dev_t dev;
2025 {
2026 	struct vnode *vp;
2027 
2028 	vp = SLIST_FIRST(&dev->si_hlist);
2029 	if (vp == NULL)
2030 		return (0);
2031 	return (vcount(vp));
2032 }
2033 
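/*
 * Illustrative sketch: a device driver's close routine can consult
 * count_dev() to decide whether this is the last close of the device
 * (a simplified, assumed idiom):
 *
 *	if (count_dev(dev) > 1)
 *		return (0);
 *
 * so that last-close teardown runs only when no aliases remain open.
 */
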
2034 /*
2035  * Print out a description of a vnode.
2036  */
2037 static char *typename[] =
2038 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
2039 
2040 void
2041 vprint(label, vp)
2042 	char *label;
2043 	struct vnode *vp;
2044 {
2045 	char buf[96];
2046 
2047 	if (label != NULL)
2048 		printf("%s: %p: ", label, (void *)vp);
2049 	else
2050 		printf("%p: ", (void *)vp);
2051 	printf("type %s, usecount %d, writecount %d, refcount %d,",
2052 	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
2053 	    vp->v_holdcnt);
2054 	buf[0] = '\0';
2055 	if (vp->v_flag & VROOT)
2056 		strcat(buf, "|VROOT");
2057 	if (vp->v_flag & VTEXT)
2058 		strcat(buf, "|VTEXT");
2059 	if (vp->v_flag & VSYSTEM)
2060 		strcat(buf, "|VSYSTEM");
2061 	if (vp->v_flag & VXLOCK)
2062 		strcat(buf, "|VXLOCK");
2063 	if (vp->v_flag & VXWANT)
2064 		strcat(buf, "|VXWANT");
2065 	if (vp->v_flag & VBWAIT)
2066 		strcat(buf, "|VBWAIT");
2067 	if (vp->v_flag & VDOOMED)
2068 		strcat(buf, "|VDOOMED");
2069 	if (vp->v_flag & VFREE)
2070 		strcat(buf, "|VFREE");
2071 	if (vp->v_flag & VOBJBUF)
2072 		strcat(buf, "|VOBJBUF");
2073 	if (buf[0] != '\0')
2074 		printf(" flags (%s)", &buf[1]);
2075 	if (vp->v_data == NULL) {
2076 		printf("\n");
2077 	} else {
2078 		printf("\n\t");
2079 		VOP_PRINT(vp);
2080 	}
2081 }
2082 
2083 #ifdef DDB
2084 #include <ddb/ddb.h>
2085 /*
2086  * List all of the locked vnodes in the system.
2087  * Called when debugging the kernel.
2088  */
2089 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
2090 {
2091 	struct proc *p = curproc;	/* XXX */
2092 	struct mount *mp, *nmp;
2093 	struct vnode *vp;
2094 
2095 	printf("Locked vnodes\n");
2096 	mtx_lock(&mountlist_mtx);
2097 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2098 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
2099 			nmp = TAILQ_NEXT(mp, mnt_list);
2100 			continue;
2101 		}
2102 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2103 			if (VOP_ISLOCKED(vp, NULL))
2104 				vprint((char *)0, vp);
2105 		}
2106 		mtx_lock(&mountlist_mtx);
2107 		nmp = TAILQ_NEXT(mp, mnt_list);
2108 		vfs_unbusy(mp, p);
2109 	}
2110 	mtx_unlock(&mountlist_mtx);
2111 }
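
/*
 * Illustrative usage: from the DDB prompt the command above is invoked
 * as
 *
 *	db> show lockedvnodes
 *
 * and walks every mount point, vprint()ing each locked vnode.
 */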
2112 #endif
2113 
2114 /*
2115  * Top level filesystem related information gathering.
2116  */
2117 static int	sysctl_ovfs_conf __P((SYSCTL_HANDLER_ARGS));
2118 
2119 static int
2120 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2121 {
2122 	int *name = (int *)arg1 - 1;	/* XXX */
2123 	u_int namelen = arg2 + 1;	/* XXX */
2124 	struct vfsconf *vfsp;
2125 
2126 #if 1 || defined(COMPAT_PRELITE2)
2127 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2128 	if (namelen == 1)
2129 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2130 #endif
2131 
2132 	/* XXX the code below does not compile: vfsops has no vfs_sysctl entry. */
2133 #ifdef notyet
2134 	/* all sysctl names at this level are at least name and field */
2135 	if (namelen < 2)
2136 		return (ENOTDIR);		/* overloaded */
2137 	if (name[0] != VFS_GENERIC) {
2138 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2139 			if (vfsp->vfc_typenum == name[0])
2140 				break;
2141 		if (vfsp == NULL)
2142 			return (EOPNOTSUPP);
2143 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2144 		    oldp, oldlenp, newp, newlen, p));
2145 	}
2146 #endif
2147 	switch (name[1]) {
2148 	case VFS_MAXTYPENUM:
2149 		if (namelen != 2)
2150 			return (ENOTDIR);
2151 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2152 	case VFS_CONF:
2153 		if (namelen != 3)
2154 			return (ENOTDIR);	/* overloaded */
2155 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2156 			if (vfsp->vfc_typenum == name[2])
2157 				break;
2158 		if (vfsp == NULL)
2159 			return (EOPNOTSUPP);
2160 		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
2161 	}
2162 	return (EOPNOTSUPP);
2163 }
2164 
2165 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
2166 	"Generic filesystem");
2167 
2168 #if 1 || defined(COMPAT_PRELITE2)
2169 
2170 static int
2171 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2172 {
2173 	int error;
2174 	struct vfsconf *vfsp;
2175 	struct ovfsconf ovfs;
2176 
2177 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
2178 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2179 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2180 		ovfs.vfc_index = vfsp->vfc_typenum;
2181 		ovfs.vfc_refcount = vfsp->vfc_refcount;
2182 		ovfs.vfc_flags = vfsp->vfc_flags;
2183 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2184 		if (error)
2185 			return (error);
2186 	}
2187 	return (0);
2188 }
2189 
2190 #endif /* 1 || COMPAT_PRELITE2 */
2191 
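/*
 * Illustrative sketch (userland, not kernel code): the VFS_CONF node
 * above can be queried with sysctl(3) using the mib
 * { CTL_VFS, VFS_GENERIC, VFS_CONF, typenum }, roughly:
 *
 *	struct vfsconf vfc;
 *	size_t len = sizeof(vfc);
 *	int mib[4] = { CTL_VFS, VFS_GENERIC, VFS_CONF, typenum };
 *
 *	if (sysctl(mib, 4, &vfc, &len, NULL, 0) == 0)
 *		printf("%s\n", vfc.vfc_name);
 *
 * This is close to what getvfsbyname(3) does internally; treat it as
 * a sketch, not a reference implementation.
 */
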
2192 #ifdef COMPILING_LINT
2193 #define KINFO_VNODESLOP	10
2194 /*
2195  * Dump vnode list (via sysctl).
2196  * Copyout address of vnode followed by vnode.
2197  */
2198 /* ARGSUSED */
2199 static int
2200 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2201 {
2202 	struct proc *p = curproc;	/* XXX */
2203 	struct mount *mp, *nmp;
2204 	struct vnode *nvp, *vp;
2205 	int error;
2206 
2207 #define VPTRSZ	sizeof (struct vnode *)
2208 #define VNODESZ	sizeof (struct vnode)
2209 
2210 	req->lock = 0;
2211 	if (!req->oldptr) /* Make an estimate */
2212 		return (SYSCTL_OUT(req, 0,
2213 			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
2214 
2215 	mtx_lock(&mountlist_mtx);
2216 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2217 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) {
2218 			nmp = TAILQ_NEXT(mp, mnt_list);
2219 			continue;
2220 		}
2221 again:
2222 		mtx_lock(&mntvnode_mtx);
2223 		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
2224 		     vp != NULL;
2225 		     vp = nvp) {
2226 			/*
2227 			 * Check that the vp is still associated with
2228 			 * this filesystem.  RACE: could have been
2229 			 * recycled onto the same filesystem.
2230 			 */
2231 			if (vp->v_mount != mp) {
2232 				mtx_unlock(&mntvnode_mtx);
2233 				goto again;
2234 			}
2235 			nvp = LIST_NEXT(vp, v_mntvnodes);
2236 			mtx_unlock(&mntvnode_mtx);
2237 			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
2238 			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
2239 				return (error);
2240 			mtx_lock(&mntvnode_mtx);
2241 		}
2242 		mtx_unlock(&mntvnode_mtx);
2243 		mtx_lock(&mountlist_mtx);
2244 		nmp = TAILQ_NEXT(mp, mnt_list);
2245 		vfs_unbusy(mp, p);
2246 	}
2247 	mtx_unlock(&mountlist_mtx);
2248 
2249 	return (0);
2250 }
2251 
2252 /*
2253  * XXX
2254  * Exporting the vnode list on large systems causes them to crash.
2255  * Exporting the vnode list on medium systems causes sysctl to coredump.
2256  */
2257 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
2258 	0, 0, sysctl_vnode, "S,vnode", "");
2259 #endif
2260 
2261 /*
2262  * Check to see if a filesystem is mounted on a block device.
2263  */
2264 int
2265 vfs_mountedon(vp)
2266 	struct vnode *vp;
2267 {
2268 
2269 	if (vp->v_rdev->si_mountpoint != NULL)
2270 		return (EBUSY);
2271 	return (0);
2272 }
2273 
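/*
 * Illustrative sketch: open/write paths for block devices can use this
 * as a guard (an assumed consumer, simplified):
 *
 *	if ((error = vfs_mountedon(vp)) != 0)
 *		return (error);
 *
 * so that writes to the raw device are refused with EBUSY while a
 * filesystem is mounted on it.
 */
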
2274 /*
2275  * Unmount all filesystems. The list is traversed in reverse order
2276  * of mounting to avoid dependencies.
2277  */
2278 void
2279 vfs_unmountall()
2280 {
2281 	struct mount *mp;
2282 	struct proc *p;
2283 	int error;
2284 
2285 	if (curproc != NULL)
2286 		p = curproc;
2287 	else
2288 		p = initproc;	/* XXX XXX should this be proc0? */
2289 	/*
2290 	 * Since this only runs when rebooting, it is not interlocked.
2291 	 */
2292 	while (!TAILQ_EMPTY(&mountlist)) {
2293 		mp = TAILQ_LAST(&mountlist, mntlist);
2294 		error = dounmount(mp, MNT_FORCE, p);
2295 		if (error) {
2296 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
2297 			printf("unmount of %s failed (",
2298 			    mp->mnt_stat.f_mntonname);
2299 			if (error == EBUSY)
2300 				printf("BUSY)\n");
2301 			else
2302 				printf("%d)\n", error);
2303 		} else {
2304 			/* The unmount has removed mp from the mountlist */
2305 		}
2306 	}
2307 }
2308 
2309 /*
2310  * Perform msync on all vnodes under a mount point.
2311  * The mount point must be locked.
2312  */
2313 void
2314 vfs_msync(struct mount *mp, int flags)
{
2315 	struct vnode *vp, *nvp;
2316 	struct vm_object *obj;
2317 	int anyio, tries;
2318 
2319 	tries = 5;
2320 loop:
2321 	anyio = 0;
2322 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
2323 
2324 		nvp = LIST_NEXT(vp, v_mntvnodes);
2325 
2326 		if (vp->v_mount != mp) {
2327 			goto loop;
2328 		}
2329 
2330 		if (vp->v_flag & VXLOCK)	/* XXX: what if MNT_WAIT? */
2331 			continue;
2332 
2333 		if (flags != MNT_WAIT) {
2334 			if (VOP_GETVOBJECT(vp, &obj) != 0 ||
2335 			    (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
2336 				continue;
2337 			if (VOP_ISLOCKED(vp, NULL))
2338 				continue;
2339 		}
2340 
2341 		mtx_lock(&vp->v_interlock);
2342 		if (VOP_GETVOBJECT(vp, &obj) == 0 &&
2343 		    (obj->flags & OBJ_MIGHTBEDIRTY)) {
2344 			if (!vget(vp, LK_INTERLOCK | LK_EXCLUSIVE |
2345 			    LK_RETRY | LK_NOOBJ, curproc)) {
2346 				if (VOP_GETVOBJECT(vp, &obj) == 0) {
2347 					vm_object_page_clean(obj, 0, 0,
					    flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
2348 					anyio = 1;
2349 				}
2350 				vput(vp);
2351 			}
2352 		} else {
2353 			mtx_unlock(&vp->v_interlock);
2354 		}
2355 	}
2356 	if (anyio && (--tries > 0))
2357 		goto loop;
2358 }
2359 
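/*
 * Illustrative sketch: the two common invocations are
 *
 *	vfs_msync(mp, MNT_NOWAIT);	(opportunistic, periodic sync)
 *	vfs_msync(mp, MNT_WAIT);	(synchronous, e.g. unmount)
 *
 * sync_fsync() below shows the MNT_NOWAIT form; the MNT_WAIT caller
 * is an assumption drawn from the unmount path.
 */
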
2360 /*
2361  * Create the VM object needed for VMIO and mmap support.  This
2362  * is done for all VREG files in the system.  Some filesystems might
2363  * also gain the additional metadata buffering capability of the
2364  * VMIO code by putting their device nodes into VMIO mode.
2365  *
2366  * vp must be locked when vfs_object_create is called.
2367  */
2368 int
2369 vfs_object_create(vp, p, cred)
2370 	struct vnode *vp;
2371 	struct proc *p;
2372 	struct ucred *cred;
2373 {
2374 	return (VOP_CREATEVOBJECT(vp, cred, p));
2375 }
2376 
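/*
 * Illustrative sketch: the generic open path is the expected caller,
 * along the lines of (an assumption for illustration):
 *
 *	if (vp->v_type == VREG &&
 *	    (error = vfs_object_create(vp, p, cred)) != 0)
 *		return (error);
 *
 * i.e. every regular file gets its VM object at open time.
 */
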
2377 /*
2378  * Mark a vnode as free, putting it up for recycling.
2379  */
2380 void
2381 vfree(vp)
2382 	struct vnode *vp;
2383 {
2384 	int s;
2385 
2386 	s = splbio();
2387 	mtx_lock(&vnode_free_list_mtx);
2388 	KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
2389 	if (vp->v_flag & VAGE) {
2390 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2391 	} else {
2392 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2393 	}
2394 	freevnodes++;
2395 	mtx_unlock(&vnode_free_list_mtx);
2396 	vp->v_flag &= ~VAGE;
2397 	vp->v_flag |= VFREE;
2398 	splx(s);
2399 }
2400 
2401 /*
2402  * Opposite of vfree() - mark a vnode as in use.
2403  */
2404 void
2405 vbusy(vp)
2406 	struct vnode *vp;
2407 {
2408 	int s;
2409 
2410 	s = splbio();
2411 	mtx_lock(&vnode_free_list_mtx);
2412 	KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
2413 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2414 	freevnodes--;
2415 	mtx_unlock(&vnode_free_list_mtx);
2416 	vp->v_flag &= ~(VFREE|VAGE);
2417 	splx(s);
2418 }
2419 
2420 /*
2421  * Record a process's interest in events which might happen to
2422  * a vnode.  Because poll uses the historic select-style interface
2423  * internally, this routine serves as both the ``check for any
2424  * pending events'' and the ``record my interest in future events''
2425  * functions.  (These are done together, while the lock is held,
2426  * to avoid race conditions.)
2427  */
2428 int
2429 vn_pollrecord(vp, p, events)
2430 	struct vnode *vp;
2431 	struct proc *p;
2432 	short events;
2433 {
2434 	mtx_lock(&vp->v_pollinfo.vpi_lock);
2435 	if (vp->v_pollinfo.vpi_revents & events) {
2436 		/*
2437 		 * This leaves events we are not interested
2438 		 * in available for the other process which
2439 		 * presumably had requested them
2440 		 * (otherwise they would never have been
2441 		 * recorded).
2442 		 */
2443 		events &= vp->v_pollinfo.vpi_revents;
2444 		vp->v_pollinfo.vpi_revents &= ~events;
2445 
2446 		mtx_unlock(&vp->v_pollinfo.vpi_lock);
2447 		return (events);
2448 	}
2449 	vp->v_pollinfo.vpi_events |= events;
2450 	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
2451 	mtx_unlock(&vp->v_pollinfo.vpi_lock);
2452 	return (0);
2453 }
2454 
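/*
 * Illustrative sketch: a filesystem with no special polling needs can
 * implement VOP_POLL by deferring to this routine, e.g.:
 *
 *	static int
 *	xxx_poll(struct vop_poll_args *ap)
 *	{
 *		return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
 *	}
 *
 * xxx_poll is a hypothetical name; the stock implementations live in
 * vfs_default.c.
 */
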
2455 /*
2456  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
2457  * it is possible for us to miss an event due to race conditions, but
2458  * that condition is expected to be rare, so for the moment it is the
2459  * preferred interface.
2460  */
2461 void
2462 vn_pollevent(vp, events)
2463 	struct vnode *vp;
2464 	short events;
2465 {
2466 	mtx_lock(&vp->v_pollinfo.vpi_lock);
2467 	if (vp->v_pollinfo.vpi_events & events) {
2468 		/*
2469 		 * We clear vpi_events so that we don't
2470 		 * call selwakeup() twice if two events are
2471 		 * posted before the polling process(es) is
2472 		 * awakened.  This also ensures that we take at
2473 		 * most one selwakeup() if the polling process
2474 		 * is no longer interested.  However, it does
2475 		 * mean that only one event can be noticed at
2476 		 * a time.  (Perhaps we should only clear those
2477 		 * event bits which we note?) XXX
2478 		 */
2479 		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
2480 		vp->v_pollinfo.vpi_revents |= events;
2481 		selwakeup(&vp->v_pollinfo.vpi_selinfo);
2482 	}
2483 	mtx_unlock(&vp->v_pollinfo.vpi_lock);
2484 }
2485 
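/*
 * Illustrative sketch: producers post events through the VN_POLLEVENT()
 * wrapper (defined in sys/vnode.h), which skips the call when no
 * process has registered interest, e.g.:
 *
 *	VN_POLLEVENT(vp, POLLIN | POLLRDNORM);
 *
 * after new data becomes readable on vp; the event mask shown is an
 * assumption for illustration.
 */
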
2486 #define VN_KNOTE(vp, b) \
2487 	KNOTE((struct klist *)&vp->v_pollinfo.vpi_selinfo.si_note, (b))
2488 
2489 /*
2490  * Wake up anyone polling on vp because it is being revoked.
2491  * This depends on dead_poll() returning POLLHUP for correct
2492  * behavior.
2493  */
2494 void
2495 vn_pollgone(vp)
2496 	struct vnode *vp;
2497 {
2498 	mtx_lock(&vp->v_pollinfo.vpi_lock);
2499 	VN_KNOTE(vp, NOTE_REVOKE);
2500 	if (vp->v_pollinfo.vpi_events) {
2501 		vp->v_pollinfo.vpi_events = 0;
2502 		selwakeup(&vp->v_pollinfo.vpi_selinfo);
2503 	}
2504 	mtx_unlock(&vp->v_pollinfo.vpi_lock);
2505 }
2506 
2509 /*
2510  * Routine to create and manage a filesystem syncer vnode.
2511  */
2512 #define sync_close ((int (*) __P((struct vop_close_args *)))nullop)
2513 static int	sync_fsync __P((struct vop_fsync_args *));
2514 static int	sync_inactive __P((struct vop_inactive_args *));
2515 static int	sync_reclaim __P((struct vop_reclaim_args *));
2516 #define sync_lock ((int (*) __P((struct vop_lock_args *)))vop_nolock)
2517 #define sync_unlock ((int (*) __P((struct vop_unlock_args *)))vop_nounlock)
2518 static int	sync_print __P((struct vop_print_args *));
2519 #define sync_islocked ((int (*) __P((struct vop_islocked_args *)))vop_noislocked)
2520 
2521 static vop_t **sync_vnodeop_p;
2522 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
2523 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
2524 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
2525 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
2526 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
2527 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
2528 	{ &vop_lock_desc,	(vop_t *) sync_lock },		/* lock */
2529 	{ &vop_unlock_desc,	(vop_t *) sync_unlock },	/* unlock */
2530 	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
2531 	{ &vop_islocked_desc,	(vop_t *) sync_islocked },	/* islocked */
2532 	{ NULL, NULL }
2533 };
2534 static struct vnodeopv_desc sync_vnodeop_opv_desc =
2535 	{ &sync_vnodeop_p, sync_vnodeop_entries };
2536 
2537 VNODEOP_SET(sync_vnodeop_opv_desc);
2538 
2539 /*
2540  * Create a new filesystem syncer vnode for the specified mount point.
2541  */
2542 int
2543 vfs_allocate_syncvnode(mp)
2544 	struct mount *mp;
2545 {
2546 	struct vnode *vp;
2547 	static long start, incr, next;
2548 	int error;
2549 
2550 	/* Allocate a new vnode */
2551 	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
2552 		mp->mnt_syncer = NULL;
2553 		return (error);
2554 	}
2555 	vp->v_type = VNON;
2556 	/*
2557 	 * Place the vnode onto the syncer worklist. We attempt to
2558 	 * scatter them about on the list so that they will go off
2559 	 * at evenly distributed times even if all the filesystems
2560 	 * are mounted at once.
2561 	 */
2562 	next += incr;
2563 	if (next == 0 || next > syncer_maxdelay) {
2564 		start /= 2;
2565 		incr /= 2;
2566 		if (start == 0) {
2567 			start = syncer_maxdelay / 2;
2568 			incr = syncer_maxdelay;
2569 		}
2570 		next = start;
2571 	}
2572 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
2573 	mp->mnt_syncer = vp;
2574 	return (0);
2575 }
2576 
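/*
 * Worked example of the scattering above, assuming syncer_maxdelay is
 * 32: the first mount initializes start = 16, incr = 32 and gets slot
 * 16 (modulo syncdelay); the next mounts get 8, 24, then 4, 12, 20,
 * 28, then 2, 6, 10, ... -- each round halves the stride, so syncer
 * vnodes land on the midpoints of a progressively finer binary
 * subdivision of the worklist before the pattern restarts.
 */
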
2577 /*
2578  * Do a lazy sync of the filesystem.
2579  */
2580 static int
2581 sync_fsync(ap)
2582 	struct vop_fsync_args /* {
2583 		struct vnode *a_vp;
2584 		struct ucred *a_cred;
2585 		int a_waitfor;
2586 		struct proc *a_p;
2587 	} */ *ap;
2588 {
2589 	struct vnode *syncvp = ap->a_vp;
2590 	struct mount *mp = syncvp->v_mount;
2591 	struct proc *p = ap->a_p;
2592 	int asyncflag;
2593 
2594 	/*
2595 	 * We only need to do something if this is a lazy evaluation.
2596 	 */
2597 	if (ap->a_waitfor != MNT_LAZY)
2598 		return (0);
2599 
2600 	/*
2601 	 * Move ourselves to the back of the sync list.
2602 	 */
2603 	vn_syncer_add_to_worklist(syncvp, syncdelay);
2604 
2605 	/*
2606 	 * Walk the list of vnodes pushing all that are dirty and
2607 	 * not already on the sync list.
2608 	 */
2609 	mtx_lock(&mountlist_mtx);
2610 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) {
2611 		mtx_unlock(&mountlist_mtx);
2612 		return (0);
2613 	}
2614 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
2615 		vfs_unbusy(mp, p);
2616 		return (0);
2617 	}
2618 	asyncflag = mp->mnt_flag & MNT_ASYNC;
2619 	mp->mnt_flag &= ~MNT_ASYNC;
2620 	vfs_msync(mp, MNT_NOWAIT);
2621 	VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p);
2622 	if (asyncflag)
2623 		mp->mnt_flag |= MNT_ASYNC;
2624 	vn_finished_write(mp);
2625 	vfs_unbusy(mp, p);
2626 	return (0);
2627 }
2628 
2629 /*
2630  * The syncer vnode is no longer referenced.
2631  */
2632 static int
2633 sync_inactive(ap)
2634 	struct vop_inactive_args /* {
2635 		struct vnode *a_vp;
2636 		struct proc *a_p;
2637 	} */ *ap;
2638 {
2639 
2640 	vgone(ap->a_vp);
2641 	return (0);
2642 }
2643 
2644 /*
2645  * The syncer vnode is no longer needed and is being decommissioned.
2646  *
2647  * Modifications to the worklist must be protected at splbio().
2648  */
2649 static int
2650 sync_reclaim(ap)
2651 	struct vop_reclaim_args /* {
2652 		struct vnode *a_vp;
2653 	} */ *ap;
2654 {
2655 	struct vnode *vp = ap->a_vp;
2656 	int s;
2657 
2658 	s = splbio();
2659 	vp->v_mount->mnt_syncer = NULL;
2660 	if (vp->v_flag & VONWORKLST) {
2661 		LIST_REMOVE(vp, v_synclist);
2662 		vp->v_flag &= ~VONWORKLST;
2663 	}
2664 	splx(s);
2665 
2666 	return (0);
2667 }
2668 
2669 /*
2670  * Print out a syncer vnode.
2671  */
2672 static int
2673 sync_print(ap)
2674 	struct vop_print_args /* {
2675 		struct vnode *a_vp;
2676 	} */ *ap;
2677 {
2678 	struct vnode *vp = ap->a_vp;
2679 
2680 	printf("syncer vnode");
2681 	if (vp->v_vnlock != NULL)
2682 		lockmgr_printinfo(vp->v_vnlock);
2683 	printf("\n");
2684 	return (0);
2685 }
2686 
2687 /*
2688  * Extract the dev_t from a VCHR vnode.
2689  */
2690 dev_t
2691 vn_todev(vp)
2692 	struct vnode *vp;
2693 {
2694 	if (vp->v_type != VCHR)
2695 		return (NODEV);
2696 	return (vp->v_rdev);
2697 }
2698 
2699 /*
2700  * Check whether a vnode represents a disk device.
2701  */
2702 int
2703 vn_isdisk(vp, errp)
2704 	struct vnode *vp;
2705 	int *errp;
2706 {
2707 	struct cdevsw *cdevsw;
2708 
2709 	if (vp->v_type != VCHR) {
2710 		if (errp != NULL)
2711 			*errp = ENOTBLK;
2712 		return (0);
2713 	}
2714 	if (vp->v_rdev == NULL) {
2715 		if (errp != NULL)
2716 			*errp = ENXIO;
2717 		return (0);
2718 	}
2719 	cdevsw = devsw(vp->v_rdev);
2720 	if (cdevsw == NULL) {
2721 		if (errp != NULL)
2722 			*errp = ENXIO;
2723 		return (0);
2724 	}
2725 	if (!(cdevsw->d_flags & D_DISK)) {
2726 		if (errp != NULL)
2727 			*errp = ENOTBLK;
2728 		return (0);
2729 	}
2730 	if (errp != NULL)
2731 		*errp = 0;
2732 	return (1);
2733 }
2734 
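/*
 * Illustrative sketch: filesystems that require a disk backing store
 * gate their mount paths on this check, e.g.:
 *
 *	int error;
 *
 *	if (!vn_isdisk(devvp, &error))
 *		return (error);
 *
 * which is the idiom FFS-style filesystems use before opening the
 * device vnode devvp.
 */
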
2735 /*
2736  * Free data allocated by namei(); see namei(9) for details.
2737  */
2738 void
2739 NDFREE(ndp, flags)
2740 	struct nameidata *ndp;
2741 	const uint flags;
2742 {
2743 	if (!(flags & NDF_NO_FREE_PNBUF) &&
2744 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
2745 		zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
2746 		ndp->ni_cnd.cn_flags &= ~HASBUF;
2747 	}
2748 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
2749 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
2750 	    ndp->ni_dvp != ndp->ni_vp)
2751 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_proc);
2752 	if (!(flags & NDF_NO_DVP_RELE) &&
2753 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
2754 		vrele(ndp->ni_dvp);
2755 		ndp->ni_dvp = NULL;
2756 	}
2757 	if (!(flags & NDF_NO_VP_UNLOCK) &&
2758 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
2759 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_proc);
2760 	if (!(flags & NDF_NO_VP_RELE) &&
2761 	    ndp->ni_vp) {
2762 		vrele(ndp->ni_vp);
2763 		ndp->ni_vp = NULL;
2764 	}
2765 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
2766 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
2767 		vrele(ndp->ni_startdir);
2768 		ndp->ni_startdir = NULL;
2769 	}
2770 }
2771 
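/*
 * Illustrative sketch: a typical syscall pairs namei() with NDFREE(),
 * e.g.:
 *
 *	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
 *	if ((error = namei(&nd)) != 0)
 *		return (error);
 *	NDFREE(&nd, NDF_ONLY_PNBUF);
 *	vp = nd.ni_vp;
 *
 * NDF_ONLY_PNBUF frees just the pathname buffer, leaving the vnode
 * references and locks for the caller to release.
 */
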
2772 /*
2773  * Common file system object access control check routine.  Accepts a
2774  * vnode's type, "mode", uid and gid, requested access mode, credentials,
2775  * and optional call-by-reference privused argument allowing vaccess()
2776  * to indicate to the caller whether privilege was used to satisfy the
2777  * request.  Returns 0 on success, or an errno on failure.
2778  */
2779 int
2780 vaccess(type, file_mode, file_uid, file_gid, acc_mode, cred, privused)
2781 	enum vtype type;
2782 	mode_t file_mode;
2783 	uid_t file_uid;
2784 	gid_t file_gid;
2785 	mode_t acc_mode;
2786 	struct ucred *cred;
2787 	int *privused;
2788 {
2789 	mode_t dac_granted;
2790 #ifdef CAPABILITIES
2791 	mode_t cap_granted;
2792 #endif
2793 
2794 	/*
2795 	 * Look for a normal, non-privileged way to access the file/directory
2796 	 * as requested.  If it exists, go with that.
2797 	 */
2798 
2799 	if (privused != NULL)
2800 		*privused = 0;
2801 
2802 	dac_granted = 0;
2803 
2804 	/* Check the owner. */
2805 	if (cred->cr_uid == file_uid) {
2806 		dac_granted |= VADMIN;
2807 		if (file_mode & S_IXUSR)
2808 			dac_granted |= VEXEC;
2809 		if (file_mode & S_IRUSR)
2810 			dac_granted |= VREAD;
2811 		if (file_mode & S_IWUSR)
2812 			dac_granted |= VWRITE;
2813 
2814 		if ((acc_mode & dac_granted) == acc_mode)
2815 			return (0);
2816 
2817 		goto privcheck;
2818 	}
2819 
2820 	/* Otherwise, check the groups (first match) */
2821 	if (groupmember(file_gid, cred)) {
2822 		if (file_mode & S_IXGRP)
2823 			dac_granted |= VEXEC;
2824 		if (file_mode & S_IRGRP)
2825 			dac_granted |= VREAD;
2826 		if (file_mode & S_IWGRP)
2827 			dac_granted |= VWRITE;
2828 
2829 		if ((acc_mode & dac_granted) == acc_mode)
2830 			return (0);
2831 
2832 		goto privcheck;
2833 	}
2834 
2835 	/* Otherwise, check everyone else. */
2836 	if (file_mode & S_IXOTH)
2837 		dac_granted |= VEXEC;
2838 	if (file_mode & S_IROTH)
2839 		dac_granted |= VREAD;
2840 	if (file_mode & S_IWOTH)
2841 		dac_granted |= VWRITE;
2842 	if ((acc_mode & dac_granted) == acc_mode)
2843 		return (0);
2844 
2845 privcheck:
2846 	if (!suser_xxx(cred, NULL, PRISON_ROOT)) {
2847 		/* XXX audit: privilege used */
2848 		if (privused != NULL)
2849 			*privused = 1;
2850 		return (0);
2851 	}
2852 
2853 #ifdef CAPABILITIES
2854 	/*
2855 	 * Build a capability mask to determine if the set of capabilities
2856 	 * satisfies the requirements when combined with the granted mask
2857 	 * from above.
2858 	 * For each capability, if the capability is required, bitwise
2859 	 * or the request type onto the cap_granted mask.
2860 	 */
2861 	cap_granted = 0;
2862 	if ((acc_mode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
2863 	    !cap_check_xxx(cred, NULL, CAP_DAC_EXECUTE, PRISON_ROOT))
2864 		cap_granted |= VEXEC;
2865 
2866 	if ((acc_mode & VREAD) && ((dac_granted & VREAD) == 0) &&
2867 	    !cap_check_xxx(cred, NULL, CAP_DAC_READ_SEARCH, PRISON_ROOT))
2868 		cap_granted |= VREAD;
2869 
2870 	if ((acc_mode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
2871 	    !cap_check_xxx(cred, NULL, CAP_DAC_WRITE, PRISON_ROOT))
2872 		cap_granted |= VWRITE;
2873 
2874 	if ((acc_mode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
2875 	    !cap_check_xxx(cred, NULL, CAP_FOWNER, PRISON_ROOT))
2876 		cap_granted |= VADMIN;
2877 
2878 	if ((acc_mode & (cap_granted | dac_granted)) == acc_mode) {
2879 		/* XXX audit: privilege used */
2880 		if (privused != NULL)
2881 			*privused = 1;
2882 		return (0);
2883 	}
2884 #endif
2885 
2886 	return ((acc_mode & VADMIN) ? EPERM : EACCES);
2887 }
2888 
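/*
 * Illustrative sketch: a filesystem's VOP_ACCESS routine typically
 * finishes by delegating to vaccess() with its on-disk attributes,
 * e.g. (UFS-like; the inode variable ip is an assumption):
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_mode, ap->a_cred, NULL));
 */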
2889