xref: /freebsd/sys/kern/vfs_subr.c (revision b601c69bdbe8755d26570261d7fd4c02ee4eff74)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
39  * $FreeBSD$
40  */
41 
42 /*
43  * External virtual filesystem routines
44  */
45 #include "opt_ddb.h"
46 #include "opt_ffs.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/conf.h>
53 #include <sys/dirent.h>
54 #include <sys/domain.h>
55 #include <sys/eventhandler.h>
56 #include <sys/fcntl.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/malloc.h>
60 #include <sys/mount.h>
61 #include <sys/namei.h>
62 #include <sys/proc.h>
63 #include <sys/reboot.h>
64 #include <sys/socket.h>
65 #include <sys/stat.h>
66 #include <sys/sysctl.h>
67 #include <sys/vmmeter.h>
68 #include <sys/vnode.h>
69 
70 #include <machine/limits.h>
71 
72 #include <vm/vm.h>
73 #include <vm/vm_object.h>
74 #include <vm/vm_extern.h>
75 #include <vm/pmap.h>
76 #include <vm/vm_map.h>
77 #include <vm/vm_page.h>
78 #include <vm/vm_pager.h>
79 #include <vm/vnode_pager.h>
80 #include <vm/vm_zone.h>
81 
82 static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
83 
84 static void	insmntque __P((struct vnode *vp, struct mount *mp));
85 static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
86 static unsigned long	numvnodes;
87 SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
88 
89 enum vtype iftovt_tab[16] = {
90 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
91 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
92 };
93 int vttoif_tab[9] = {
94 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
95 	S_IFSOCK, S_IFIFO, S_IFMT,
96 };
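/*
 * These tables back the IFTOVT() and VTTOIF() conversion macros in
 * <sys/vnode.h>.  For example, IFTOVT(S_IFDIR) indexes iftovt_tab by
 * the file-type bits of the mode (S_IFDIR >> 12 == 4) and yields VDIR.
 * (Illustrative note; the macros themselves live in the header, not
 * here.)
 */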
97 
98 static TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
99 
100 static u_long wantfreevnodes = 25;
101 SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
102 static u_long freevnodes = 0;
103 SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
104 
105 static int reassignbufcalls;
106 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
107 static int reassignbufloops;
108 SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW, &reassignbufloops, 0, "");
109 static int reassignbufsortgood;
110 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW, &reassignbufsortgood, 0, "");
111 static int reassignbufsortbad;
112 SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW, &reassignbufsortbad, 0, "");
113 static int reassignbufmethod = 1;
114 SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW, &reassignbufmethod, 0, "");
115 
116 #ifdef ENABLE_VFS_IOOPT
117 int vfs_ioopt = 0;
118 SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
119 #endif
120 
121 struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist); /* mounted fs */
122 struct simplelock mountlist_slock;
123 struct simplelock mntvnode_slock;
124 int	nfs_mount_type = -1;
125 #ifndef NULL_SIMPLELOCKS
126 static struct simplelock mntid_slock;
127 static struct simplelock vnode_free_list_slock;
128 static struct simplelock spechash_slock;
129 #endif
130 struct nfs_public nfs_pub;	/* publicly exported FS */
131 static vm_zone_t vnode_zone;
132 int	prtactive = 0;		/* 1 => print out reclaim of active vnodes */
133 
134 /*
135  * The workitem queue.
136  */
137 #define SYNCER_MAXDELAY		32
138 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
139 time_t syncdelay = 30;		/* max time to delay syncing data */
140 time_t filedelay = 30;		/* time to delay syncing files */
141 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
142 time_t dirdelay = 29;		/* time to delay syncing directories */
143 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
144 time_t metadelay = 28;		/* time to delay syncing metadata */
145 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
146 static int rushjob;			/* number of slots to run ASAP */
147 static int stat_rush_requests;	/* number of times I/O speeded up */
148 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
149 
150 static int syncer_delayno = 0;
151 static long syncer_mask;
152 LIST_HEAD(synclist, vnode);
153 static struct synclist *syncer_workitem_pending;
154 
155 int desiredvnodes;
156 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
157     &desiredvnodes, 0, "Maximum number of vnodes");
158 
159 static void	vfs_free_addrlist __P((struct netexport *nep));
160 static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
161 static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
162 				       struct export_args *argp));
163 
164 /*
165  * Initialize the vnode management data structures.
166  */
167 void
168 vntblinit()
169 {
170 
171 	desiredvnodes = maxproc + cnt.v_page_count / 4;
172 	simple_lock_init(&mntvnode_slock);
173 	simple_lock_init(&mntid_slock);
174 	simple_lock_init(&spechash_slock);
175 	TAILQ_INIT(&vnode_free_list);
176 	simple_lock_init(&vnode_free_list_slock);
177 	vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
178 	/*
179 	 * Initialize the filesystem syncer.
180 	 */
181 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
182 		&syncer_mask);
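	/*
	 * hashinit() rounds the table size to a power of two and hands
	 * back size - 1 in syncer_mask, so the real maximum delay is
	 * recomputed below from the mask rather than assumed to be
	 * SYNCER_MAXDELAY.  (Descriptive note; the rounding behavior
	 * belongs to hashinit(), not to anything in this file.)
	 */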
183 	syncer_maxdelay = syncer_mask + 1;
184 }
185 
186 /*
187  * Mark a mount point as busy. Used to synchronize access and to delay
188  * unmounting. Interlock is not released on failure.
189  */
190 int
191 vfs_busy(mp, flags, interlkp, p)
192 	struct mount *mp;
193 	int flags;
194 	struct simplelock *interlkp;
195 	struct proc *p;
196 {
197 	int lkflags;
198 
199 	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
200 		if (flags & LK_NOWAIT)
201 			return (ENOENT);
202 		mp->mnt_kern_flag |= MNTK_MWAIT;
203 		if (interlkp) {
204 			simple_unlock(interlkp);
205 		}
206 		/*
207 		 * Since all busy locks are shared except the exclusive
208 		 * lock granted when unmounting, the only place that a
209 		 * wakeup needs to be done is at the release of the
210 		 * exclusive lock at the end of dounmount.
211 		 */
212 		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
213 		if (interlkp) {
214 			simple_lock(interlkp);
215 		}
216 		return (ENOENT);
217 	}
218 	lkflags = LK_SHARED | LK_NOPAUSE;
219 	if (interlkp)
220 		lkflags |= LK_INTERLOCK;
221 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
222 		panic("vfs_busy: unexpected lock failure");
223 	return (0);
224 }
225 
226 /*
227  * Free a busy filesystem.
228  */
229 void
230 vfs_unbusy(mp, p)
231 	struct mount *mp;
232 	struct proc *p;
233 {
234 
235 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
236 }
237 
238 /*
239  * Lookup a filesystem type, and if found allocate and initialize
240  * a mount structure for it.
241  *
242  * Devname is usually updated by mount(8) after booting.
243  */
244 int
245 vfs_rootmountalloc(fstypename, devname, mpp)
246 	char *fstypename;
247 	char *devname;
248 	struct mount **mpp;
249 {
250 	struct proc *p = curproc;	/* XXX */
251 	struct vfsconf *vfsp;
252 	struct mount *mp;
253 
254 	if (fstypename == NULL)
255 		return (ENODEV);
256 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
257 		if (!strcmp(vfsp->vfc_name, fstypename))
258 			break;
259 	if (vfsp == NULL)
260 		return (ENODEV);
261 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
262 	bzero((char *)mp, (u_long)sizeof(struct mount));
263 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, LK_NOPAUSE);
264 	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
265 	LIST_INIT(&mp->mnt_vnodelist);
266 	mp->mnt_vfc = vfsp;
267 	mp->mnt_op = vfsp->vfc_vfsops;
268 	mp->mnt_flag = MNT_RDONLY;
269 	mp->mnt_vnodecovered = NULLVP;
270 	vfsp->vfc_refcount++;
271 	mp->mnt_iosize_max = DFLTPHYS;
272 	mp->mnt_stat.f_type = vfsp->vfc_typenum;
273 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
274 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
275 	mp->mnt_stat.f_mntonname[0] = '/';
276 	mp->mnt_stat.f_mntonname[1] = 0;
277 	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
278 	*mpp = mp;
279 	return (0);
280 }
281 
282 /*
283  * Find an appropriate filesystem to use for the root. If a filesystem
284  * has not been preselected, walk through the list of known filesystems
285  * with mountroot routines, trying each in turn until one works or we
286  * have tried them all.
287  */
288 #ifdef notdef	/* XXX JH */
289 int
290 lite2_vfs_mountroot()
291 {
292 	struct vfsconf *vfsp;
293 	extern int (*lite2_mountroot) __P((void));
294 	int error;
295 
296 	if (lite2_mountroot != NULL)
297 		return ((*lite2_mountroot)());
298 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
299 		if (vfsp->vfc_mountroot == NULL)
300 			continue;
301 		if ((error = (*vfsp->vfc_mountroot)()) == 0)
302 			return (0);
303 		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
304 	}
305 	return (ENODEV);
306 }
307 #endif
308 
309 /*
310  * Lookup a mount point by filesystem identifier.
311  */
312 struct mount *
313 vfs_getvfs(fsid)
314 	fsid_t *fsid;
315 {
316 	register struct mount *mp;
317 
318 	simple_lock(&mountlist_slock);
319 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
320 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
321 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
322 			simple_unlock(&mountlist_slock);
323 			return (mp);
324 		}
325 	}
326 	simple_unlock(&mountlist_slock);
327 	return ((struct mount *) 0);
328 }
329 
330 /*
331  * Get a new unique fsid.  Try to make its val[0] unique, since this value
332  * will be used to create fake device numbers for stat().  Also try (but
333  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
334  * support 16-bit device numbers.  We end up with unique val[0]'s for the
335  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
336  *
337  * Keep in mind that several mounts may be running in parallel.  Starting
338  * the search one past where the previous search terminated is both a
339  * micro-optimization and a defense against returning the same fsid to
340  * different mounts.
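 *
 * Concretely (assuming makeudev() packs its first argument, the major
 * number, into bits 8..15 of the result), the val[0] constructed below
 * is laid out as:
 *
 *	bits 24..31	filesystem type number (vfc_typenum & 0xFF)
 *	bits 16..23	high byte of mntid_base
 *	bits  8..15	0xFF (the fake major number 255)
 *	bits  0..7	low byte of mntid_base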
341  */
342 void
343 vfs_getnewfsid(mp)
344 	struct mount *mp;
345 {
346 	static u_int16_t mntid_base;
347 	fsid_t tfsid;
348 	int mtype;
349 
350 	simple_lock(&mntid_slock);
351 	mtype = mp->mnt_vfc->vfc_typenum;
352 	tfsid.val[1] = mtype;
353 	mtype = (mtype & 0xFF) << 24;
354 	for (;;) {
355 		tfsid.val[0] = makeudev(255,
356 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
357 		mntid_base++;
358 		if (vfs_getvfs(&tfsid) == NULL)
359 			break;
360 	}
361 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
362 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
363 	simple_unlock(&mntid_slock);
364 }
365 
366 /*
367  * Knob to control the precision of file timestamps:
368  *
369  *   0 = seconds only; nanoseconds zeroed.
370  *   1 = seconds and nanoseconds, accurate within 1/HZ.
371  *   2 = seconds and nanoseconds, truncated to microseconds.
372  * >=3 = seconds and nanoseconds, maximum precision.
373  */
374 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
375 
376 static int timestamp_precision = TSP_SEC;
377 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
378     &timestamp_precision, 0, "");
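/*
 * The knob above is exposed as vfs.timestamp_precision; for example
 * (assuming the standard sysctl(8) utility):
 *
 *	sysctl vfs.timestamp_precision=2
 *
 * selects microsecond-truncated timestamps system-wide.
 */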
379 
380 /*
381  * Get a current timestamp.
382  */
383 void
384 vfs_timestamp(tsp)
385 	struct timespec *tsp;
386 {
387 	struct timeval tv;
388 
389 	switch (timestamp_precision) {
390 	case TSP_SEC:
391 		tsp->tv_sec = time_second;
392 		tsp->tv_nsec = 0;
393 		break;
394 	case TSP_HZ:
395 		getnanotime(tsp);
396 		break;
397 	case TSP_USEC:
398 		microtime(&tv);
399 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
400 		break;
401 	case TSP_NSEC:
402 	default:
403 		nanotime(tsp);
404 		break;
405 	}
406 }
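/*
 * Sketch of a typical caller (hypothetical inode field names, not taken
 * from this file): a filesystem stamping an inode's modification time
 * might do
 *
 *	struct timespec ts;
 *
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts.tv_sec;	(plus the nanoseconds field)
 *
 * so that every filesystem honors the precision knob above.
 */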
407 
408 /*
409  * Set vnode attributes to VNOVAL
410  */
411 void
412 vattr_null(vap)
413 	register struct vattr *vap;
414 {
415 
416 	vap->va_type = VNON;
417 	vap->va_size = VNOVAL;
418 	vap->va_bytes = VNOVAL;
419 	vap->va_mode = VNOVAL;
420 	vap->va_nlink = VNOVAL;
421 	vap->va_uid = VNOVAL;
422 	vap->va_gid = VNOVAL;
423 	vap->va_fsid = VNOVAL;
424 	vap->va_fileid = VNOVAL;
425 	vap->va_blocksize = VNOVAL;
426 	vap->va_rdev = VNOVAL;
427 	vap->va_atime.tv_sec = VNOVAL;
428 	vap->va_atime.tv_nsec = VNOVAL;
429 	vap->va_mtime.tv_sec = VNOVAL;
430 	vap->va_mtime.tv_nsec = VNOVAL;
431 	vap->va_ctime.tv_sec = VNOVAL;
432 	vap->va_ctime.tv_nsec = VNOVAL;
433 	vap->va_flags = VNOVAL;
434 	vap->va_gen = VNOVAL;
435 	vap->va_vaflags = 0;
436 }
437 
438 /*
439  * Routines having to do with the management of the vnode table.
440  */
441 extern vop_t **dead_vnodeop_p;
442 
443 /*
444  * Return the next vnode from the free list.
445  */
446 int
447 getnewvnode(tag, mp, vops, vpp)
448 	enum vtagtype tag;
449 	struct mount *mp;
450 	vop_t **vops;
451 	struct vnode **vpp;
452 {
453 	int s, count;
454 	struct proc *p = curproc;	/* XXX */
455 	struct vnode *vp = NULL;
456 	struct mount *vnmp;
457 	vm_object_t object;
458 
459 	/*
460 	 * We take the least recently used vnode from the freelist
461 	 * if we can get it, it has no cached pages, and no
462 	 * namecache entries are relative to it.
463 	 * Otherwise we allocate a new vnode.
464 	 */
465 
466 	s = splbio();
467 	simple_lock(&vnode_free_list_slock);
468 
469 	if (wantfreevnodes && freevnodes < wantfreevnodes) {
470 		vp = NULL;
471 	} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
472 		/*
473 		 * XXX: this is only here to be backwards compatible
474 		 */
475 		vp = NULL;
476 	} else for (count = 0; count < freevnodes; count++) {
477 		vp = TAILQ_FIRST(&vnode_free_list);
478 		if (vp == NULL || vp->v_usecount)
479 			panic("getnewvnode: free vnode isn't");
480 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
481 		/*
482 		 * Don't recycle if active in the namecache or
483 		 * if it still has cached pages or we cannot get
484 		 * its interlock.
485 		 */
486 		object = vp->v_object;
487 		if (LIST_FIRST(&vp->v_cache_src) != NULL ||
488 		    (object && (object->resident_page_count ||
489 		     object->ref_count)) ||
490 		    !simple_lock_try(&vp->v_interlock)) {
491 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
492 			vp = NULL;
493 			continue;
494 		}
495 		/*
496 		 * Skip over it if its filesystem is being suspended.
497 		 */
498 		if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0)
499 			break;
500 		simple_unlock(&vp->v_interlock);
501 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
502 		vp = NULL;
503 	}
504 	if (vp) {
505 		vp->v_flag |= VDOOMED;
506 		freevnodes--;
507 		simple_unlock(&vnode_free_list_slock);
508 		cache_purge(vp);
509 		vp->v_lease = NULL;
510 		if (vp->v_type != VBAD) {
511 			vgonel(vp, p);
512 		} else {
513 			simple_unlock(&vp->v_interlock);
514 		}
515 		vn_finished_write(vnmp);
516 
517 #ifdef INVARIANTS
518 		{
519 			int s;
520 
521 			if (vp->v_data)
522 				panic("cleaned vnode isn't");
523 			s = splbio();
524 			if (vp->v_numoutput)
525 				panic("Clean vnode has pending I/O's");
526 			splx(s);
527 			if (vp->v_writecount != 0)
528 				panic("Non-zero write count");
529 		}
530 #endif
531 		vp->v_flag = 0;
532 		vp->v_lastw = 0;
533 		vp->v_lasta = 0;
534 		vp->v_cstart = 0;
535 		vp->v_clen = 0;
536 		vp->v_socket = 0;
537 	} else {
538 		simple_unlock(&vnode_free_list_slock);
539 		vp = (struct vnode *) zalloc(vnode_zone);
540 		bzero((char *) vp, sizeof *vp);
541 		simple_lock_init(&vp->v_interlock);
542 		vp->v_dd = vp;
543 		cache_purge(vp);
544 		LIST_INIT(&vp->v_cache_src);
545 		TAILQ_INIT(&vp->v_cache_dst);
546 		numvnodes++;
547 	}
548 
549 	TAILQ_INIT(&vp->v_cleanblkhd);
550 	TAILQ_INIT(&vp->v_dirtyblkhd);
551 	vp->v_type = VNON;
552 	vp->v_tag = tag;
553 	vp->v_op = vops;
554 	insmntque(vp, mp);
555 	*vpp = vp;
556 	vp->v_usecount = 1;
557 	vp->v_data = 0;
558 	splx(s);
559 
560 	vfs_object_create(vp, p, p->p_ucred);
561 	return (0);
562 }
563 
564 /*
565  * Move a vnode from one mount queue to another.
566  */
567 static void
568 insmntque(vp, mp)
569 	register struct vnode *vp;
570 	register struct mount *mp;
571 {
572 
573 	simple_lock(&mntvnode_slock);
574 	/*
575 	 * Delete from old mount point vnode list, if on one.
576 	 */
577 	if (vp->v_mount != NULL)
578 		LIST_REMOVE(vp, v_mntvnodes);
579 	/*
580 	 * Insert into list of vnodes for the new mount point, if available.
581 	 */
582 	if ((vp->v_mount = mp) == NULL) {
583 		simple_unlock(&mntvnode_slock);
584 		return;
585 	}
586 	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
587 	simple_unlock(&mntvnode_slock);
588 }
589 
590 /*
591  * Update outstanding I/O count and do wakeup if requested.
592  */
593 void
594 vwakeup(bp)
595 	register struct buf *bp;
596 {
597 	register struct vnode *vp;
598 
599 	bp->b_flags &= ~B_WRITEINPROG;
600 	if ((vp = bp->b_vp)) {
601 		vp->v_numoutput--;
602 		if (vp->v_numoutput < 0)
603 			panic("vwakeup: neg numoutput");
604 		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
605 			vp->v_flag &= ~VBWAIT;
606 			wakeup((caddr_t) &vp->v_numoutput);
607 		}
608 	}
609 }
610 
611 /*
612  * Flush out and invalidate all buffers associated with a vnode.
613  * Called with the underlying object locked.
614  */
615 int
616 vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
617 	register struct vnode *vp;
618 	int flags;
619 	struct ucred *cred;
620 	struct proc *p;
621 	int slpflag, slptimeo;
622 {
623 	register struct buf *bp;
624 	struct buf *nbp, *blist;
625 	int s, error;
626 	vm_object_t object;
627 
628 	if (flags & V_SAVE) {
629 		s = splbio();
630 		while (vp->v_numoutput) {
631 			vp->v_flag |= VBWAIT;
632 			error = tsleep((caddr_t)&vp->v_numoutput,
633 			    slpflag | (PRIBIO + 1), "vinvlbuf", slptimeo);
634 			if (error) {
635 				splx(s);
636 				return (error);
637 			}
638 		}
639 		if (!TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
640 			splx(s);
641 			if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)) != 0)
642 				return (error);
643 			s = splbio();
644 			if (vp->v_numoutput > 0 ||
645 			    !TAILQ_EMPTY(&vp->v_dirtyblkhd))
646 				panic("vinvalbuf: dirty bufs");
647 		}
648 		splx(s);
649 	}
650 	s = splbio();
651 	for (;;) {
652 		blist = TAILQ_FIRST(&vp->v_cleanblkhd);
653 		if (!blist)
654 			blist = TAILQ_FIRST(&vp->v_dirtyblkhd);
655 		if (!blist)
656 			break;
657 
658 		for (bp = blist; bp; bp = nbp) {
659 			nbp = TAILQ_NEXT(bp, b_vnbufs);
660 			if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
661 				error = BUF_TIMELOCK(bp,
662 				    LK_EXCLUSIVE | LK_SLEEPFAIL,
663 				    "vinvalbuf", slpflag, slptimeo);
664 				if (error == ENOLCK)
665 					break;
666 				splx(s);
667 				return (error);
668 			}
669 			/*
670 			 * XXX Since there are no node locks for NFS, I
671 			 * believe there is a slight chance that a delayed
672 			 * write will occur while sleeping just above, so
673 			 * check for it.  Note that vfs_bio_awrite expects
674 			 * buffers to reside on a queue, while VOP_BWRITE and
675 			 * brelse do not.
676 			 */
677 			if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
678 				(flags & V_SAVE)) {
679 
680 				if (bp->b_vp == vp) {
681 					if (bp->b_flags & B_CLUSTEROK) {
682 						BUF_UNLOCK(bp);
683 						vfs_bio_awrite(bp);
684 					} else {
685 						bremfree(bp);
686 						bp->b_flags |= B_ASYNC;
687 						BUF_WRITE(bp);
688 					}
689 				} else {
690 					bremfree(bp);
691 					(void) BUF_WRITE(bp);
692 				}
693 				break;
694 			}
695 			bremfree(bp);
696 			bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
697 			bp->b_flags &= ~B_ASYNC;
698 			brelse(bp);
699 		}
700 	}
701 
702 	while (vp->v_numoutput > 0) {
703 		vp->v_flag |= VBWAIT;
704 		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
705 	}
706 
707 	splx(s);
708 
709 	/*
710 	 * Destroy the copy in the VM cache, too.
711 	 */
712 	simple_lock(&vp->v_interlock);
713 	object = vp->v_object;
714 	if (object != NULL) {
715 		vm_object_page_remove(object, 0, 0,
716 			(flags & V_SAVE) ? TRUE : FALSE);
717 	}
718 	simple_unlock(&vp->v_interlock);
719 
720 	if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd))
721 		panic("vinvalbuf: flush failed");
722 	return (0);
723 }
724 
725 /*
726  * Truncate a file's buffers and pages to a specified length.  This
727  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
728  * sync activity.
729  */
730 int
731 vtruncbuf(vp, cred, p, length, blksize)
732 	register struct vnode *vp;
733 	struct ucred *cred;
734 	struct proc *p;
735 	off_t length;
736 	int blksize;
737 {
738 	register struct buf *bp;
739 	struct buf *nbp;
740 	int s, anyfreed;
741 	int trunclbn;
742 
743 	/*
744 	 * Round up to the *next* lbn.
745 	 */
746 	trunclbn = (length + blksize - 1) / blksize;
747 
748 	s = splbio();
749 restart:
750 	anyfreed = 1;
751 	for (;anyfreed;) {
752 		anyfreed = 0;
753 		for (bp = TAILQ_FIRST(&vp->v_cleanblkhd); bp; bp = nbp) {
754 			nbp = TAILQ_NEXT(bp, b_vnbufs);
755 			if (bp->b_lblkno >= trunclbn) {
756 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
757 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
758 					goto restart;
759 				} else {
760 					bremfree(bp);
761 					bp->b_flags |= (B_INVAL | B_RELBUF);
762 					bp->b_flags &= ~B_ASYNC;
763 					brelse(bp);
764 					anyfreed = 1;
765 				}
766 				if (nbp &&
767 				    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
768 				    (nbp->b_vp != vp) ||
769 				    (nbp->b_flags & B_DELWRI))) {
770 					goto restart;
771 				}
772 			}
773 		}
774 
775 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
776 			nbp = TAILQ_NEXT(bp, b_vnbufs);
777 			if (bp->b_lblkno >= trunclbn) {
778 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
779 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
780 					goto restart;
781 				} else {
782 					bremfree(bp);
783 					bp->b_flags |= (B_INVAL | B_RELBUF);
784 					bp->b_flags &= ~B_ASYNC;
785 					brelse(bp);
786 					anyfreed = 1;
787 				}
788 				if (nbp &&
789 				    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
790 				    (nbp->b_vp != vp) ||
791 				    (nbp->b_flags & B_DELWRI) == 0)) {
792 					goto restart;
793 				}
794 			}
795 		}
796 	}
797 
798 	if (length > 0) {
799 restartsync:
800 		for (bp = TAILQ_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
801 			nbp = TAILQ_NEXT(bp, b_vnbufs);
802 			if ((bp->b_flags & B_DELWRI) && (bp->b_lblkno < 0)) {
803 				if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
804 					BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL);
805 					goto restart;
806 				} else {
807 					bremfree(bp);
808 					if (bp->b_vp == vp) {
809 						bp->b_flags |= B_ASYNC;
810 					} else {
811 						bp->b_flags &= ~B_ASYNC;
812 					}
813 					BUF_WRITE(bp);
814 				}
815 				goto restartsync;
816 			}
817 
818 		}
819 	}
820 
821 	while (vp->v_numoutput > 0) {
822 		vp->v_flag |= VBWAIT;
823 		tsleep(&vp->v_numoutput, PVM, "vbtrunc", 0);
824 	}
825 
826 	splx(s);
827 
828 	vnode_pager_setsize(vp, length);
829 
830 	return (0);
831 }
832 
833 /*
834  * Associate a buffer with a vnode.
835  */
836 void
837 bgetvp(vp, bp)
838 	register struct vnode *vp;
839 	register struct buf *bp;
840 {
841 	int s;
842 
843 	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
844 
845 	vhold(vp);
846 	bp->b_vp = vp;
847 	bp->b_dev = vn_todev(vp);
848 	/*
849 	 * Insert onto list for new vnode.
850 	 */
851 	s = splbio();
852 	bp->b_xflags |= BX_VNCLEAN;
853 	bp->b_xflags &= ~BX_VNDIRTY;
854 	TAILQ_INSERT_TAIL(&vp->v_cleanblkhd, bp, b_vnbufs);
855 	splx(s);
856 }
857 
858 /*
859  * Disassociate a buffer from a vnode.
860  */
861 void
862 brelvp(bp)
863 	register struct buf *bp;
864 {
865 	struct vnode *vp;
866 	struct buflists *listheadp;
867 	int s;
868 
869 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
870 
871 	/*
872 	 * Delete from old vnode list, if on one.
873 	 */
874 	vp = bp->b_vp;
875 	s = splbio();
876 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
877 		if (bp->b_xflags & BX_VNDIRTY)
878 			listheadp = &vp->v_dirtyblkhd;
879 		else
880 			listheadp = &vp->v_cleanblkhd;
881 		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
882 		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
883 	}
884 	if ((vp->v_flag & VONWORKLST) && TAILQ_EMPTY(&vp->v_dirtyblkhd)) {
885 		vp->v_flag &= ~VONWORKLST;
886 		LIST_REMOVE(vp, v_synclist);
887 	}
888 	splx(s);
889 	bp->b_vp = (struct vnode *) 0;
890 	vdrop(vp);
891 }
892 
893 /*
894  * The workitem queue.
895  *
896  * It is useful to delay writes of file data and filesystem metadata
897  * for tens of seconds so that quickly created and deleted files need
898  * not waste disk bandwidth being created and removed. To realize this,
899  * we append vnodes to a "workitem" queue. When running with a soft
900  * updates implementation, most pending metadata dependencies should
901  * not wait for more than a few seconds.  Thus, metadata written through
902  * a mounted block device is delayed only about half as long as file data.
903  * Similarly, directory updates are more critical, so they are delayed
904  * only about a third as long as file data. Thus, there are
905  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
906  * one each second (driven off the filesystem syncer process). The
907  * syncer_delayno variable indicates the next queue that is to be processed.
908  * Items that need to be processed soon are placed in this queue:
909  *
910  *	syncer_workitem_pending[syncer_delayno]
911  *
912  * A delay of fifteen seconds is done by placing the request fifteen
913  * entries later in the queue:
914  *
915  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
916  *
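 * Note that vn_syncer_add_to_worklist() below clamps the requested
 * delay to syncer_maxdelay - 2 slots, so a request can never wrap all
 * the way around the table and come due immediately.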
917  */
918 
919 /*
920  * Add an item to the syncer work queue.
921  */
922 static void
923 vn_syncer_add_to_worklist(struct vnode *vp, int delay)
924 {
925 	int s, slot;
926 
927 	s = splbio();
928 
929 	if (vp->v_flag & VONWORKLST) {
930 		LIST_REMOVE(vp, v_synclist);
931 	}
932 
933 	if (delay > syncer_maxdelay - 2)
934 		delay = syncer_maxdelay - 2;
935 	slot = (syncer_delayno + delay) & syncer_mask;
936 
937 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], vp, v_synclist);
938 	vp->v_flag |= VONWORKLST;
939 	splx(s);
940 }
941 
942 struct  proc *updateproc;
943 static void sched_sync __P((void));
944 static struct kproc_desc up_kp = {
945 	"syncer",
946 	sched_sync,
947 	&updateproc
948 };
949 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)
950 
951 /*
952  * System filesystem synchronizer daemon.
953  */
954 void
955 sched_sync(void)
956 {
957 	struct synclist *slp;
958 	struct vnode *vp;
959 	struct mount *mp;
960 	long starttime;
961 	int s;
962 	struct proc *p = updateproc;
963 
964 	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, p,
965 	    SHUTDOWN_PRI_LAST);
966 
967 	for (;;) {
968 		kproc_suspend_loop(p);
969 
970 		starttime = time_second;
971 
972 		/*
973 		 * Push files whose dirty time has expired.  Be careful
974 		 * of interrupt race on slp queue.
975 		 */
976 		s = splbio();
977 		slp = &syncer_workitem_pending[syncer_delayno];
978 		syncer_delayno += 1;
979 		if (syncer_delayno == syncer_maxdelay)
980 			syncer_delayno = 0;
981 		splx(s);
982 
983 		while ((vp = LIST_FIRST(slp)) != NULL) {
984 			if (VOP_ISLOCKED(vp, NULL) == 0 &&
985 			    vn_start_write(vp, &mp, V_NOWAIT) == 0) {
986 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
987 				(void) VOP_FSYNC(vp, p->p_ucred, MNT_LAZY, p);
988 				VOP_UNLOCK(vp, 0, p);
989 				vn_finished_write(mp);
990 			}
991 			s = splbio();
992 			if (LIST_FIRST(slp) == vp) {
993 				/*
994 				 * Note: v_tag VT_VFS vps can remain on the
995 				 * worklist too with no dirty blocks, but
996 				 * since sync_fsync() moves it to a different
997 				 * slot we are safe.
998 				 */
999 				if (TAILQ_EMPTY(&vp->v_dirtyblkhd) &&
1000 				    !vn_isdisk(vp, NULL))
1001 					panic("sched_sync: fsync failed vp %p tag %d", vp, vp->v_tag);
1002 				/*
1003 				 * Put us back on the worklist.  The worklist
1004 				 * routine will remove us from our current
1005 				 * position and then add us back in at a later
1006 				 * position.
1007 				 */
1008 				vn_syncer_add_to_worklist(vp, syncdelay);
1009 			}
1010 			splx(s);
1011 		}
1012 
1013 		/*
1014 		 * Do soft update processing.
1015 		 */
1016 #ifdef SOFTUPDATES
1017 		softdep_process_worklist(NULL);
1018 #endif
1019 
1020 		/*
1021 		 * The variable rushjob allows the kernel to speed up the
1022 		 * processing of the filesystem syncer process. A rushjob
1023 		 * value of N tells the filesystem syncer to process the next
1024 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1025 		 * is used by the soft update code to speed up the filesystem
1026 		 * syncer process when the incore state is getting so far
1027 		 * ahead of the disk that the kernel memory pool is being
1028 		 * threatened with exhaustion.
1029 		 */
1030 		if (rushjob > 0) {
1031 			rushjob -= 1;
1032 			continue;
1033 		}
1034 		/*
1035 		 * If it has taken us less than a second to process the
1036 		 * current work, then wait. Otherwise start right over
1037 		 * again. We can still lose time if any single round
1038 		 * takes more than two seconds, but it does not really
1039 		 * matter as we are just trying to generally pace the
1040 		 * filesystem activity.
1041 		 */
1042 		if (time_second == starttime)
1043 			tsleep(&lbolt, PPAUSE, "syncer", 0);
1044 	}
1045 }
1046 
1047 /*
1048  * Request the syncer daemon to speed up its work.
1049  * We never push it to speed up more than half of its
1050  * normal turn time; otherwise it could take over the CPU.
1051  */
1052 int
1053 speedup_syncer()
1054 {
1055 	int s;
1056 
1057 	s = splhigh();
1058 	if (updateproc->p_wchan == &lbolt)
1059 		setrunnable(updateproc);
1060 	splx(s);
1061 	if (rushjob < syncdelay / 2) {
1062 		rushjob += 1;
1063 		stat_rush_requests += 1;
1064 		return (1);
1065 	}
1066 	return(0);
1067 }
1068 
1069 /*
1070  * Associate a p-buffer with a vnode.
1071  *
1072  * Also sets B_PAGING flag to indicate that vnode is not fully associated
1073  * with the buffer, i.e., the bp has not been linked into the vnode or
1074  * ref-counted.
1075  */
1076 void
1077 pbgetvp(vp, bp)
1078 	register struct vnode *vp;
1079 	register struct buf *bp;
1080 {
1081 
1082 	KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
1083 
1084 	bp->b_vp = vp;
1085 	bp->b_flags |= B_PAGING;
1086 	bp->b_dev = vn_todev(vp);
1087 }
1088 
1089 /*
1090  * Disassociate a p-buffer from a vnode.
1091  */
1092 void
1093 pbrelvp(bp)
1094 	register struct buf *bp;
1095 {
1096 
1097 	KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
1098 
1099 	/* XXX REMOVE ME */
1100 	if (bp->b_vnbufs.tqe_next != NULL) {
1101 		panic(
1102 		    "relpbuf(): b_vp was probably reassignbuf()d %p %x",
1103 		    bp,
1104 		    (int)bp->b_flags
1105 		);
1106 	}
1107 	bp->b_vp = (struct vnode *) 0;
1108 	bp->b_flags &= ~B_PAGING;
1109 }
1110 
1111 void
1112 pbreassignbuf(bp, newvp)
1113 	struct buf *bp;
1114 	struct vnode *newvp;
1115 {
1116 	if ((bp->b_flags & B_PAGING) == 0) {
1117 		panic(
1118 		    "pbreassignbuf() on non phys bp %p",
1119 		    bp
1120 		);
1121 	}
1122 	bp->b_vp = newvp;
1123 }
1124 
1125 /*
1126  * Reassign a buffer from one vnode to another.
1127  * Used to assign file specific control information
1128  * (indirect blocks) to the vnode to which they belong.
1129  */
1130 void
1131 reassignbuf(bp, newvp)
1132 	register struct buf *bp;
1133 	register struct vnode *newvp;
1134 {
1135 	struct buflists *listheadp;
1136 	int delay;
1137 	int s;
1138 
1139 	if (newvp == NULL) {
1140 		printf("reassignbuf: NULL\n");
1141 		return;
1142 	}
1143 	++reassignbufcalls;
1144 
1145 	/*
1146 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1147 	 * is not fully linked in.
1148 	 */
1149 	if (bp->b_flags & B_PAGING)
1150 		panic("cannot reassign paging buffer");
1151 
1152 	s = splbio();
1153 	/*
1154 	 * Delete from old vnode list, if on one.
1155 	 */
1156 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) {
1157 		if (bp->b_xflags & BX_VNDIRTY)
1158 			listheadp = &bp->b_vp->v_dirtyblkhd;
1159 		else
1160 			listheadp = &bp->b_vp->v_cleanblkhd;
1161 		TAILQ_REMOVE(listheadp, bp, b_vnbufs);
1162 		bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1163 		if (bp->b_vp != newvp) {
1164 			vdrop(bp->b_vp);
1165 			bp->b_vp = NULL;	/* for clarification */
1166 		}
1167 	}
1168 	/*
1169 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1170 	 * of clean buffers.
1171 	 */
1172 	if (bp->b_flags & B_DELWRI) {
1173 		struct buf *tbp;
1174 
1175 		listheadp = &newvp->v_dirtyblkhd;
1176 		if ((newvp->v_flag & VONWORKLST) == 0) {
1177 			switch (newvp->v_type) {
1178 			case VDIR:
1179 				delay = dirdelay;
1180 				break;
1181 			case VCHR:
1182 			case VBLK:
1183 				if (newvp->v_specmountpoint != NULL) {
1184 					delay = metadelay;
1185 					break;
1186 				}
1187 				/* fall through */
1188 			default:
1189 				delay = filedelay;
1190 			}
1191 			vn_syncer_add_to_worklist(newvp, delay);
1192 		}
1193 		bp->b_xflags |= BX_VNDIRTY;
1194 		tbp = TAILQ_FIRST(listheadp);
1195 		if (tbp == NULL ||
1196 		    bp->b_lblkno == 0 ||
1197 		    (bp->b_lblkno > 0 && tbp->b_lblkno < 0) ||
1198 		    (bp->b_lblkno > 0 && bp->b_lblkno < tbp->b_lblkno)) {
1199 			TAILQ_INSERT_HEAD(listheadp, bp, b_vnbufs);
1200 			++reassignbufsortgood;
1201 		} else if (bp->b_lblkno < 0) {
1202 			TAILQ_INSERT_TAIL(listheadp, bp, b_vnbufs);
1203 			++reassignbufsortgood;
1204 		} else if (reassignbufmethod == 1) {
1205 			/*
1206 			 * New sorting algorithm, only handle sequential case,
1207 			 * otherwise append to end (but before metadata)
1208 			 */
1209 			if ((tbp = gbincore(newvp, bp->b_lblkno - 1)) != NULL &&
1210 			    (tbp->b_xflags & BX_VNDIRTY)) {
1211 				/*
1212 				 * Found the best place to insert the buffer
1213 				 */
1214 				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1215 				++reassignbufsortgood;
1216 			} else {
1217 				/*
1218 				 * Missed, append to end, but before meta-data.
1219 				 * We know that the head buffer in the list is
1220 				 * not meta-data due to prior conditionals.
1221 				 *
1222 				 * Indirect effects:  NFS second stage write
1223 				 * tends to wind up here, giving maximum
1224 				 * distance between the unstable write and the
1225 				 * commit rpc.
1226 				 */
1227 				tbp = TAILQ_LAST(listheadp, buflists);
1228 				while (tbp && tbp->b_lblkno < 0)
1229 					tbp = TAILQ_PREV(tbp, buflists, b_vnbufs);
1230 				TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1231 				++reassignbufsortbad;
1232 			}
1233 		} else {
1234 			/*
1235 			 * Old sorting algorithm, scan queue and insert
1236 			 */
1237 			struct buf *ttbp;
1238 			while ((ttbp = TAILQ_NEXT(tbp, b_vnbufs)) &&
1239 			    (ttbp->b_lblkno < bp->b_lblkno)) {
1240 				++reassignbufloops;
1241 				tbp = ttbp;
1242 			}
1243 			TAILQ_INSERT_AFTER(listheadp, tbp, bp, b_vnbufs);
1244 		}
1245 	} else {
1246 		bp->b_xflags |= BX_VNCLEAN;
1247 		TAILQ_INSERT_TAIL(&newvp->v_cleanblkhd, bp, b_vnbufs);
1248 		if ((newvp->v_flag & VONWORKLST) &&
1249 		    TAILQ_EMPTY(&newvp->v_dirtyblkhd)) {
1250 			newvp->v_flag &= ~VONWORKLST;
1251 			LIST_REMOVE(newvp, v_synclist);
1252 		}
1253 	}
1254 	if (bp->b_vp != newvp) {
1255 		bp->b_vp = newvp;
1256 		vhold(bp->b_vp);
1257 	}
1258 	splx(s);
1259 }
1260 
1261 /*
1262  * Create a vnode for a block device.
1263  * Used for mounting the root file system.
1264  * XXX: This now creates a VCHR vnode due to the block/char merging.
1265  */
1266 int
1267 bdevvp(dev, vpp)
1268 	dev_t dev;
1269 	struct vnode **vpp;
1270 {
1271 	register struct vnode *vp;
1272 	struct vnode *nvp;
1273 	int error;
1274 
1275 	if (dev == NODEV) {
1276 		*vpp = NULLVP;
1277 		return (ENXIO);
1278 	}
1279 	error = getnewvnode(VT_NON, (struct mount *)0, spec_vnodeop_p, &nvp);
1280 	if (error) {
1281 		*vpp = NULLVP;
1282 		return (error);
1283 	}
1284 	vp = nvp;
1285 	vp->v_type = VCHR;
1286 	addalias(vp, dev);
1287 	*vpp = vp;
1288 	return (0);
1289 }
1290 
1291 /*
1292  * Add vnode to the alias list hung off the dev_t.
1293  *
1294  * The reason for this gunk is that multiple vnodes can reference
1295  * the same physical device, so checking vp->v_usecount to see
1296  * how many users there are is inadequate; the v_usecount values for
1297  * all the vnodes need to be accumulated.  vcount() does that.
1298  */
1299 void
1300 addaliasu(nvp, nvp_rdev)
1301 	struct vnode *nvp;
1302 	udev_t nvp_rdev;
1303 {
1304 
1305 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
1306 		panic("addaliasu on non-special vnode");
1307 	addalias(nvp, udev2dev(nvp_rdev, nvp->v_type == VBLK ? 1 : 0));
1308 }
1309 
1310 void
1311 addalias(nvp, dev)
1312 	struct vnode *nvp;
1313 	dev_t dev;
1314 {
1315 
1316 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
1317 		panic("addalias on non-special vnode");
1318 
1319 	nvp->v_rdev = dev;
1320 	simple_lock(&spechash_slock);
1321 	SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext);
1322 	simple_unlock(&spechash_slock);
1323 }
1324 
1325 /*
1326  * Grab a particular vnode from the free list, increment its
1327  * reference count and lock it. The vnode lock bit is set if the
1328  * vnode is being eliminated in vgone. The process is awakened
1329  * when the transition is completed, and an error returned to
1330  * indicate that the vnode is no longer usable (possibly having
1331  * been changed to a new file system type).
1332  */
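/*
 * Typical usage (sketch; the per-filesystem inode hash lookups are the
 * canonical callers):
 *
 *	if (vget(vp, LK_EXCLUSIVE, p) != 0)
 *		goto retry_lookup;
 *
 * where a non-zero return means the vnode was reclaimed out from under
 * us and the lookup must be retried.
 */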
1333 int
1334 vget(vp, flags, p)
1335 	register struct vnode *vp;
1336 	int flags;
1337 	struct proc *p;
1338 {
1339 	int error;
1340 
1341 	/*
1342 	 * If the vnode is in the process of being cleaned out for
1343 	 * another use, we wait for the cleaning to finish and then
1344 	 * return failure. Cleaning is determined by checking that
1345 	 * the VXLOCK flag is set.
1346 	 */
1347 	if ((flags & LK_INTERLOCK) == 0) {
1348 		simple_lock(&vp->v_interlock);
1349 	}
1350 	if (vp->v_flag & VXLOCK) {
1351 		vp->v_flag |= VXWANT;
1352 		simple_unlock(&vp->v_interlock);
1353 		tsleep((caddr_t)vp, PINOD, "vget", 0);
1354 		return (ENOENT);
1355 	}
1356 
1357 	vp->v_usecount++;
1358 
1359 	if (VSHOULDBUSY(vp))
1360 		vbusy(vp);
1361 	if (flags & LK_TYPE_MASK) {
1362 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0) {
1363 			/*
1364 			 * must expand vrele here because we do not want
1365 			 * to call VOP_INACTIVE if the reference count
1366 			 * drops back to zero since it was never really
1367 			 * active. We must remove it from the free list
1368 			 * before sleeping so that multiple processes do
1369 			 * not try to recycle it.
1370 			 */
1371 			simple_lock(&vp->v_interlock);
1372 			vp->v_usecount--;
1373 			if (VSHOULDFREE(vp))
1374 				vfree(vp);
1375 			simple_unlock(&vp->v_interlock);
1376 		}
1377 		return (error);
1378 	}
1379 	simple_unlock(&vp->v_interlock);
1380 	return (0);
1381 }
1382 
1383 void
1384 vref(struct vnode *vp)
1385 {
1386 	simple_lock(&vp->v_interlock);
1387 	vp->v_usecount++;
1388 	simple_unlock(&vp->v_interlock);
1389 }
1390 
1391 /*
1392  * Vnode put/release.
1393  * If count drops to zero, call inactive routine and return to freelist.
1394  */
1395 void
1396 vrele(vp)
1397 	struct vnode *vp;
1398 {
1399 	struct proc *p = curproc;	/* XXX */
1400 
1401 	KASSERT(vp != NULL, ("vrele: null vp"));
1402 	KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close"));
1403 
1404 	simple_lock(&vp->v_interlock);
1405 
1406 	if (vp->v_usecount > 1) {
1407 
1408 		vp->v_usecount--;
1409 		simple_unlock(&vp->v_interlock);
1410 
1411 		return;
1412 	}
1413 
1414 	if (vp->v_usecount == 1) {
1415 
1416 		vp->v_usecount--;
1417 		if (VSHOULDFREE(vp))
1418 			vfree(vp);
1419 	/*
1420 	 * If we are doing a vput, the node is already locked, and we must
1421 	 * call VOP_INACTIVE with the node locked.  So, in the case of
1422 	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
1423 	 */
1424 		if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
1425 			VOP_INACTIVE(vp, p);
1426 		}
1427 
1428 	} else {
1429 #ifdef DIAGNOSTIC
1430 		vprint("vrele: negative ref count", vp);
1431 		simple_unlock(&vp->v_interlock);
1432 #endif
1433 		panic("vrele: negative ref cnt");
1434 	}
1435 }
1436 
1437 void
1438 vput(vp)
1439 	struct vnode *vp;
1440 {
1441 	struct proc *p = curproc;	/* XXX */
1442 
1443 	KASSERT(vp != NULL, ("vput: null vp"));
1444 	KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close"));
1445 
1446 	simple_lock(&vp->v_interlock);
1447 
1448 	if (vp->v_usecount > 1) {
1449 
1450 		vp->v_usecount--;
1451 		VOP_UNLOCK(vp, LK_INTERLOCK, p);
1452 		return;
1453 
1454 	}
1455 
1456 	if (vp->v_usecount == 1) {
1457 
1458 		vp->v_usecount--;
1459 		if (VSHOULDFREE(vp))
1460 			vfree(vp);
1461 	/*
1462 	 * If we are doing a vput, the node is already locked, and we must
1463 	 * call VOP_INACTIVE with the node locked.  So, in the case of
1464 	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
1465 	 */
1466 		simple_unlock(&vp->v_interlock);
1467 		VOP_INACTIVE(vp, p);
1468 
1469 	} else {
1470 #ifdef DIAGNOSTIC
1471 		vprint("vput: negative ref count", vp);
1472 #endif
1473 		panic("vput: negative ref cnt");
1474 	}
1475 }
1476 
1477 /*
1478  * Somebody doesn't want the vnode recycled.
1479  */
1480 void
1481 vhold(vp)
1482 	register struct vnode *vp;
1483 {
1484 	int s;
1485 
1486   	s = splbio();
1487 	vp->v_holdcnt++;
1488 	if (VSHOULDBUSY(vp))
1489 		vbusy(vp);
1490 	splx(s);
1491 }
1492 
1493 /*
1494  * One less who cares about this vnode.
1495  */
1496 void
1497 vdrop(vp)
1498 	register struct vnode *vp;
1499 {
1500 	int s;
1501 
1502 	s = splbio();
1503 	if (vp->v_holdcnt <= 0)
1504 		panic("vdrop: holdcnt");
1505 	vp->v_holdcnt--;
1506 	if (VSHOULDFREE(vp))
1507 		vfree(vp);
1508 	splx(s);
1509 }
1510 
1511 /*
1512  * Remove any vnodes in the vnode table belonging to mount point mp.
1513  *
1514  * If MNT_NOFORCE is specified, there should not be any active ones;
1515  * an error is returned if any are found (nb: this is a user error, not a
1516  * system error). If MNT_FORCE is specified, detach any active vnodes
1517  * that are found.
1518  */
1519 #ifdef DIAGNOSTIC
1520 static int busyprt = 0;		/* print out busy vnodes */
1521 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
1522 #endif
1523 
1524 int
1525 vflush(mp, skipvp, flags)
1526 	struct mount *mp;
1527 	struct vnode *skipvp;
1528 	int flags;
1529 {
1530 	struct proc *p = curproc;	/* XXX */
1531 	struct vnode *vp, *nvp;
1532 	int busy = 0;
1533 
1534 	simple_lock(&mntvnode_slock);
1535 loop:
1536 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
1537 		/*
1538 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
1539 		 * Start over if it has (it won't be on the list anymore).
1540 		 */
1541 		if (vp->v_mount != mp)
1542 			goto loop;
1543 		nvp = LIST_NEXT(vp, v_mntvnodes);
1544 		/*
1545 		 * Skip over a selected vnode.
1546 		 */
1547 		if (vp == skipvp)
1548 			continue;
1549 
1550 		simple_lock(&vp->v_interlock);
1551 		/*
1552  * Skip over vnodes marked VSYSTEM.
1553 		 */
1554 		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1555 			simple_unlock(&vp->v_interlock);
1556 			continue;
1557 		}
1558 		/*
1559 		 * If WRITECLOSE is set, only flush out regular file vnodes
1560 		 * open for writing.
1561 		 */
1562 		if ((flags & WRITECLOSE) &&
1563 		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1564 			simple_unlock(&vp->v_interlock);
1565 			continue;
1566 		}
1567 
1568 		/*
1569 		 * With v_usecount == 0, all we need to do is clear out the
1570 		 * vnode data structures and we are done.
1571 		 */
1572 		if (vp->v_usecount == 0) {
1573 			simple_unlock(&mntvnode_slock);
1574 			vgonel(vp, p);
1575 			simple_lock(&mntvnode_slock);
1576 			continue;
1577 		}
1578 
1579 		/*
1580 		 * If FORCECLOSE is set, forcibly close the vnode. For block
1581 		 * or character devices, revert to an anonymous device. For
1582 		 * all other files, just kill them.
1583 		 */
1584 		if (flags & FORCECLOSE) {
1585 			simple_unlock(&mntvnode_slock);
1586 			if (vp->v_type != VBLK && vp->v_type != VCHR) {
1587 				vgonel(vp, p);
1588 			} else {
1589 				vclean(vp, 0, p);
1590 				vp->v_op = spec_vnodeop_p;
1591 				insmntque(vp, (struct mount *) 0);
1592 			}
1593 			simple_lock(&mntvnode_slock);
1594 			continue;
1595 		}
1596 #ifdef DIAGNOSTIC
1597 		if (busyprt)
1598 			vprint("vflush: busy vnode", vp);
1599 #endif
1600 		simple_unlock(&vp->v_interlock);
1601 		busy++;
1602 	}
1603 	simple_unlock(&mntvnode_slock);
1604 	if (busy)
1605 		return (EBUSY);
1606 	return (0);
1607 }
1608 
1609 /*
1610  * Disassociate the underlying file system from a vnode.
1611  */
1612 static void
1613 vclean(vp, flags, p)
1614 	struct vnode *vp;
1615 	int flags;
1616 	struct proc *p;
1617 {
1618 	int active;
1619 	vm_object_t obj;
1620 
1621 	/*
1622 	 * Check to see if the vnode is in use. If so we have to reference it
1623 	 * before we clean it out so that its count cannot fall to zero and
1624 	 * generate a race against ourselves to recycle it.
1625 	 */
1626 	if ((active = vp->v_usecount))
1627 		vp->v_usecount++;
1628 
1629 	/*
1630 	 * Prevent the vnode from being recycled or brought into use while we
1631 	 * clean it out.
1632 	 */
1633 	if (vp->v_flag & VXLOCK)
1634 		panic("vclean: deadlock");
1635 	vp->v_flag |= VXLOCK;
1636 	/*
1637 	 * Even if the count is zero, the VOP_INACTIVE routine may still
1638 	 * have the object locked while it cleans it out. The VOP_LOCK
1639 	 * ensures that the VOP_INACTIVE routine is done with its work.
1640 	 * For active vnodes, it ensures that no other activity can
1641 	 * occur while the underlying object is being cleaned out.
1642 	 */
1643 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
1644 
1645 	/*
1646 	 * Clean out any buffers associated with the vnode.
1647 	 * If the flush fails, just toss the buffers.
1648 	 */
1649 	if (flags & DOCLOSE) {
1650 		if (TAILQ_FIRST(&vp->v_dirtyblkhd) != NULL)
1651 			(void) vn_write_suspend_wait(vp, V_WAIT);
1652 		if (vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0) != 0)
1653 			vinvalbuf(vp, 0, NOCRED, p, 0, 0);
1654 	}
1655 
1656 	if ((obj = vp->v_object) != NULL) {
1657 		if (obj->ref_count == 0) {
1658 			/*
1659 			 * vclean() may be called twice. The first time
1660  * removes the primary reference to the object;
1661 			 * the second time goes one further and is a
1662 			 * special-case to terminate the object.
1663 			 */
1664 			vm_object_terminate(obj);
1665 		} else {
1666 			/*
1667 			 * Woe to the process that tries to page now :-).
1668 			 */
1669 			vm_pager_deallocate(obj);
1670 		}
1671 	}
1672 
1673 	/*
1674 	 * If purging an active vnode, it must be closed and
1675 	 * deactivated before being reclaimed. Note that the
1676 	 * VOP_INACTIVE will unlock the vnode.
1677 	 */
1678 	if (active) {
1679 		if (flags & DOCLOSE)
1680 			VOP_CLOSE(vp, FNONBLOCK, NOCRED, p);
1681 		VOP_INACTIVE(vp, p);
1682 	} else {
1683 		/*
1684 		 * Any other processes trying to obtain this lock must first
1685 		 * wait for VXLOCK to clear, then call the new lock operation.
1686 		 */
1687 		VOP_UNLOCK(vp, 0, p);
1688 	}
1689 	/*
1690 	 * Reclaim the vnode.
1691 	 */
1692 	if (VOP_RECLAIM(vp, p))
1693 		panic("vclean: cannot reclaim");
1694 
1695 	if (active) {
1696 		/*
1697 		 * Inline copy of vrele() since VOP_INACTIVE
1698 		 * has already been called.
1699 		 */
1700 		simple_lock(&vp->v_interlock);
1701 		if (--vp->v_usecount <= 0) {
1702 #ifdef DIAGNOSTIC
1703 			if (vp->v_usecount < 0 || vp->v_writecount != 0) {
1704 				vprint("vclean: bad ref count", vp);
1705 				panic("vclean: ref cnt");
1706 			}
1707 #endif
1708 			vfree(vp);
1709 		}
1710 		simple_unlock(&vp->v_interlock);
1711 	}
1712 
1713 	cache_purge(vp);
1714 	if (vp->v_vnlock) {
1715 		FREE(vp->v_vnlock, M_VNODE);
1716 		vp->v_vnlock = NULL;
1717 	}
1718 
1719 	if (VSHOULDFREE(vp))
1720 		vfree(vp);
1721 
1722 	/*
1723 	 * Done with purge, notify sleepers of the grim news.
1724 	 */
1725 	vp->v_op = dead_vnodeop_p;
1726 	vn_pollgone(vp);
1727 	vp->v_tag = VT_NON;
1728 	vp->v_flag &= ~VXLOCK;
1729 	if (vp->v_flag & VXWANT) {
1730 		vp->v_flag &= ~VXWANT;
1731 		wakeup((caddr_t) vp);
1732 	}
1733 }
1734 
1735 /*
1736  * Eliminate all activity associated with the requested vnode
1737  * and with all vnodes aliased to the requested vnode.
1738  */
1739 int
1740 vop_revoke(ap)
1741 	struct vop_revoke_args /* {
1742 		struct vnode *a_vp;
1743 		int a_flags;
1744 	} */ *ap;
1745 {
1746 	struct vnode *vp, *vq;
1747 	dev_t dev;
1748 
1749 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("vop_revoke"));
1750 
1751 	vp = ap->a_vp;
1752 	/*
1753 	 * If a vgone (or vclean) is already in progress,
1754 	 * wait until it is done and return.
1755 	 */
1756 	if (vp->v_flag & VXLOCK) {
1757 		vp->v_flag |= VXWANT;
1758 		simple_unlock(&vp->v_interlock);
1759 		tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
1760 		return (0);
1761 	}
1762 	dev = vp->v_rdev;
1763 	for (;;) {
1764 		simple_lock(&spechash_slock);
1765 		vq = SLIST_FIRST(&dev->si_hlist);
1766 		simple_unlock(&spechash_slock);
1767 		if (!vq)
1768 			break;
1769 		vgone(vq);
1770 	}
1771 	return (0);
1772 }
1773 
1774 /*
1775  * Recycle an unused vnode to the front of the free list.
1776  * Release the passed interlock if the vnode will be recycled.
1777  */
1778 int
1779 vrecycle(vp, inter_lkp, p)
1780 	struct vnode *vp;
1781 	struct simplelock *inter_lkp;
1782 	struct proc *p;
1783 {
1784 
1785 	simple_lock(&vp->v_interlock);
1786 	if (vp->v_usecount == 0) {
1787 		if (inter_lkp) {
1788 			simple_unlock(inter_lkp);
1789 		}
1790 		vgonel(vp, p);
1791 		return (1);
1792 	}
1793 	simple_unlock(&vp->v_interlock);
1794 	return (0);
1795 }
1796 
1797 /*
1798  * Eliminate all activity associated with a vnode
1799  * in preparation for reuse.
1800  */
1801 void
1802 vgone(vp)
1803 	register struct vnode *vp;
1804 {
1805 	struct proc *p = curproc;	/* XXX */
1806 
1807 	simple_lock(&vp->v_interlock);
1808 	vgonel(vp, p);
1809 }
1810 
1811 /*
1812  * vgone, with the vp interlock held.
1813  */
1814 void
1815 vgonel(vp, p)
1816 	struct vnode *vp;
1817 	struct proc *p;
1818 {
1819 	int s;
1820 
1821 	/*
1822 	 * If a vgone (or vclean) is already in progress,
1823 	 * wait until it is done and return.
1824 	 */
1825 	if (vp->v_flag & VXLOCK) {
1826 		vp->v_flag |= VXWANT;
1827 		simple_unlock(&vp->v_interlock);
1828 		tsleep((caddr_t)vp, PINOD, "vgone", 0);
1829 		return;
1830 	}
1831 
1832 	/*
1833 	 * Clean out the filesystem specific data.
1834 	 */
1835 	vclean(vp, DOCLOSE, p);
1836 	simple_lock(&vp->v_interlock);
1837 
1838 	/*
1839 	 * Delete from old mount point vnode list, if on one.
1840 	 */
1841 	if (vp->v_mount != NULL)
1842 		insmntque(vp, (struct mount *)0);
1843 	/*
1844 	 * If special device, remove it from special device alias list
1845 	 * if it is on one.
1846 	 */
1847 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
1848 		simple_lock(&spechash_slock);
1849 		SLIST_REMOVE(&vp->v_hashchain, vp, vnode, v_specnext);
1850 		freedev(vp->v_rdev);
1851 		simple_unlock(&spechash_slock);
1852 		vp->v_rdev = NULL;
1853 	}
1854 
1855 	/*
1856 	 * If it is on the freelist and not already at the head,
1857 	 * move it to the head of the list. The test of the
1858 	 * VDOOMED flag and the reference count of zero is because
1859 	 * it will be removed from the free list by getnewvnode,
1860 	 * but will not have its reference count incremented until
1861 	 * after calling vgone. If the reference count were
1862 	 * incremented first, vgone would (incorrectly) try to
1863 	 * close the previous instance of the underlying object.
1864 	 */
1865 	if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
1866 		s = splbio();
1867 		simple_lock(&vnode_free_list_slock);
1868 		if (vp->v_flag & VFREE)
1869 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1870 		else
1871 			freevnodes++;
1872 		vp->v_flag |= VFREE;
1873 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1874 		simple_unlock(&vnode_free_list_slock);
1875 		splx(s);
1876 	}
1877 
1878 	vp->v_type = VBAD;
1879 	simple_unlock(&vp->v_interlock);
1880 }
1881 
1882 /*
1883  * Lookup a vnode by device number.
1884  */
1885 int
1886 vfinddev(dev, type, vpp)
1887 	dev_t dev;
1888 	enum vtype type;
1889 	struct vnode **vpp;
1890 {
1891 	struct vnode *vp;
1892 
1893 	simple_lock(&spechash_slock);
1894 	SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) {
1895 		if (type == vp->v_type) {
1896 			*vpp = vp;
1897 			simple_unlock(&spechash_slock);
1898 			return (1);
1899 		}
1900 	}
1901 	simple_unlock(&spechash_slock);
1902 	return (0);
1903 }
1904 
1905 /*
1906  * Calculate the total number of references to a special device.
1907  */
1908 int
1909 vcount(vp)
1910 	struct vnode *vp;
1911 {
1912 	struct vnode *vq;
1913 	int count;
1914 
1915 	count = 0;
1916 	simple_lock(&spechash_slock);
1917 	SLIST_FOREACH(vq, &vp->v_hashchain, v_specnext)
1918 		count += vq->v_usecount;
1919 	simple_unlock(&spechash_slock);
1920 	return (count);
1921 }
1922 
1923 /*
1924  * Same as above, but using the dev_t as argument
1925  */
1926 
1927 int
1928 count_dev(dev)
1929 	dev_t dev;
1930 {
1931 	struct vnode *vp;
1932 
1933 	vp = SLIST_FIRST(&dev->si_hlist);
1934 	if (vp == NULL)
1935 		return (0);
1936 	return(vcount(vp));
1937 }
1938 
1939 /*
1940  * Print out a description of a vnode.
1941  */
1942 static char *typename[] =
1943 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
1944 
1945 void
1946 vprint(label, vp)
1947 	char *label;
1948 	struct vnode *vp;
1949 {
1950 	char buf[96];
1951 
1952 	if (label != NULL)
1953 		printf("%s: %p: ", label, (void *)vp);
1954 	else
1955 		printf("%p: ", (void *)vp);
1956 	printf("type %s, usecount %d, writecount %d, refcount %d,",
1957 	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1958 	    vp->v_holdcnt);
1959 	buf[0] = '\0';
1960 	if (vp->v_flag & VROOT)
1961 		strcat(buf, "|VROOT");
1962 	if (vp->v_flag & VTEXT)
1963 		strcat(buf, "|VTEXT");
1964 	if (vp->v_flag & VSYSTEM)
1965 		strcat(buf, "|VSYSTEM");
1966 	if (vp->v_flag & VXLOCK)
1967 		strcat(buf, "|VXLOCK");
1968 	if (vp->v_flag & VXWANT)
1969 		strcat(buf, "|VXWANT");
1970 	if (vp->v_flag & VBWAIT)
1971 		strcat(buf, "|VBWAIT");
1972 	if (vp->v_flag & VDOOMED)
1973 		strcat(buf, "|VDOOMED");
1974 	if (vp->v_flag & VFREE)
1975 		strcat(buf, "|VFREE");
1976 	if (vp->v_flag & VOBJBUF)
1977 		strcat(buf, "|VOBJBUF");
1978 	if (buf[0] != '\0')
1979 		printf(" flags (%s)", &buf[1]);
1980 	if (vp->v_data == NULL) {
1981 		printf("\n");
1982 	} else {
1983 		printf("\n\t");
1984 		VOP_PRINT(vp);
1985 	}
1986 }
1987 
1988 #ifdef DDB
1989 #include <ddb/ddb.h>
1990 /*
1991  * List all of the locked vnodes in the system.
1992  * Called when debugging the kernel.
1993  */
1994 DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
1995 {
1996 	struct proc *p = curproc;	/* XXX */
1997 	struct mount *mp, *nmp;
1998 	struct vnode *vp;
1999 
2000 	printf("Locked vnodes\n");
2001 	simple_lock(&mountlist_slock);
2002 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2003 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
2004 			nmp = TAILQ_NEXT(mp, mnt_list);
2005 			continue;
2006 		}
2007 		LIST_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
2008 			if (VOP_ISLOCKED(vp, NULL))
				vprint(NULL, vp);
2010 		}
2011 		simple_lock(&mountlist_slock);
2012 		nmp = TAILQ_NEXT(mp, mnt_list);
2013 		vfs_unbusy(mp, p);
2014 	}
2015 	simple_unlock(&mountlist_slock);
2016 }
2017 #endif
2018 
/*
 * Top-level filesystem-related information gathering.
 */
2022 static int	sysctl_ovfs_conf __P((SYSCTL_HANDLER_ARGS));
2023 
2024 static int
2025 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2026 {
2027 	int *name = (int *)arg1 - 1;	/* XXX */
2028 	u_int namelen = arg2 + 1;	/* XXX */
2029 	struct vfsconf *vfsp;
2030 
2031 #if 1 || defined(COMPAT_PRELITE2)
2032 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2033 	if (namelen == 1)
2034 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2035 #endif
2036 
2037 #ifdef notyet
2038 	/* all sysctl names at this level are at least name and field */
2039 	if (namelen < 2)
2040 		return (ENOTDIR);		/* overloaded */
2041 	if (name[0] != VFS_GENERIC) {
2042 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2043 			if (vfsp->vfc_typenum == name[0])
2044 				break;
2045 		if (vfsp == NULL)
2046 			return (EOPNOTSUPP);
2047 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
2048 		    oldp, oldlenp, newp, newlen, p));
2049 	}
2050 #endif
2051 	switch (name[1]) {
2052 	case VFS_MAXTYPENUM:
2053 		if (namelen != 2)
2054 			return (ENOTDIR);
2055 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2056 	case VFS_CONF:
2057 		if (namelen != 3)
2058 			return (ENOTDIR);	/* overloaded */
2059 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
2060 			if (vfsp->vfc_typenum == name[2])
2061 				break;
2062 		if (vfsp == NULL)
2063 			return (EOPNOTSUPP);
2064 		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
2065 	}
2066 	return (EOPNOTSUPP);
2067 }
2068 
2069 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
2070 	"Generic filesystem");
2071 
2072 #if 1 || defined(COMPAT_PRELITE2)
2073 
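/*
 * Report the filesystem configuration in the old (pre-Lite2)
 * vfsconf format, for compatibility with older binaries.
 */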
2074 static int
2075 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
2076 {
2077 	int error;
2078 	struct vfsconf *vfsp;
2079 	struct ovfsconf ovfs;
2080 
2081 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
2082 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
2083 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
2084 		ovfs.vfc_index = vfsp->vfc_typenum;
2085 		ovfs.vfc_refcount = vfsp->vfc_refcount;
2086 		ovfs.vfc_flags = vfsp->vfc_flags;
2087 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
2088 		if (error)
			return (error);
	}
	return (0);
2092 }
2093 
2094 #endif /* 1 || COMPAT_PRELITE2 */
2095 
2096 #if 0
2097 #define KINFO_VNODESLOP	10
2098 /*
2099  * Dump vnode list (via sysctl).
2100  * Copyout address of vnode followed by vnode.
2101  */
2102 /* ARGSUSED */
2103 static int
2104 sysctl_vnode(SYSCTL_HANDLER_ARGS)
2105 {
2106 	struct proc *p = curproc;	/* XXX */
2107 	struct mount *mp, *nmp;
2108 	struct vnode *nvp, *vp;
2109 	int error;
2110 
2111 #define VPTRSZ	sizeof (struct vnode *)
2112 #define VNODESZ	sizeof (struct vnode)
2113 
2114 	req->lock = 0;
2115 	if (!req->oldptr) /* Make an estimate */
2116 		return (SYSCTL_OUT(req, 0,
2117 			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
2118 
2119 	simple_lock(&mountlist_slock);
2120 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2121 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
2122 			nmp = TAILQ_NEXT(mp, mnt_list);
2123 			continue;
2124 		}
2125 again:
2126 		simple_lock(&mntvnode_slock);
2127 		for (vp = LIST_FIRST(&mp->mnt_vnodelist);
2128 		     vp != NULL;
2129 		     vp = nvp) {
2130 			/*
2131 			 * Check that the vp is still associated with
2132 			 * this filesystem.  RACE: could have been
2133 			 * recycled onto the same filesystem.
2134 			 */
2135 			if (vp->v_mount != mp) {
2136 				simple_unlock(&mntvnode_slock);
2137 				goto again;
2138 			}
2139 			nvp = LIST_NEXT(vp, v_mntvnodes);
2140 			simple_unlock(&mntvnode_slock);
2141 			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
2142 			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
2143 				return (error);
2144 			simple_lock(&mntvnode_slock);
2145 		}
2146 		simple_unlock(&mntvnode_slock);
2147 		simple_lock(&mountlist_slock);
2148 		nmp = TAILQ_NEXT(mp, mnt_list);
2149 		vfs_unbusy(mp, p);
2150 	}
2151 	simple_unlock(&mountlist_slock);
2152 
2153 	return (0);
2154 }
2155 #endif
2156 
2157 /*
2158  * XXX
2159  * Exporting the vnode list on large systems causes them to crash.
2160  * Exporting the vnode list on medium systems causes sysctl to coredump.
2161  */
2162 #if 0
2163 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
2164 	0, 0, sysctl_vnode, "S,vnode", "");
2165 #endif
2166 
2167 /*
2168  * Check to see if a filesystem is mounted on a block device.
2169  */
2170 int
2171 vfs_mountedon(vp)
2172 	struct vnode *vp;
2173 {
2174 
2175 	if (vp->v_specmountpoint != NULL)
2176 		return (EBUSY);
2177 	return (0);
2178 }
2179 
2180 /*
2181  * Unmount all filesystems. The list is traversed in reverse order
2182  * of mounting to avoid dependencies.
2183  */
2184 void
2185 vfs_unmountall()
2186 {
2187 	struct mount *mp;
2188 	struct proc *p;
2189 	int error;
2190 
2191 	if (curproc != NULL)
2192 		p = curproc;
2193 	else
2194 		p = initproc;	/* XXX XXX should this be proc0? */
2195 	/*
2196 	 * Since this only runs when rebooting, it is not interlocked.
2197 	 */
	while (!TAILQ_EMPTY(&mountlist)) {
2199 		mp = TAILQ_LAST(&mountlist, mntlist);
2200 		error = dounmount(mp, MNT_FORCE, p);
2201 		if (error) {
2202 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
2203 			printf("unmount of %s failed (",
2204 			    mp->mnt_stat.f_mntonname);
2205 			if (error == EBUSY)
2206 				printf("BUSY)\n");
2207 			else
2208 				printf("%d)\n", error);
2209 		} else {
2210 			/* The unmount has removed mp from the mountlist */
2211 		}
2212 	}
2213 }
2214 
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by vfs_export() to set up the lists of export addresses.
 */
2219 static int
2220 vfs_hang_addrlist(mp, nep, argp)
2221 	struct mount *mp;
2222 	struct netexport *nep;
2223 	struct export_args *argp;
2224 {
2225 	register struct netcred *np;
2226 	register struct radix_node_head *rnh;
2227 	register int i;
2228 	struct radix_node *rn;
2229 	struct sockaddr *saddr, *smask = 0;
2230 	struct domain *dom;
2231 	int error;
2232 
2233 	if (argp->ex_addrlen == 0) {
2234 		if (mp->mnt_flag & MNT_DEFEXPORTED)
2235 			return (EPERM);
2236 		np = &nep->ne_defexported;
2237 		np->netc_exflags = argp->ex_flags;
2238 		np->netc_anon = argp->ex_anon;
2239 		np->netc_anon.cr_ref = 1;
2240 		mp->mnt_flag |= MNT_DEFEXPORTED;
2241 		return (0);
2242 	}
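	/*
	 * Allocate the netcred, the export address, and the mask (if
	 * any) as one contiguous block: the sockaddr is placed
	 * immediately after the netcred, and the mask after the address.
	 */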
2243 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
2244 	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
2245 	bzero((caddr_t) np, i);
2246 	saddr = (struct sockaddr *) (np + 1);
2247 	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
2248 		goto out;
2249 	if (saddr->sa_len > argp->ex_addrlen)
2250 		saddr->sa_len = argp->ex_addrlen;
2251 	if (argp->ex_masklen) {
2252 		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
2253 		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
2254 		if (error)
2255 			goto out;
2256 		if (smask->sa_len > argp->ex_masklen)
2257 			smask->sa_len = argp->ex_masklen;
2258 	}
2259 	i = saddr->sa_family;
2260 	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * It seems silly to initialize every address family when
		 * most are never used; initialize them on demand here.
		 */
2265 		for (dom = domains; dom; dom = dom->dom_next)
2266 			if (dom->dom_family == i && dom->dom_rtattach) {
2267 				dom->dom_rtattach((void **) &nep->ne_rtable[i],
2268 				    dom->dom_rtoffset);
2269 				break;
2270 			}
2271 		if ((rnh = nep->ne_rtable[i]) == 0) {
2272 			error = ENOBUFS;
2273 			goto out;
2274 		}
2275 	}
2276 	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
2277 	    np->netc_rnodes);
2278 	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
2279 		error = EPERM;
2280 		goto out;
2281 	}
2282 	np->netc_exflags = argp->ex_flags;
2283 	np->netc_anon = argp->ex_anon;
2284 	np->netc_anon.cr_ref = 1;
2285 	return (0);
2286 out:
2287 	free(np, M_NETADDR);
2288 	return (error);
2289 }
2290 
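/*
 * Helper for vfs_free_addrlist(): delete one radix tree entry and
 * free the netcred hanging off it.
 */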
2291 /* ARGSUSED */
2292 static int
2293 vfs_free_netcred(rn, w)
2294 	struct radix_node *rn;
2295 	void *w;
2296 {
2297 	register struct radix_node_head *rnh = (struct radix_node_head *) w;
2298 
2299 	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
2300 	free((caddr_t) rn, M_NETADDR);
2301 	return (0);
2302 }
2303 
2304 /*
2305  * Free the net address hash lists that are hanging off the mount points.
2306  */
2307 static void
2308 vfs_free_addrlist(nep)
2309 	struct netexport *nep;
2310 {
2311 	register int i;
2312 	register struct radix_node_head *rnh;
2313 
2314 	for (i = 0; i <= AF_MAX; i++)
2315 		if ((rnh = nep->ne_rtable[i])) {
2316 			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
2317 			    (caddr_t) rnh);
2318 			free((caddr_t) rnh, M_RTABLE);
2319 			nep->ne_rtable[i] = 0;
2320 		}
2321 }
2322 
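/*
 * Update the export information for a mount point.  MNT_DELEXPORT
 * removes any existing export entries (including the WebNFS public
 * filesystem); MNT_EXPORTED installs the new ones.
 */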
2323 int
2324 vfs_export(mp, nep, argp)
2325 	struct mount *mp;
2326 	struct netexport *nep;
2327 	struct export_args *argp;
2328 {
2329 	int error;
2330 
2331 	if (argp->ex_flags & MNT_DELEXPORT) {
2332 		if (mp->mnt_flag & MNT_EXPUBLIC) {
2333 			vfs_setpublicfs(NULL, NULL, NULL);
2334 			mp->mnt_flag &= ~MNT_EXPUBLIC;
2335 		}
2336 		vfs_free_addrlist(nep);
2337 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2338 	}
2339 	if (argp->ex_flags & MNT_EXPORTED) {
2340 		if (argp->ex_flags & MNT_EXPUBLIC) {
2341 			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
2342 				return (error);
2343 			mp->mnt_flag |= MNT_EXPUBLIC;
2344 		}
2345 		if ((error = vfs_hang_addrlist(mp, nep, argp)))
2346 			return (error);
2347 		mp->mnt_flag |= MNT_EXPORTED;
2348 	}
2349 	return (0);
2350 }
2351 
2352 
/*
 * Set the publicly exported filesystem (WebNFS).  Currently, only
 * one public filesystem is allowed per the spec (RFC 2054 and RFC 2055).
 */
2357 int
2358 vfs_setpublicfs(mp, nep, argp)
2359 	struct mount *mp;
2360 	struct netexport *nep;
2361 	struct export_args *argp;
2362 {
2363 	int error;
2364 	struct vnode *rvp;
2365 	char *cp;
2366 
2367 	/*
2368 	 * mp == NULL -> invalidate the current info, the FS is
2369 	 * no longer exported. May be called from either vfs_export
2370 	 * or unmount, so check if it hasn't already been done.
2371 	 */
2372 	if (mp == NULL) {
2373 		if (nfs_pub.np_valid) {
2374 			nfs_pub.np_valid = 0;
2375 			if (nfs_pub.np_index != NULL) {
2376 				FREE(nfs_pub.np_index, M_TEMP);
2377 				nfs_pub.np_index = NULL;
2378 			}
2379 		}
2380 		return (0);
2381 	}
2382 
2383 	/*
2384 	 * Only one allowed at a time.
2385 	 */
2386 	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
2387 		return (EBUSY);
2388 
2389 	/*
2390 	 * Get real filehandle for root of exported FS.
2391 	 */
2392 	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
2393 	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;
2394 
	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	/*
	 * Release the root vnode before checking for an error, so that
	 * it is not leaked if VFS_VPTOFH fails.
	 */
	error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid);
	vput(rvp);
	if (error)
		return (error);
2402 
2403 	/*
2404 	 * If an indexfile was specified, pull it in.
2405 	 */
2406 	if (argp->ex_indexfile != NULL) {
2407 		MALLOC(nfs_pub.np_index, char *, MAXNAMLEN + 1, M_TEMP,
2408 		    M_WAITOK);
2409 		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
2410 		    MAXNAMLEN, (size_t *)0);
2411 		if (!error) {
2412 			/*
2413 			 * Check for illegal filenames.
2414 			 */
2415 			for (cp = nfs_pub.np_index; *cp; cp++) {
2416 				if (*cp == '/') {
2417 					error = EINVAL;
2418 					break;
2419 				}
2420 			}
2421 		}
2422 		if (error) {
2423 			FREE(nfs_pub.np_index, M_TEMP);
2424 			return (error);
2425 		}
2426 	}
2427 
2428 	nfs_pub.np_mount = mp;
2429 	nfs_pub.np_valid = 1;
2430 	return (0);
2431 }
2432 
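/*
 * Look up the export credentials that apply to a request from the
 * given network address.  If no specific address matches, fall back
 * to the default export entry, if one exists; otherwise return NULL.
 */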
2433 struct netcred *
2434 vfs_export_lookup(mp, nep, nam)
2435 	register struct mount *mp;
2436 	struct netexport *nep;
2437 	struct sockaddr *nam;
2438 {
2439 	register struct netcred *np;
2440 	register struct radix_node_head *rnh;
2441 	struct sockaddr *saddr;
2442 
2443 	np = NULL;
2444 	if (mp->mnt_flag & MNT_EXPORTED) {
2445 		/*
2446 		 * Lookup in the export list first.
2447 		 */
2448 		if (nam != NULL) {
2449 			saddr = nam;
2450 			rnh = nep->ne_rtable[saddr->sa_family];
2451 			if (rnh != NULL) {
2452 				np = (struct netcred *)
2453 					(*rnh->rnh_matchaddr)((caddr_t)saddr,
2454 							      rnh);
2455 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2456 					np = NULL;
2457 			}
2458 		}
2459 		/*
2460 		 * If no address match, use the default if it exists.
2461 		 */
		if (np == NULL && (mp->mnt_flag & MNT_DEFEXPORTED))
2463 			np = &nep->ne_defexported;
2464 	}
2465 	return (np);
2466 }
2467 
/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
2474 	struct vnode *vp, *nvp;
2475 	struct vm_object *obj;
2476 	int anyio, tries;
2477 
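	/*
	 * Make up to five passes over the vnode list, rescanning as
	 * long as the previous pass initiated any I/O.
	 */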
2478 	tries = 5;
2479 loop:
2480 	anyio = 0;
2481 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
2482 
2483 		nvp = LIST_NEXT(vp, v_mntvnodes);
2484 
		/* Restart the scan if vp no longer belongs to this mount. */
		if (vp->v_mount != mp)
			goto loop;
2488 
2489 		if (vp->v_flag & VXLOCK)	/* XXX: what if MNT_WAIT? */
2490 			continue;
2491 
2492 		if (flags != MNT_WAIT) {
2493 			obj = vp->v_object;
2494 			if (obj == NULL || (obj->flags & OBJ_MIGHTBEDIRTY) == 0)
2495 				continue;
2496 			if (VOP_ISLOCKED(vp, NULL))
2497 				continue;
2498 		}
2499 
2500 		simple_lock(&vp->v_interlock);
2501 		if (vp->v_object &&
2502 		   (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
			if (!vget(vp, LK_INTERLOCK | LK_EXCLUSIVE |
			    LK_RETRY | LK_NOOBJ, curproc)) {
				if (vp->v_object) {
					vm_object_page_clean(vp->v_object,
					    0, 0, flags == MNT_WAIT ?
					    OBJPC_SYNC : OBJPC_NOSYNC);
2507 					anyio = 1;
2508 				}
2509 				vput(vp);
2510 			}
2511 		} else {
2512 			simple_unlock(&vp->v_interlock);
2513 		}
2514 	}
2515 	if (anyio && (--tries > 0))
2516 		goto loop;
2517 }
2518 
/*
 * Create the VM object needed for VMIO and mmap support.  This
 * is done for all VREG files in the system.  Some filesystems may
 * also gain the additional metadata buffering of the VMIO code by
 * putting the device node into VMIO mode as well.
 *
 * vp must be locked when vfs_object_create is called.
 */
2527 int
2528 vfs_object_create(vp, p, cred)
2529 	struct vnode *vp;
2530 	struct proc *p;
2531 	struct ucred *cred;
2532 {
2533 	struct vattr vat;
2534 	vm_object_t object;
2535 	int error = 0;
2536 
2537 	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);
2539 
2540 retry:
2541 	if ((object = vp->v_object) == NULL) {
2542 		if (vp->v_type == VREG || vp->v_type == VDIR) {
2543 			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
2544 				goto retn;
2545 			object = vnode_pager_alloc(vp, vat.va_size, 0, 0);
2546 		} else if (devsw(vp->v_rdev) != NULL) {
2547 			/*
2548 			 * This simply allocates the biggest object possible
2549 			 * for a disk vnode.  This should be fixed, but doesn't
2550 			 * cause any problems (yet).
2551 			 */
2552 			object = vnode_pager_alloc(vp, IDX_TO_OFF(INT_MAX), 0, 0);
2553 		} else {
2554 			goto retn;
2555 		}
		/*
		 * Drop the reference we just created.  This assumes that
		 * the object is associated with the vp.
		 */
2560 		object->ref_count--;
2561 		vp->v_usecount--;
2562 	} else {
2563 		if (object->flags & OBJ_DEAD) {
2564 			VOP_UNLOCK(vp, 0, p);
2565 			tsleep(object, PVM, "vodead", 0);
2566 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
2567 			goto retry;
2568 		}
2569 	}
2570 
2571 	KASSERT(vp->v_object != NULL, ("vfs_object_create: NULL object"));
2572 	vp->v_flag |= VOBJBUF;
2573 
2574 retn:
	return (error);
2576 }
2577 
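/*
 * Place an unreferenced vnode on the free list.  Vnodes marked VAGE
 * are inserted at the head so that they are reused first.
 */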
2578 void
2579 vfree(vp)
2580 	struct vnode *vp;
2581 {
2582 	int s;
2583 
2584 	s = splbio();
2585 	simple_lock(&vnode_free_list_slock);
2586 	KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free"));
2587 	if (vp->v_flag & VAGE) {
2588 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2589 	} else {
2590 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2591 	}
2592 	freevnodes++;
2593 	simple_unlock(&vnode_free_list_slock);
2594 	vp->v_flag &= ~VAGE;
2595 	vp->v_flag |= VFREE;
2596 	splx(s);
2597 }
2598 
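/*
 * Remove a vnode from the free list in preparation for reuse.
 */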
2599 void
2600 vbusy(vp)
2601 	struct vnode *vp;
2602 {
2603 	int s;
2604 
2605 	s = splbio();
2606 	simple_lock(&vnode_free_list_slock);
2607 	KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free"));
2608 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2609 	freevnodes--;
2610 	simple_unlock(&vnode_free_list_slock);
2611 	vp->v_flag &= ~(VFREE|VAGE);
2612 	splx(s);
2613 }
2614 
2615 /*
2616  * Record a process's interest in events which might happen to
2617  * a vnode.  Because poll uses the historic select-style interface
2618  * internally, this routine serves as both the ``check for any
2619  * pending events'' and the ``record my interest in future events''
2620  * functions.  (These are done together, while the lock is held,
2621  * to avoid race conditions.)
2622  */
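/*
 * For example, a filesystem's VOP_POLL routine might simply end with:
 *
 *	return (vn_pollrecord(ap->a_vp, ap->a_p, ap->a_events));
 *
 * (a sketch only; the argument names depend on the caller).
 */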
2623 int
2624 vn_pollrecord(vp, p, events)
2625 	struct vnode *vp;
2626 	struct proc *p;
2627 	short events;
2628 {
2629 	simple_lock(&vp->v_pollinfo.vpi_lock);
2630 	if (vp->v_pollinfo.vpi_revents & events) {
2631 		/*
2632 		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
2635 		 * (otherwise they would never have been
2636 		 * recorded).
2637 		 */
2638 		events &= vp->v_pollinfo.vpi_revents;
2639 		vp->v_pollinfo.vpi_revents &= ~events;
2640 
2641 		simple_unlock(&vp->v_pollinfo.vpi_lock);
		return (events);
2643 	}
2644 	vp->v_pollinfo.vpi_events |= events;
2645 	selrecord(p, &vp->v_pollinfo.vpi_selinfo);
2646 	simple_unlock(&vp->v_pollinfo.vpi_lock);
	return (0);
2648 }
2649 
2650 /*
2651  * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
2652  * it is possible for us to miss an event due to race conditions, but
2653  * that condition is expected to be rare, so for the moment it is the
2654  * preferred interface.
2655  */
2656 void
2657 vn_pollevent(vp, events)
2658 	struct vnode *vp;
2659 	short events;
2660 {
2661 	simple_lock(&vp->v_pollinfo.vpi_lock);
2662 	if (vp->v_pollinfo.vpi_events & events) {
2663 		/*
2664 		 * We clear vpi_events so that we don't
2665 		 * call selwakeup() twice if two events are
2666 		 * posted before the polling process(es) is
2667 		 * awakened.  This also ensures that we take at
2668 		 * most one selwakeup() if the polling process
2669 		 * is no longer interested.  However, it does
2670 		 * mean that only one event can be noticed at
2671 		 * a time.  (Perhaps we should only clear those
2672 		 * event bits which we note?) XXX
2673 		 */
2674 		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
2675 		vp->v_pollinfo.vpi_revents |= events;
2676 		selwakeup(&vp->v_pollinfo.vpi_selinfo);
2677 	}
2678 	simple_unlock(&vp->v_pollinfo.vpi_lock);
2679 }
2680 
2681 /*
2682  * Wake up anyone polling on vp because it is being revoked.
2683  * This depends on dead_poll() returning POLLHUP for correct
2684  * behavior.
2685  */
2686 void
2687 vn_pollgone(vp)
2688 	struct vnode *vp;
2689 {
2690 	simple_lock(&vp->v_pollinfo.vpi_lock);
2691 	if (vp->v_pollinfo.vpi_events) {
2692 		vp->v_pollinfo.vpi_events = 0;
2693 		selwakeup(&vp->v_pollinfo.vpi_selinfo);
2694 	}
2695 	simple_unlock(&vp->v_pollinfo.vpi_lock);
2696 }
2697 
2698 
2699 
2700 /*
2701  * Routine to create and manage a filesystem syncer vnode.
2702  */
2703 #define sync_close ((int (*) __P((struct  vop_close_args *)))nullop)
2704 static int	sync_fsync __P((struct  vop_fsync_args *));
2705 static int	sync_inactive __P((struct  vop_inactive_args *));
2706 static int	sync_reclaim  __P((struct  vop_reclaim_args *));
2707 #define sync_lock ((int (*) __P((struct  vop_lock_args *)))vop_nolock)
2708 #define sync_unlock ((int (*) __P((struct  vop_unlock_args *)))vop_nounlock)
2709 static int	sync_print __P((struct vop_print_args *));
2710 #define sync_islocked ((int(*) __P((struct vop_islocked_args *)))vop_noislocked)
2711 
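/*
 * The syncer's vnode operations vector: only close, fsync, inactive,
 * reclaim, lock, unlock, print, and islocked are provided; all other
 * operations fail with EOPNOTSUPP.
 */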
2712 static vop_t **sync_vnodeop_p;
2713 static struct vnodeopv_entry_desc sync_vnodeop_entries[] = {
2714 	{ &vop_default_desc,	(vop_t *) vop_eopnotsupp },
2715 	{ &vop_close_desc,	(vop_t *) sync_close },		/* close */
2716 	{ &vop_fsync_desc,	(vop_t *) sync_fsync },		/* fsync */
2717 	{ &vop_inactive_desc,	(vop_t *) sync_inactive },	/* inactive */
2718 	{ &vop_reclaim_desc,	(vop_t *) sync_reclaim },	/* reclaim */
2719 	{ &vop_lock_desc,	(vop_t *) sync_lock },		/* lock */
2720 	{ &vop_unlock_desc,	(vop_t *) sync_unlock },	/* unlock */
2721 	{ &vop_print_desc,	(vop_t *) sync_print },		/* print */
2722 	{ &vop_islocked_desc,	(vop_t *) sync_islocked },	/* islocked */
2723 	{ NULL, NULL }
2724 };
2725 static struct vnodeopv_desc sync_vnodeop_opv_desc =
2726 	{ &sync_vnodeop_p, sync_vnodeop_entries };
2727 
2728 VNODEOP_SET(sync_vnodeop_opv_desc);
2729 
2730 /*
2731  * Create a new filesystem syncer vnode for the specified mount point.
2732  */
2733 int
2734 vfs_allocate_syncvnode(mp)
2735 	struct mount *mp;
2736 {
2737 	struct vnode *vp;
2738 	static long start, incr, next;
2739 	int error;
2740 
2741 	/* Allocate a new vnode */
2742 	if ((error = getnewvnode(VT_VFS, mp, sync_vnodeop_p, &vp)) != 0) {
2743 		mp->mnt_syncer = NULL;
2744 		return (error);
2745 	}
2746 	vp->v_type = VNON;
2747 	/*
2748 	 * Place the vnode onto the syncer worklist. We attempt to
2749 	 * scatter them about on the list so that they will go off
2750 	 * at evenly distributed times even if all the filesystems
2751 	 * are mounted at once.
2752 	 */
2753 	next += incr;
2754 	if (next == 0 || next > syncer_maxdelay) {
2755 		start /= 2;
2756 		incr /= 2;
2757 		if (start == 0) {
2758 			start = syncer_maxdelay / 2;
2759 			incr = syncer_maxdelay;
2760 		}
2761 		next = start;
2762 	}
2763 	vn_syncer_add_to_worklist(vp, syncdelay > 0 ? next % syncdelay : 0);
2764 	mp->mnt_syncer = vp;
2765 	return (0);
2766 }
2767 
2768 /*
2769  * Do a lazy sync of the filesystem.
2770  */
2771 static int
2772 sync_fsync(ap)
2773 	struct vop_fsync_args /* {
2774 		struct vnode *a_vp;
2775 		struct ucred *a_cred;
2776 		int a_waitfor;
2777 		struct proc *a_p;
2778 	} */ *ap;
2779 {
2780 	struct vnode *syncvp = ap->a_vp;
2781 	struct mount *mp = syncvp->v_mount;
2782 	struct proc *p = ap->a_p;
2783 	int asyncflag;
2784 
2785 	/*
2786 	 * We only need to do something if this is a lazy evaluation.
2787 	 */
2788 	if (ap->a_waitfor != MNT_LAZY)
2789 		return (0);
2790 
2791 	/*
2792 	 * Move ourselves to the back of the sync list.
2793 	 */
2794 	vn_syncer_add_to_worklist(syncvp, syncdelay);
2795 
2796 	/*
2797 	 * Walk the list of vnodes pushing all that are dirty and
2798 	 * not already on the sync list.
2799 	 */
2800 	simple_lock(&mountlist_slock);
2801 	if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_slock, p) != 0) {
2802 		simple_unlock(&mountlist_slock);
2803 		return (0);
2804 	}
2805 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
2806 		vfs_unbusy(mp, p);
2807 		simple_unlock(&mountlist_slock);
2808 		return (0);
2809 	}
2810 	asyncflag = mp->mnt_flag & MNT_ASYNC;
2811 	mp->mnt_flag &= ~MNT_ASYNC;
2812 	vfs_msync(mp, MNT_NOWAIT);
2813 	VFS_SYNC(mp, MNT_LAZY, ap->a_cred, p);
2814 	if (asyncflag)
2815 		mp->mnt_flag |= MNT_ASYNC;
2816 	vn_finished_write(mp);
2817 	vfs_unbusy(mp, p);
2818 	return (0);
2819 }
2820 
/*
 * The syncer vnode is no longer referenced.
 */
2824 static int
2825 sync_inactive(ap)
2826 	struct vop_inactive_args /* {
2827 		struct vnode *a_vp;
2828 		struct proc *a_p;
2829 	} */ *ap;
2830 {
2831 
2832 	vgone(ap->a_vp);
2833 	return (0);
2834 }
2835 
2836 /*
2837  * The syncer vnode is no longer needed and is being decommissioned.
2838  *
2839  * Modifications to the worklist must be protected at splbio().
2840  */
2841 static int
2842 sync_reclaim(ap)
2843 	struct vop_reclaim_args /* {
2844 		struct vnode *a_vp;
2845 	} */ *ap;
2846 {
2847 	struct vnode *vp = ap->a_vp;
2848 	int s;
2849 
2850 	s = splbio();
2851 	vp->v_mount->mnt_syncer = NULL;
2852 	if (vp->v_flag & VONWORKLST) {
2853 		LIST_REMOVE(vp, v_synclist);
2854 		vp->v_flag &= ~VONWORKLST;
2855 	}
2856 	splx(s);
2857 
2858 	return (0);
2859 }
2860 
2861 /*
2862  * Print out a syncer vnode.
2863  */
2864 static int
2865 sync_print(ap)
2866 	struct vop_print_args /* {
2867 		struct vnode *a_vp;
2868 	} */ *ap;
2869 {
2870 	struct vnode *vp = ap->a_vp;
2871 
2872 	printf("syncer vnode");
2873 	if (vp->v_vnlock != NULL)
2874 		lockmgr_printinfo(vp->v_vnlock);
2875 	printf("\n");
2876 	return (0);
2877 }
2878 
/*
 * Extract the dev_t from a VBLK or VCHR vnode.
 */
2882 dev_t
2883 vn_todev(vp)
2884 	struct vnode *vp;
2885 {
2886 	if (vp->v_type != VBLK && vp->v_type != VCHR)
2887 		return (NODEV);
2888 	return (vp->v_rdev);
2889 }
2890 
/*
 * Check whether the vnode represents a disk device.
 */
2894 int
2895 vn_isdisk(vp, errp)
2896 	struct vnode *vp;
2897 	int *errp;
2898 {
2899 	if (vp->v_type != VBLK && vp->v_type != VCHR) {
2900 		if (errp != NULL)
2901 			*errp = ENOTBLK;
2902 		return (0);
2903 	}
2904 	if (vp->v_rdev == NULL) {
2905 		if (errp != NULL)
2906 			*errp = ENXIO;
2907 		return (0);
2908 	}
2909 	if (!devsw(vp->v_rdev)) {
2910 		if (errp != NULL)
2911 			*errp = ENXIO;
2912 		return (0);
2913 	}
2914 	if (!(devsw(vp->v_rdev)->d_flags & D_DISK)) {
2915 		if (errp != NULL)
2916 			*errp = ENOTBLK;
2917 		return (0);
2918 	}
2919 	if (errp != NULL)
2920 		*errp = 0;
2921 	return (1);
2922 }
2923 
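/*
 * Release the resources held by a nameidata structure: the pathname
 * buffer and any held or locked directory and leaf vnodes, except as
 * exempted by the NDF_NO_* flags.
 */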
2924 void
2925 NDFREE(ndp, flags)
	struct nameidata *ndp;
	const uint flags;
2928 {
2929 	if (!(flags & NDF_NO_FREE_PNBUF) &&
2930 	    (ndp->ni_cnd.cn_flags & HASBUF)) {
2931 		zfree(namei_zone, ndp->ni_cnd.cn_pnbuf);
2932 		ndp->ni_cnd.cn_flags &= ~HASBUF;
2933 	}
2934 	if (!(flags & NDF_NO_DVP_UNLOCK) &&
2935 	    (ndp->ni_cnd.cn_flags & LOCKPARENT) &&
2936 	    ndp->ni_dvp != ndp->ni_vp)
2937 		VOP_UNLOCK(ndp->ni_dvp, 0, ndp->ni_cnd.cn_proc);
2938 	if (!(flags & NDF_NO_DVP_RELE) &&
2939 	    (ndp->ni_cnd.cn_flags & (LOCKPARENT|WANTPARENT))) {
2940 		vrele(ndp->ni_dvp);
2941 		ndp->ni_dvp = NULL;
2942 	}
2943 	if (!(flags & NDF_NO_VP_UNLOCK) &&
2944 	    (ndp->ni_cnd.cn_flags & LOCKLEAF) && ndp->ni_vp)
2945 		VOP_UNLOCK(ndp->ni_vp, 0, ndp->ni_cnd.cn_proc);
2946 	if (!(flags & NDF_NO_VP_RELE) &&
2947 	    ndp->ni_vp) {
2948 		vrele(ndp->ni_vp);
2949 		ndp->ni_vp = NULL;
2950 	}
2951 	if (!(flags & NDF_NO_STARTDIR_RELE) &&
2952 	    (ndp->ni_cnd.cn_flags & SAVESTART)) {
2953 		vrele(ndp->ni_startdir);
2954 		ndp->ni_startdir = NULL;
2955 	}
2956 }
2957