xref: /freebsd/sys/kern/vfs_export.c (revision 952d112864d8008aa87278a30a539d888a8493cd)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
39  * $Id: vfs_subr.c,v 1.81 1997/04/01 13:05:34 bde Exp $
40  */
41 
42 /*
43  * External virtual filesystem routines
44  */
45 #include "opt_ddb.h"
46 #include "opt_devfs.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/file.h>
52 #include <sys/proc.h>
53 #include <sys/mount.h>
54 #include <sys/time.h>
55 #include <sys/vnode.h>
56 #include <sys/stat.h>
57 #include <sys/namei.h>
58 #include <sys/ucred.h>
59 #include <sys/buf.h>
60 #include <sys/errno.h>
61 #include <sys/malloc.h>
62 #include <sys/domain.h>
63 #include <sys/mbuf.h>
64 
65 #include <vm/vm.h>
66 #include <vm/vm_param.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_extern.h>
69 #include <vm/vm_pager.h>
70 #include <vm/vnode_pager.h>
71 #include <sys/sysctl.h>
72 
73 #include <miscfs/specfs/specdev.h>
74 
75 #ifdef DDB
76 extern void	printlockedvnodes __P((void));
77 #endif
78 static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
79 static void	vgonel __P((struct vnode *vp, struct proc *p));
80 unsigned long	numvnodes;
81 extern void	vputrele __P((struct vnode *vp, int put));
82 
83 enum vtype iftovt_tab[16] = {
84 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
85 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
86 };
87 int vttoif_tab[9] = {
88 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
89 	S_IFSOCK, S_IFIFO, S_IFMT,
90 };
91 
92 /*
93  * Insq/Remq for the vnode usage lists.
94  */
95 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
96 #define	bufremvn(bp) {							\
97 	LIST_REMOVE(bp, b_vnbufs);					\
98 	(bp)->b_vnbufs.le_next = NOLIST;				\
99 }
100 TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
101 static u_long freevnodes = 0;
102 
103 struct mntlist mountlist;	/* mounted filesystem list */
104 struct simplelock mountlist_slock;
105 static struct simplelock mntid_slock;
106 struct simplelock mntvnode_slock;
107 struct simplelock vnode_free_list_slock;
108 static struct simplelock spechash_slock;
109 
110 int desiredvnodes;
111 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, &desiredvnodes, 0, "");
112 
113 static void	vfs_free_addrlist __P((struct netexport *nep));
114 static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
115 static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
116 				       struct export_args *argp));
117 
118 /*
119  * Initialize the vnode management data structures.
120  */
121 void
122 vntblinit()
123 {
124 
125 	desiredvnodes = maxproc + vm_object_cache_max;
126 	simple_lock_init(&mntvnode_slock);
127 	simple_lock_init(&mntid_slock);
128 	simple_lock_init(&spechash_slock);
129 	TAILQ_INIT(&vnode_free_list);
130 	simple_lock_init(&vnode_free_list_slock);
131 	CIRCLEQ_INIT(&mountlist);
132 }
133 
134 /*
135  * Mark a mount point as busy. Used to synchronize access and to delay
136  * unmounting. Interlock is not released on failure.
137  */
138 int
139 vfs_busy(mp, flags, interlkp, p)
140 	struct mount *mp;
141 	int flags;
142 	struct simplelock *interlkp;
143 	struct proc *p;
144 {
145 	int lkflags;
146 
147 	if (mp->mnt_flag & MNT_UNMOUNT) {
148 		if (flags & LK_NOWAIT)
149 			return (ENOENT);
150 		mp->mnt_flag |= MNT_MWAIT;
151 		if (interlkp) {
152 			simple_unlock(interlkp);
153 		}
154 		/*
155 		 * Since all busy locks are shared except the exclusive
156 		 * lock granted when unmounting, the only place that a
157 		 * wakeup needs to be done is at the release of the
158 		 * exclusive lock at the end of dounmount.
159 		 */
160 		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
161 		if (interlkp) {
162 			simple_lock(interlkp);
163 		}
164 		return (ENOENT);
165 	}
166 	lkflags = LK_SHARED;
167 	if (interlkp)
168 		lkflags |= LK_INTERLOCK;
169 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
170 		panic("vfs_busy: unexpected lock failure");
171 	return (0);
172 }
173 
174 /*
175  * Free a busy filesystem.
176  */
177 void
178 vfs_unbusy(mp, p)
179 	struct mount *mp;
180 	struct proc *p;
181 {
182 
183 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
184 }
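
/*
 * Editorial usage sketch (not part of the original source): callers that
 * walk the mount list pair vfs_busy() and vfs_unbusy() around per-mount
 * work, handing in &mountlist_slock as the interlock, e.g.
 *
 *	if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p) == 0) {
 *		... operate on mp ...
 *		vfs_unbusy(mp, p);
 *	}
 *
 * printlockedvnodes() and sysctl_vnode() below follow this pattern.
 */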
185 
186 /*
187  * Lookup a filesystem type, and if found allocate and initialize
188  * a mount structure for it.
189  *
190  * Devname is usually updated by mount(8) after booting.
191  */
192 int
193 vfs_rootmountalloc(fstypename, devname, mpp)
194 	char *fstypename;
195 	char *devname;
196 	struct mount **mpp;
197 {
198 	struct proc *p = curproc;	/* XXX */
199 	struct vfsconf *vfsp;
200 	struct mount *mp;
201 
202 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
203 		if (!strcmp(vfsp->vfc_name, fstypename))
204 			break;
205 	if (vfsp == NULL)
206 		return (ENODEV);
207 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
208 	bzero((char *)mp, (u_long)sizeof(struct mount));
209 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
210 	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
211 	LIST_INIT(&mp->mnt_vnodelist);
212 	mp->mnt_vfc = vfsp;
213 	mp->mnt_op = vfsp->vfc_vfsops;
214 	mp->mnt_flag = MNT_RDONLY;
215 	mp->mnt_vnodecovered = NULLVP;
216 	vfsp->vfc_refcount++;
217 	mp->mnt_stat.f_type = vfsp->vfc_typenum;
218 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
219 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
220 	mp->mnt_stat.f_mntonname[0] = '/';
221 	mp->mnt_stat.f_mntonname[1] = 0;
222 	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
223 	*mpp = mp;
224 	return (0);
225 }
226 
227 /*
228  * Find an appropriate filesystem to use for the root. If a filesystem
229  * has not been preselected, walk through the list of known filesystems
230  * trying each one that has a mountroot routine, until one works or
231  * we have tried them all.
232  */
233 #ifdef notdef	/* XXX JH */
234 int
235 lite2_vfs_mountroot(void)
236 {
237 	struct vfsconf *vfsp;
238 	extern int (*lite2_mountroot)(void);
239 	int error;
240 
241 	if (lite2_mountroot != NULL)
242 		return ((*lite2_mountroot)());
243 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
244 		if (vfsp->vfc_mountroot == NULL)
245 			continue;
246 		if ((error = (*vfsp->vfc_mountroot)()) == 0)
247 			return (0);
248 		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
249 	}
250 	return (ENODEV);
251 }
252 #endif
253 
254 /*
255  * Lookup a mount point by filesystem identifier.
256  */
257 struct mount *
258 vfs_getvfs(fsid)
259 	fsid_t *fsid;
260 {
261 	register struct mount *mp;
262 
263 	simple_lock(&mountlist_slock);
264 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
265 	    mp = mp->mnt_list.cqe_next) {
266 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
267 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
268 			simple_unlock(&mountlist_slock);
269 			return (mp);
270 		}
271 	}
272 	simple_unlock(&mountlist_slock);
273 	return ((struct mount *) 0);
274 }
275 
276 /*
277  * Get a new unique fsid
278  */
279 void
280 vfs_getnewfsid(mp)
281 	struct mount *mp;
282 {
283 	static u_short xxxfs_mntid;
284 
285 	fsid_t tfsid;
286 	int mtype;
287 
288 	simple_lock(&mntid_slock);
289 	mtype = mp->mnt_vfc->vfc_typenum;
290 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
291 	mp->mnt_stat.f_fsid.val[1] = mtype;
292 	if (xxxfs_mntid == 0)
293 		++xxxfs_mntid;
294 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
295 	tfsid.val[1] = mtype;
296 	if (mountlist.cqh_first != (void *)&mountlist) {
297 		while (vfs_getvfs(&tfsid)) {
298 			tfsid.val[0]++;
299 			xxxfs_mntid++;
300 		}
301 	}
302 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
303 	simple_unlock(&mntid_slock);
304 }
305 
306 /*
307  * Set vnode attributes to VNOVAL
308  */
309 void
310 vattr_null(vap)
311 	register struct vattr *vap;
312 {
313 
314 	vap->va_type = VNON;
315 	vap->va_size = VNOVAL;
316 	vap->va_bytes = VNOVAL;
317 	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
318 	    vap->va_fsid = vap->va_fileid =
319 	    vap->va_blocksize = vap->va_rdev =
320 	    vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
321 	    vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
322 	    vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
323 	    vap->va_flags = vap->va_gen = VNOVAL;
324 	vap->va_vaflags = 0;
325 }
326 
327 /*
328  * Routines having to do with the management of the vnode table.
329  */
330 extern vop_t **dead_vnodeop_p;
331 
332 /*
333  * Return the next vnode from the free list.
334  */
335 int
336 getnewvnode(tag, mp, vops, vpp)
337 	enum vtagtype tag;
338 	struct mount *mp;
339 	vop_t **vops;
340 	struct vnode **vpp;
341 {
342 	struct proc *p = curproc;	/* XXX */
343 	struct vnode *vp;
344 
345 	simple_lock(&vnode_free_list_slock);
346 retry:
347 	/*
348 	 * we allocate a new vnode if
349 	 * 	1. we don't have any free
350 	 *		Pretty obvious, we actually used to panic, but that
351 	 *		is a silly thing to do.
352 	 *	2. we haven't filled our pool yet
353 	 *		We don't want to trash the incore (VM-)vnodecache.
354 	 *	3. less than 1/4th of our vnodes are free.
355 	 *		We don't want to trash the namei cache either.
356 	 */
357 	if (freevnodes < (numvnodes >> 2) ||
358 	    numvnodes < desiredvnodes ||
359 	    vnode_free_list.tqh_first == NULL) {
360 		simple_unlock(&vnode_free_list_slock);
361 		vp = (struct vnode *) malloc((u_long) sizeof *vp,
362 		    M_VNODE, M_WAITOK);
363 		bzero((char *) vp, sizeof *vp);
364 		numvnodes++;
365 	} else {
366 		for (vp = vnode_free_list.tqh_first;
367 				vp != NULLVP; vp = vp->v_freelist.tqe_next) {
368 			if (simple_lock_try(&vp->v_interlock))
369 				break;
370 		}
371 		/*
372 		 * Unless this is a bad time of the month, at most
373 		 * the first NCPUS items on the free list are
374 		 * locked, so this is close enough to being empty.
375 		 */
376 		if (vp == NULLVP) {
377 			simple_unlock(&vnode_free_list_slock);
378 			tablefull("vnode");
379 			*vpp = 0;
380 			return (ENFILE);
381 		}
382 		if (vp->v_usecount)
383 			panic("free vnode isn't");
384 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
385 		if (vp->v_usage > 0) {
386 			simple_unlock(&vp->v_interlock);
387 			--vp->v_usage;
388 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
389 			goto retry;
390 		}
391 		freevnodes--;
392 
393 		/* see comment on why 0xdeadb is set at end of vgone (below) */
394 		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
395 		simple_unlock(&vnode_free_list_slock);
396 		vp->v_lease = NULL;
397 		if (vp->v_type != VBAD)
398 			vgonel(vp, p);
399 		else {
400 			simple_unlock(&vp->v_interlock);
401 		}
402 
403 #ifdef DIAGNOSTIC
404 		{
405 			int s;
406 
407 			if (vp->v_data)
408 				panic("cleaned vnode isn't");
409 			s = splbio();
410 			if (vp->v_numoutput)
411 				panic("Clean vnode has pending I/O's");
412 			splx(s);
413 		}
414 #endif
415 		vp->v_flag = 0;
416 		vp->v_lastr = 0;
417 		vp->v_lastw = 0;
418 		vp->v_lasta = 0;
419 		vp->v_cstart = 0;
420 		vp->v_clen = 0;
421 		vp->v_socket = 0;
422 		vp->v_writecount = 0;	/* XXX */
423 		vp->v_usage = 0;
424 	}
425 	vp->v_type = VNON;
426 	cache_purge(vp);
427 	vp->v_tag = tag;
428 	vp->v_op = vops;
429 	insmntque(vp, mp);
430 	*vpp = vp;
431 	vp->v_usecount = 1;
432 	vp->v_data = 0;
433 	return (0);
434 }
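
/*
 * Editorial usage sketch (illustrative only): a filesystem's node
 * allocation path obtains a fresh vnode and then attaches its private
 * data, along the lines of
 *
 *	error = getnewvnode(tag, mp, my_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = my_node;		/* hypothetical per-fs data */
 *
 * where my_vnodeop_p and my_node are placeholder names; bdevvp() below
 * shows the real call with VT_NON and spec_vnodeop_p.
 */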
435 
436 /*
437  * Move a vnode from one mount queue to another.
438  */
439 void
440 insmntque(vp, mp)
441 	register struct vnode *vp;
442 	register struct mount *mp;
443 {
444 
445 	simple_lock(&mntvnode_slock);
446 	/*
447 	 * Delete from old mount point vnode list, if on one.
448 	 */
449 	if (vp->v_mount != NULL)
450 		LIST_REMOVE(vp, v_mntvnodes);
451 	/*
452 	 * Insert into list of vnodes for the new mount point, if available.
453 	 */
454 	if ((vp->v_mount = mp) == NULL) {
455 		simple_unlock(&mntvnode_slock);
456 		return;
457 	}
458 	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
459 	simple_unlock(&mntvnode_slock);
460 }
461 
462 /*
463  * Update outstanding I/O count and do wakeup if requested.
464  */
465 void
466 vwakeup(bp)
467 	register struct buf *bp;
468 {
469 	register struct vnode *vp;
470 
471 	bp->b_flags &= ~B_WRITEINPROG;
472 	if ((vp = bp->b_vp)) {
473 		vp->v_numoutput--;
474 		if (vp->v_numoutput < 0)
475 			panic("vwakeup: neg numoutput");
476 		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
477 			vp->v_flag &= ~VBWAIT;
478 			wakeup((caddr_t) &vp->v_numoutput);
479 		}
480 	}
481 }
482 
483 /*
484  * Flush out and invalidate all buffers associated with a vnode.
485  * Called with the underlying object locked.
486  */
487 int
488 vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
489 	register struct vnode *vp;
490 	int flags;
491 	struct ucred *cred;
492 	struct proc *p;
493 	int slpflag, slptimeo;
494 {
495 	register struct buf *bp;
496 	struct buf *nbp, *blist;
497 	int s, error;
498 	vm_object_t object;
499 
500 	if (flags & V_SAVE) {
501 		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
502 			return (error);
503 		if (vp->v_dirtyblkhd.lh_first != NULL)
504 			panic("vinvalbuf: dirty bufs");
505 	}
506 
507 	s = splbio();
508 	for (;;) {
509 		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
510 			while (blist && blist->b_lblkno < 0)
511 				blist = blist->b_vnbufs.le_next;
512 		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
513 		    (flags & V_SAVEMETA))
514 			while (blist && blist->b_lblkno < 0)
515 				blist = blist->b_vnbufs.le_next;
516 		if (!blist)
517 			break;
518 
519 		for (bp = blist; bp; bp = nbp) {
520 			nbp = bp->b_vnbufs.le_next;
521 			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
522 				continue;
523 			if (bp->b_flags & B_BUSY) {
524 				bp->b_flags |= B_WANTED;
525 				error = tsleep((caddr_t) bp,
526 				    slpflag | (PRIBIO + 1), "vinvalbuf",
527 				    slptimeo);
528 				if (error) {
529 					splx(s);
530 					return (error);
531 				}
532 				break;
533 			}
534 			bremfree(bp);
535 			bp->b_flags |= B_BUSY;
536 			/*
537 			 * XXX Since there are no node locks for NFS, I
538 			 * believe there is a slight chance that a delayed
539 			 * write will occur while sleeping just above, so
540 			 * check for it.
541 			 */
542 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
543 				(void) VOP_BWRITE(bp);
544 				break;
545 			}
546 			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
547 			brelse(bp);
548 		}
549 	}
550 
551 	while (vp->v_numoutput > 0) {
552 		vp->v_flag |= VBWAIT;
553 		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
554 	}
555 
556 	splx(s);
557 
558 	/*
559 	 * Destroy the copy in the VM cache, too.
560 	 */
561 	object = vp->v_object;
562 	if (object != NULL) {
563 		vm_object_page_remove(object, 0, object->size,
564 		    (flags & V_SAVE) ? TRUE : FALSE);
565 	}
566 	if (!(flags & V_SAVEMETA) &&
567 	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
568 		panic("vinvalbuf: flush failed");
569 	return (0);
570 }
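
/*
 * Editorial note: vclean() below calls vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0)
 * to write dirty buffers back before a vnode is reclaimed.  Passing 0
 * instead of V_SAVE simply discards them, and V_SAVEMETA preserves the
 * indirect-block buffers (those with negative b_lblkno).
 */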
571 
572 /*
573  * Associate a buffer with a vnode.
574  */
575 void
576 bgetvp(vp, bp)
577 	register struct vnode *vp;
578 	register struct buf *bp;
579 {
580 	int s;
581 
582 	if (bp->b_vp)
583 		panic("bgetvp: not free");
584 	VHOLD(vp);
585 	bp->b_vp = vp;
586 	if (vp->v_type == VBLK || vp->v_type == VCHR)
587 		bp->b_dev = vp->v_rdev;
588 	else
589 		bp->b_dev = NODEV;
590 	/*
591 	 * Insert onto list for new vnode.
592 	 */
593 	s = splbio();
594 	bufinsvn(bp, &vp->v_cleanblkhd);
595 	splx(s);
596 }
597 
598 /*
599  * Disassociate a buffer from a vnode.
600  */
601 void
602 brelvp(bp)
603 	register struct buf *bp;
604 {
605 	struct vnode *vp;
606 	int s;
607 
608 	if (bp->b_vp == (struct vnode *) 0)
609 		panic("brelvp: NULL");
610 	/*
611 	 * Delete from old vnode list, if on one.
612 	 */
613 	s = splbio();
614 	if (bp->b_vnbufs.le_next != NOLIST)
615 		bufremvn(bp);
616 	splx(s);
617 
618 	vp = bp->b_vp;
619 	bp->b_vp = (struct vnode *) 0;
620 	HOLDRELE(vp);
621 }
622 
623 /*
624  * Associate a p-buffer with a vnode.
625  */
626 void
627 pbgetvp(vp, bp)
628 	register struct vnode *vp;
629 	register struct buf *bp;
630 {
631 #if defined(DIAGNOSTIC)
632 	if (bp->b_vp)
633 		panic("pbgetvp: not free");
634 #endif
635 	bp->b_vp = vp;
636 	if (vp->v_type == VBLK || vp->v_type == VCHR)
637 		bp->b_dev = vp->v_rdev;
638 	else
639 		bp->b_dev = NODEV;
640 }
641 
642 /*
643  * Disassociate a p-buffer from a vnode.
644  */
645 void
646 pbrelvp(bp)
647 	register struct buf *bp;
648 {
649 	struct vnode *vp;
650 
651 #if defined(DIAGNOSTIC)
652 	if (bp->b_vp == (struct vnode *) 0)
653 		panic("pbrelvp: NULL");
654 #endif
655 
656 	bp->b_vp = (struct vnode *) 0;
657 }
658 
659 /*
660  * Reassign a buffer from one vnode to another.
661  * Used to assign file specific control information
662  * (indirect blocks) to the vnode to which they belong.
663  */
664 void
665 reassignbuf(bp, newvp)
666 	register struct buf *bp;
667 	register struct vnode *newvp;
668 {
669 	int s;
670 
671 	if (newvp == NULL) {
672 		printf("reassignbuf: NULL");
673 		return;
674 	}
675 
676 	s = splbio();
677 	/*
678 	 * Delete from old vnode list, if on one.
679 	 */
680 	if (bp->b_vnbufs.le_next != NOLIST)
681 		bufremvn(bp);
682 	/*
683 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
684 	 * of clean buffers.
685 	 */
686 	if (bp->b_flags & B_DELWRI) {
687 		struct buf *tbp;
688 
689 		tbp = newvp->v_dirtyblkhd.lh_first;
690 		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
691 			bufinsvn(bp, &newvp->v_dirtyblkhd);
692 		} else {
693 			while (tbp->b_vnbufs.le_next &&
694 				(tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
695 				tbp = tbp->b_vnbufs.le_next;
696 			}
697 			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
698 		}
699 	} else {
700 		bufinsvn(bp, &newvp->v_cleanblkhd);
701 	}
702 	splx(s);
703 }
704 
705 #ifndef DEVFS_ROOT
706 /*
707  * Create a vnode for a block device.
708  * Used for root filesystem, argdev, and swap areas.
709  * Also used for memory file system special devices.
710  */
711 int
712 bdevvp(dev, vpp)
713 	dev_t dev;
714 	struct vnode **vpp;
715 {
716 	register struct vnode *vp;
717 	struct vnode *nvp;
718 	int error;
719 
720 	if (dev == NODEV)
721 		return (0);
722 	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
723 	if (error) {
724 		*vpp = 0;
725 		return (error);
726 	}
727 	vp = nvp;
728 	vp->v_type = VBLK;
729 	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
730 		vput(vp);
731 		vp = nvp;
732 	}
733 	*vpp = vp;
734 	return (0);
735 }
736 #endif /* !DEVFS_ROOT */
737 
738 /*
739  * Check to see if the new vnode represents a special device
740  * for which we already have a vnode (either because of
741  * bdevvp() or because of a different vnode representing
742  * the same block device). If such an alias exists, deallocate
743  * the existing contents and return the aliased vnode. The
744  * caller is responsible for filling it with its new contents.
745  */
746 struct vnode *
747 checkalias(nvp, nvp_rdev, mp)
748 	register struct vnode *nvp;
749 	dev_t nvp_rdev;
750 	struct mount *mp;
751 {
752 	struct proc *p = curproc;	/* XXX */
753 	struct vnode *vp;
754 	struct vnode **vpp;
755 
756 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
757 		return (NULLVP);
758 
759 	vpp = &speclisth[SPECHASH(nvp_rdev)];
760 loop:
761 	simple_lock(&spechash_slock);
762 	for (vp = *vpp; vp; vp = vp->v_specnext) {
763 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
764 			continue;
765 		/*
766 		 * Alias, but not in use, so flush it out.
767 		 */
768 		simple_lock(&vp->v_interlock);
769 		if (vp->v_usecount == 0) {
770 			simple_unlock(&spechash_slock);
771 			vgonel(vp, p);
772 			goto loop;
773 		}
774 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
775 			simple_unlock(&spechash_slock);
776 			goto loop;
777 		}
778 		break;
779 	}
780 	if (vp == NULL || vp->v_tag != VT_NON) {
781 		MALLOC(nvp->v_specinfo, struct specinfo *,
782 		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
783 		nvp->v_rdev = nvp_rdev;
784 		nvp->v_hashchain = vpp;
785 		nvp->v_specnext = *vpp;
786 		nvp->v_specflags = 0;
787 		simple_unlock(&spechash_slock);
788 		*vpp = nvp;
789 		if (vp != NULLVP) {
790 			nvp->v_flag |= VALIASED;
791 			vp->v_flag |= VALIASED;
792 			vput(vp);
793 		}
794 		return (NULLVP);
795 	}
796 	simple_unlock(&spechash_slock);
797 	VOP_UNLOCK(vp, 0, p);
798 	simple_lock(&vp->v_interlock);
799 	vclean(vp, 0, p);
800 	vp->v_op = nvp->v_op;
801 	vp->v_tag = nvp->v_tag;
802 	nvp->v_type = VNON;
803 	insmntque(vp, mp);
804 	return (vp);
805 }
806 
807 /*
808  * Grab a particular vnode from the free list, increment its
809  * reference count and lock it. The vnode lock bit is set while the
810  * vnode is being eliminated in vgone. The process is awakened
811  * when the transition is completed, and an error returned to
812  * indicate that the vnode is no longer usable (possibly having
813  * been changed to a new file system type).
814  */
815 int
816 vget(vp, flags, p)
817 	register struct vnode *vp;
818 	int flags;
819 	struct proc *p;
820 {
821 	int error;
822 
823 	/*
824 	 * If the vnode is in the process of being cleaned out for
825 	 * another use, we wait for the cleaning to finish and then
826 	 * return failure. Cleaning is determined by checking that
827 	 * the VXLOCK flag is set.
828 	 */
829 	if ((flags & LK_INTERLOCK) == 0) {
830 		simple_lock(&vp->v_interlock);
831 	}
832 	if (vp->v_flag & VXLOCK) {
833 		vp->v_flag |= VXWANT;
834 		simple_unlock(&vp->v_interlock);
835 		tsleep((caddr_t)vp, PINOD, "vget", 0);
836 		return (ENOENT);
837 	}
838 	if (vp->v_usecount == 0) {
839 		simple_lock(&vnode_free_list_slock);
840 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
841 		simple_unlock(&vnode_free_list_slock);
842 		freevnodes--;
843 	}
844 	vp->v_usecount++;
845 	/*
846 	 * Create the VM object, if needed
847 	 */
848 	if ((vp->v_type == VREG) &&
849 		((vp->v_object == NULL) ||
850 			(vp->v_object->flags & OBJ_VFS_REF) == 0)) {
851 		/*
852 		 * XXX vfs_object_create probably needs the interlock.
853 		 */
854 		simple_unlock(&vp->v_interlock);
855 		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
856 		simple_lock(&vp->v_interlock);
857 	}
858 	if (flags & LK_TYPE_MASK) {
859 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)) != 0)
860 			vrele(vp);
861 		return (error);
862 	}
863 	simple_unlock(&vp->v_interlock);
864 	return (0);
865 }
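
/*
 * Editorial usage sketch: a caller that found vp on some auxiliary list
 * typically does
 *
 *	if (vget(vp, LK_EXCLUSIVE, p))
 *		goto loop;	/* vnode was reclaimed while we slept */
 *
 * as checkalias() above does with LK_EXCLUSIVE | LK_INTERLOCK; the ENOENT
 * return means the vnode was being cleaned out and is no longer usable.
 */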
866 
867 /*
868  * Stubs to use when there is no locking to be done on the underlying object.
869  * A minimal shared lock is necessary to ensure that the underlying object
870  * is not revoked while an operation is in progress. So, an active shared
871  * count is maintained in an auxiliary vnode lock structure.
872  */
873 int
874 vop_sharedlock(ap)
875 	struct vop_lock_args /* {
876 		struct vnode *a_vp;
877 		int a_flags;
878 		struct proc *a_p;
879 	} */ *ap;
880 {
881 	/*
882 	 * This code cannot be used until all the non-locking filesystems
883 	 * (notably NFS) are converted to properly lock and release nodes.
884 	 * Also, certain vnode operations change the locking state within
885 	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
886 	 * and symlink). Ideally these operations should not change the
887 	 * lock state, but should be changed to let the caller of the
888 	 * function unlock them. Otherwise all intermediate vnode layers
889 	 * (such as union, umapfs, etc) must catch these functions to do
890 	 * the necessary locking at their layer. Note that the inactive
891 	 * and lookup operations also change their lock state, but this
892 	 * cannot be avoided, so these two operations will always need
893 	 * to be handled in intermediate layers.
894 	 */
895 	struct vnode *vp = ap->a_vp;
896 	int vnflags, flags = ap->a_flags;
897 
898 	if (vp->v_vnlock == NULL) {
899 		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
900 			return (0);
901 		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
902 		    M_VNODE, M_WAITOK);
903 		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
904 	}
905 	switch (flags & LK_TYPE_MASK) {
906 	case LK_DRAIN:
907 		vnflags = LK_DRAIN;
908 		break;
909 	case LK_EXCLUSIVE:
910 #ifdef DEBUG_VFS_LOCKS
911 		/*
912 		 * Normally, we use shared locks here, but that confuses
913 		 * the locking assertions.
914 		 */
915 		vnflags = LK_EXCLUSIVE;
916 		break;
917 #endif
918 	case LK_SHARED:
919 		vnflags = LK_SHARED;
920 		break;
921 	case LK_UPGRADE:
922 	case LK_EXCLUPGRADE:
923 	case LK_DOWNGRADE:
924 		return (0);
925 	case LK_RELEASE:
926 	default:
927 		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
928 	}
929 	if (flags & LK_INTERLOCK)
930 		vnflags |= LK_INTERLOCK;
931 	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
932 }
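
/*
 * Editorial note: filesystems that do not implement real locking point
 * their vop_lock entry at vop_sharedlock() or vop_nolock() below
 * (e.g. something like { &vop_lock_desc, (vop_t *) vop_sharedlock } in
 * the vnodeop table; illustrative only).  The shared-count bookkeeping
 * lives in the auxiliary v_vnlock allocated here on first use.
 */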
933 
934 /*
935  * Stubs to use when there is no locking to be done on the underlying object.
936  * A minimal shared lock is necessary to ensure that the underlying object
937  * is not revoked while an operation is in progress. So, an active shared
938  * count is maintained in an auxiliary vnode lock structure.
939  */
940 int
941 vop_nolock(ap)
942 	struct vop_lock_args /* {
943 		struct vnode *a_vp;
944 		int a_flags;
945 		struct proc *a_p;
946 	} */ *ap;
947 {
948 #ifdef notyet
949 	/*
950 	 * This code cannot be used until all the non-locking filesystems
951 	 * (notably NFS) are converted to properly lock and release nodes.
952 	 * Also, certain vnode operations change the locking state within
953 	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
954 	 * and symlink). Ideally these operations should not change the
955 	 * lock state, but should be changed to let the caller of the
956 	 * function unlock them. Otherwise all intermediate vnode layers
957 	 * (such as union, umapfs, etc) must catch these functions to do
958 	 * the necessary locking at their layer. Note that the inactive
959 	 * and lookup operations also change their lock state, but this
960 	 * cannot be avoided, so these two operations will always need
961 	 * to be handled in intermediate layers.
962 	 */
963 	struct vnode *vp = ap->a_vp;
964 	int vnflags, flags = ap->a_flags;
965 
966 	if (vp->v_vnlock == NULL) {
967 		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
968 			return (0);
969 		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
970 		    M_VNODE, M_WAITOK);
971 		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
972 	}
973 	switch (flags & LK_TYPE_MASK) {
974 	case LK_DRAIN:
975 		vnflags = LK_DRAIN;
976 		break;
977 	case LK_EXCLUSIVE:
978 	case LK_SHARED:
979 		vnflags = LK_SHARED;
980 		break;
981 	case LK_UPGRADE:
982 	case LK_EXCLUPGRADE:
983 	case LK_DOWNGRADE:
984 		return (0);
985 	case LK_RELEASE:
986 	default:
987 		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
988 	}
989 	if (flags & LK_INTERLOCK)
990 		vnflags |= LK_INTERLOCK;
991 	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
992 #else /* for now */
993 	/*
994 	 * Since we are not using the lock manager, we must clear
995 	 * the interlock here.
996 	 */
997 	if (ap->a_flags & LK_INTERLOCK) {
998 		simple_unlock(&ap->a_vp->v_interlock);
999 	}
1000 	return (0);
1001 #endif
1002 }
1003 
1004 /*
1005  * Do the inverse of vop_nolock, handling the interlock in a compatible way.
1006  */
1007 int
1008 vop_nounlock(ap)
1009 	struct vop_unlock_args /* {
1010 		struct vnode *a_vp;
1011 		int a_flags;
1012 		struct proc *a_p;
1013 	} */ *ap;
1014 {
1015 	struct vnode *vp = ap->a_vp;
1016 
1017 	if (vp->v_vnlock == NULL) {
1018 		if (ap->a_flags & LK_INTERLOCK)
1019 			simple_unlock(&ap->a_vp->v_interlock);
1020 		return (0);
1021 	}
1022 	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
1023 		&ap->a_vp->v_interlock, ap->a_p));
1024 }
1025 
1026 /*
1027  * Return whether or not the node is in use.
1028  */
1029 int
1030 vop_noislocked(ap)
1031 	struct vop_islocked_args /* {
1032 		struct vnode *a_vp;
1033 	} */ *ap;
1034 {
1035 	struct vnode *vp = ap->a_vp;
1036 
1037 	if (vp->v_vnlock == NULL)
1038 		return (0);
1039 	return (lockstatus(vp->v_vnlock));
1040 }
1041 
1042 /* #ifdef DIAGNOSTIC */
1043 /*
1044  * Vnode reference, just increment the count
1045  */
1046 void
1047 vref(vp)
1048 	struct vnode *vp;
1049 {
1050 	simple_lock(&vp->v_interlock);
1051 	if (vp->v_usecount <= 0)
1052 		panic("vref used where vget required");
1053 
1054 	vp->v_usecount++;
1055 
1056 	if ((vp->v_type == VREG) &&
1057 		((vp->v_object == NULL) ||
1058 			((vp->v_object->flags & OBJ_VFS_REF) == 0)) ) {
1059 		/*
1060 		 * We need to lock the VP during the time that
1061 		 * the object is created.  This is necessary to
1062 		 * keep the system from re-entrantly doing it
1063 		 * multiple times.
1064 		 * XXX vfs_object_create probably needs the interlock?
1065 		 */
1066 		simple_unlock(&vp->v_interlock);
1067 		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
1068 		return;
1069 	}
1070 	simple_unlock(&vp->v_interlock);
1071 }
1072 
1073 /*
1074  * Vnode put/release.
1075  * If count drops to zero, call inactive routine and return to freelist.
1076  */
1077 void
1078 vputrele(vp, put)
1079 	struct vnode *vp;
1080 	int put;
1081 {
1082 	struct proc *p = curproc;	/* XXX */
1083 
1084 #ifdef DIAGNOSTIC
1085 	if (vp == NULL)
1086 		panic("vputrele: null vp");
1087 #endif
1088 	simple_lock(&vp->v_interlock);
1089 	vp->v_usecount--;
1090 
1091 	if ((vp->v_usecount == 1) &&
1092 		vp->v_object &&
1093 		(vp->v_object->flags & OBJ_VFS_REF)) {
1094 		vp->v_object->flags &= ~OBJ_VFS_REF;
1095 		if (put) {
1096 			VOP_UNLOCK(vp, LK_INTERLOCK, p);
1097 		} else {
1098 			simple_unlock(&vp->v_interlock);
1099 		}
1100 		vm_object_deallocate(vp->v_object);
1101 		return;
1102 	}
1103 
1104 	if (vp->v_usecount > 0) {
1105 		if (put) {
1106 			VOP_UNLOCK(vp, LK_INTERLOCK, p);
1107 		} else {
1108 			simple_unlock(&vp->v_interlock);
1109 		}
1110 		return;
1111 	}
1112 
1113 	if (vp->v_usecount < 0) {
1114 #ifdef DIAGNOSTIC
1115 		vprint("vputrele: negative ref count", vp);
1116 #endif
1117 		panic("vputrele: negative ref cnt");
1118 	}
1119 	simple_lock(&vnode_free_list_slock);
1120 	if (vp->v_flag & VAGE) {
1121 		vp->v_flag &= ~VAGE;
1122 		vp->v_usage = 0;
1123 		if (vp->v_tag != VT_TFS)
1124 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1125 	} else {
1126 		if (vp->v_tag != VT_TFS)
1127 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1128 	}
1129 	freevnodes++;
1130 	simple_unlock(&vnode_free_list_slock);
1131 
1132 	/*
1133 	 * If we are doing a vput, the node is already locked, and we must
1134 	 * call VOP_INACTIVE with the node locked.  So, in the case of
1135 	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
1136 	 */
1137 	if (put) {
1138 		simple_unlock(&vp->v_interlock);
1139 		VOP_INACTIVE(vp, p);
1140 	} else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
1141 		VOP_INACTIVE(vp, p);
1142 	}
1143 }
1144 
1145 /*
1146  * vput(), just unlock and vrele()
1147  */
1148 void
1149 vput(vp)
1150 	struct vnode *vp;
1151 {
1152 	vputrele(vp, 1);
1153 }
1154 
1155 void
1156 vrele(vp)
1157 	struct vnode *vp;
1158 {
1159 	vputrele(vp, 0);
1160 }
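
/*
 * Editorial note: vput() and vrele() differ only in the lock state they
 * expect.  vput() releases a vnode that is currently locked (e.g. one
 * just obtained with vget(vp, LK_EXCLUSIVE, p)) and unlocks it as part
 * of the release; vrele() drops a reference on an unlocked vnode.  Both
 * funnel into vputrele() above.
 */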
1161 
1162 #ifdef DIAGNOSTIC
1163 /*
1164  * Page or buffer structure gets a reference.
1165  */
1166 void
1167 vhold(vp)
1168 	register struct vnode *vp;
1169 {
1170 
1171 	simple_lock(&vp->v_interlock);
1172 	vp->v_holdcnt++;
1173 	simple_unlock(&vp->v_interlock);
1174 }
1175 
1176 /*
1177  * Page or buffer structure frees a reference.
1178  */
1179 void
1180 holdrele(vp)
1181 	register struct vnode *vp;
1182 {
1183 
1184 	simple_lock(&vp->v_interlock);
1185 	if (vp->v_holdcnt <= 0)
1186 		panic("holdrele: holdcnt");
1187 	vp->v_holdcnt--;
1188 	simple_unlock(&vp->v_interlock);
1189 }
1190 #endif /* DIAGNOSTIC */
1191 
1192 /*
1193  * Remove any vnodes in the vnode table belonging to mount point mp.
1194  *
1195  * If MNT_NOFORCE is specified, there should not be any active ones,
1196  * return error if any are found (nb: this is a user error, not a
1197  * system error). If MNT_FORCE is specified, detach any active vnodes
1198  * that are found.
1199  */
1200 #ifdef DIAGNOSTIC
1201 static int busyprt = 0;		/* print out busy vnodes */
1202 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
1203 #endif
1204 
1205 int
1206 vflush(mp, skipvp, flags)
1207 	struct mount *mp;
1208 	struct vnode *skipvp;
1209 	int flags;
1210 {
1211 	struct proc *p = curproc;	/* XXX */
1212 	struct vnode *vp, *nvp;
1213 	int busy = 0;
1214 
1215 	simple_lock(&mntvnode_slock);
1216 loop:
1217 	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
1218 		/*
1219 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
1220 		 * Start over if it has (it won't be on the list anymore).
1221 		 * Start over if it has been (it won't be on the list anymore).
1222 		if (vp->v_mount != mp)
1223 			goto loop;
1224 		nvp = vp->v_mntvnodes.le_next;
1225 		/*
1226 		 * Skip over a selected vnode.
1227 		 */
1228 		if (vp == skipvp)
1229 			continue;
1230 
1231 		simple_lock(&vp->v_interlock);
1232 		/*
1233 		 * Skip over any vnodes marked VSYSTEM.
1234 		 */
1235 		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1236 			simple_unlock(&vp->v_interlock);
1237 			continue;
1238 		}
1239 		/*
1240 		 * If WRITECLOSE is set, only flush out regular file vnodes
1241 		 * open for writing.
1242 		 */
1243 		if ((flags & WRITECLOSE) &&
1244 		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1245 			simple_unlock(&vp->v_interlock);
1246 			continue;
1247 		}
1248 
1249 		if (vp->v_object && (vp->v_object->flags & OBJ_VFS_REF)) {
1250 			simple_unlock(&vp->v_interlock);
1251 			simple_unlock(&mntvnode_slock);
1252 			vm_object_reference(vp->v_object);
1253 			pager_cache(vp->v_object, FALSE);
1254 			vp->v_object->flags &= ~OBJ_VFS_REF;
1255 			vm_object_deallocate(vp->v_object);
1256 			simple_lock(&mntvnode_slock);
1257 			simple_lock(&vp->v_interlock);
1258 		}
1259 
1260 		/*
1261 		 * With v_usecount == 0, all we need to do is clear out the
1262 		 * vnode data structures and we are done.
1263 		 */
1264 		if (vp->v_usecount == 0) {
1265 			simple_unlock(&mntvnode_slock);
1266 			vgonel(vp, p);
1267 			simple_lock(&mntvnode_slock);
1268 			continue;
1269 		}
1270 
1271 		/*
1272 		 * If FORCECLOSE is set, forcibly close the vnode. For block
1273 		 * or character devices, revert to an anonymous device. For
1274 		 * all other files, just kill them.
1275 		 */
1276 		if (flags & FORCECLOSE) {
1277 			simple_unlock(&mntvnode_slock);
1278 			if (vp->v_type != VBLK && vp->v_type != VCHR) {
1279 				vgonel(vp, p);
1280 			} else {
1281 				vclean(vp, 0, p);
1282 				vp->v_op = spec_vnodeop_p;
1283 				insmntque(vp, (struct mount *) 0);
1284 			}
1285 			simple_lock(&mntvnode_slock);
1286 			continue;
1287 		}
1288 #ifdef DIAGNOSTIC
1289 		if (busyprt)
1290 			vprint("vflush: busy vnode", vp);
1291 #endif
1292 		simple_unlock(&vp->v_interlock);
1293 		busy++;
1294 	}
1295 	simple_unlock(&mntvnode_slock);
1296 	if (busy)
1297 		return (EBUSY);
1298 	return (0);
1299 }
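
/*
 * Editorial usage sketch (illustrative only): a filesystem's unmount
 * routine typically does
 *
 *	error = vflush(mp, NULLVP, forced ? FORCECLOSE : 0);
 *
 * and aborts the unmount on EBUSY.  SKIPSYSTEM is added when VSYSTEM
 * vnodes (e.g. quota files) must survive the flush.
 */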
1300 
1301 /*
1302  * Disassociate the underlying file system from a vnode.
1303  */
1304 static void
1305 vclean(struct vnode *vp, int flags, struct proc *p)
1306 {
1307 	int active;
1308 
1309 	/*
1310 	 * Check to see if the vnode is in use. If so we have to reference it
1311 	 * before we clean it out so that its count cannot fall to zero and
1312 	 * generate a race against ourselves to recycle it.
1313 	 */
1314 	if ((active = vp->v_usecount))
1315 		vp->v_usecount++;
1316 	/*
1317 	 * Prevent the vnode from being recycled or brought into use while we
1318 	 * clean it out.
1319 	 */
1320 	if (vp->v_flag & VXLOCK)
1321 		panic("vclean: deadlock");
1322 	vp->v_flag |= VXLOCK;
1323 	/*
1324 	 * Even if the count is zero, the VOP_INACTIVE routine may still
1325 	 * have the object locked while it cleans it out. The VOP_LOCK
1326 	 * ensures that the VOP_INACTIVE routine is done with its work.
1327 	 * For active vnodes, it ensures that no other activity can
1328 	 * occur while the underlying object is being cleaned out.
1329 	 */
1330 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
1331 	/*
1332 	 * Clean out any buffers associated with the vnode.
1333 	 */
1334 	if (flags & DOCLOSE)
1335 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
1336 	/*
1337 	 * If purging an active vnode, it must be closed and
1338 	 * deactivated before being reclaimed. Note that the
1339 	 * VOP_INACTIVE will unlock the vnode.
1340 	 */
1341 	if (active) {
1342 		if (flags & DOCLOSE)
1343 			VOP_CLOSE(vp, IO_NDELAY, NOCRED, p);
1344 		VOP_INACTIVE(vp, p);
1345 	} else {
1346 		/*
1347 		 * Any other processes trying to obtain this lock must first
1348 		 * wait for VXLOCK to clear, then call the new lock operation.
1349 		 */
1350 		VOP_UNLOCK(vp, 0, p);
1351 	}
1352 	/*
1353 	 * Reclaim the vnode.
1354 	 */
1355 	if (VOP_RECLAIM(vp, p))
1356 		panic("vclean: cannot reclaim");
1357 	if (active)
1358 		vrele(vp);
1359 	cache_purge(vp);
1360 	if (vp->v_vnlock) {
1361 #ifdef DIAGNOSTIC
1362 		if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
1363 			vprint("vclean: lock not drained", vp);
1364 #endif
1365 		FREE(vp->v_vnlock, M_VNODE);
1366 		vp->v_vnlock = NULL;
1367 	}
1368 
1369 	/*
1370 	 * Done with purge, notify sleepers of the grim news.
1371 	 */
1372 	vp->v_op = dead_vnodeop_p;
1373 	vp->v_tag = VT_NON;
1374 	vp->v_flag &= ~VXLOCK;
1375 	if (vp->v_flag & VXWANT) {
1376 		vp->v_flag &= ~VXWANT;
1377 		wakeup((caddr_t) vp);
1378 	}
1379 }
1380 
1381 /*
1382  * Eliminate all activity associated with the requested vnode
1383  * and with all vnodes aliased to the requested vnode.
1384  */
1385 int
1386 vop_revoke(ap)
1387 	struct vop_revoke_args /* {
1388 		struct vnode *a_vp;
1389 		int a_flags;
1390 	} */ *ap;
1391 {
1392 	struct vnode *vp, *vq;
1393 	struct proc *p = curproc;	/* XXX */
1394 
1395 #ifdef DIAGNOSTIC
1396 	if ((ap->a_flags & REVOKEALL) == 0)
1397 		panic("vop_revoke");
1398 #endif
1399 
1400 	vp = ap->a_vp;
1401 	simple_lock(&vp->v_interlock);
1402 
1403 	if (vp->v_flag & VALIASED) {
1404 		/*
1405 		 * If a vgone (or vclean) is already in progress,
1406 		 * wait until it is done and return.
1407 		 */
1408 		if (vp->v_flag & VXLOCK) {
1409 			vp->v_flag |= VXWANT;
1410 			simple_unlock(&vp->v_interlock);
1411 			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
1412 			return (0);
1413 		}
1414 		/*
1415 		 * Ensure that vp will not be vgone'd while we
1416 		 * are eliminating its aliases.
1417 		 */
1418 		vp->v_flag |= VXLOCK;
1419 		simple_unlock(&vp->v_interlock);
1420 		while (vp->v_flag & VALIASED) {
1421 			simple_lock(&spechash_slock);
1422 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1423 				if (vq->v_rdev != vp->v_rdev ||
1424 				    vq->v_type != vp->v_type || vp == vq)
1425 					continue;
1426 				simple_unlock(&spechash_slock);
1427 				vgone(vq);
1428 				break;
1429 			}
1430 			if (vq == NULLVP) {
1431 				simple_unlock(&spechash_slock);
1432 			}
1433 		}
1434 		/*
1435 		 * Remove the lock so that vgone below will
1436 		 * really eliminate the vnode after which time
1437 		 * vgone will awaken any sleepers.
1438 		 */
1439 		simple_lock(&vp->v_interlock);
1440 		vp->v_flag &= ~VXLOCK;
1441 	}
1442 	vgonel(vp, p);
1443 	return (0);
1444 }
1445 
1446 /*
1447  * Recycle an unused vnode to the front of the free list.
1448  * Release the passed interlock if the vnode will be recycled.
1449  */
1450 int
1451 vrecycle(vp, inter_lkp, p)
1452 	struct vnode *vp;
1453 	struct simplelock *inter_lkp;
1454 	struct proc *p;
1455 {
1456 
1457 	simple_lock(&vp->v_interlock);
1458 	if (vp->v_usecount == 0) {
1459 		if (inter_lkp) {
1460 			simple_unlock(inter_lkp);
1461 		}
1462 		vgonel(vp, p);
1463 		return (1);
1464 	}
1465 	simple_unlock(&vp->v_interlock);
1466 	return (0);
1467 }
1468 
1469 /*
1470  * Eliminate all activity associated with a vnode
1471  * in preparation for reuse.
1472  */
1473 void
1474 vgone(vp)
1475 	register struct vnode *vp;
1476 {
1477 	struct proc *p = curproc;	/* XXX */
1478 
1479 	simple_lock(&vp->v_interlock);
1480 	vgonel(vp, p);
1481 }
1482 
1483 /*
1484  * vgone, with the vp interlock held.
1485  */
1486 static void
1487 vgonel(vp, p)
1488 	struct vnode *vp;
1489 	struct proc *p;
1490 {
1491 	struct vnode *vq;
1492 	struct vnode *vx;
1493 
1494 	/*
1495 	 * If a vgone (or vclean) is already in progress,
1496 	 * wait until it is done and return.
1497 	 */
1498 	if (vp->v_flag & VXLOCK) {
1499 		vp->v_flag |= VXWANT;
1500 		simple_unlock(&vp->v_interlock);
1501 		tsleep((caddr_t)vp, PINOD, "vgone", 0);
1502 		return;
1503 	}
1504 
1505 	if (vp->v_object) {
1506 		vp->v_object->flags |= OBJ_VNODE_GONE;
1507 	}
1508 
1509 	/*
1510 	 * Clean out the filesystem specific data.
1511 	 */
1512 	vclean(vp, DOCLOSE, p);
1513 	/*
1514 	 * Delete from old mount point vnode list, if on one.
1515 	 */
1516 	if (vp->v_mount != NULL)
1517 		insmntque(vp, (struct mount *)0);
1518 	/*
1519 	 * If special device, remove it from special device alias list
1520 	 * if it is on one.
1521 	 */
1522 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1523 		simple_lock(&spechash_slock);
1524 		if (*vp->v_hashchain == vp) {
1525 			*vp->v_hashchain = vp->v_specnext;
1526 		} else {
1527 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1528 				if (vq->v_specnext != vp)
1529 					continue;
1530 				vq->v_specnext = vp->v_specnext;
1531 				break;
1532 			}
1533 			if (vq == NULL)
1534 				panic("missing bdev");
1535 		}
1536 		if (vp->v_flag & VALIASED) {
1537 			vx = NULL;
1538 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1539 				if (vq->v_rdev != vp->v_rdev ||
1540 				    vq->v_type != vp->v_type)
1541 					continue;
1542 				if (vx)
1543 					break;
1544 				vx = vq;
1545 			}
1546 			if (vx == NULL)
1547 				panic("missing alias");
1548 			if (vq == NULL)
1549 				vx->v_flag &= ~VALIASED;
1550 			vp->v_flag &= ~VALIASED;
1551 		}
1552 		simple_unlock(&spechash_slock);
1553 		FREE(vp->v_specinfo, M_VNODE);
1554 		vp->v_specinfo = NULL;
1555 	}
1556 
1557 	/*
1558 	 * If it is on the freelist and not already at the head,
1559 	 * move it to the head of the list. The test of the back
1560 	 * pointer and the reference count of zero is because
1561 	 * it will be removed from the free list by getnewvnode,
1562 	 * but will not have its reference count incremented until
1563 	 * after calling vgone. If the reference count were
1564 	 * incremented first, vgone would (incorrectly) try to
1565 	 * close the previous instance of the underlying object.
1566 	 * So, the back pointer is explicitly set to `0xdeadb' in
1567 	 * getnewvnode after removing it from the freelist to ensure
1568 	 * that we do not try to move it here.
1569 	 */
1570 	if (vp->v_usecount == 0) {
1571 		simple_lock(&vnode_free_list_slock);
1572 		if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
1573 			vnode_free_list.tqh_first != vp) {
1574 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1575 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1576 		}
1577 		simple_unlock(&vnode_free_list_slock);
1578 	}
1579 
1580 	vp->v_type = VBAD;
1581 }
1582 
1583 /*
1584  * Lookup a vnode by device number.
1585  */
1586 int
1587 vfinddev(dev, type, vpp)
1588 	dev_t dev;
1589 	enum vtype type;
1590 	struct vnode **vpp;
1591 {
1592 	register struct vnode *vp;
1593 	int rc = 0;
1594 
1595 	simple_lock(&spechash_slock);
1596 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1597 		if (dev != vp->v_rdev || type != vp->v_type)
1598 			continue;
1599 		*vpp = vp;
1600 		rc = 1;
1601 		break;
1602 	}
1603 	simple_unlock(&spechash_slock);
1604 	return (rc);
1605 }
1606 
1607 /*
1608  * Calculate the total number of references to a special device.
1609  */
1610 int
1611 vcount(vp)
1612 	register struct vnode *vp;
1613 {
1614 	struct vnode *vq, *vnext;
1615 	int count;
1616 
1617 loop:
1618 	if ((vp->v_flag & VALIASED) == 0)
1619 		return (vp->v_usecount);
1620 	simple_lock(&spechash_slock);
1621 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1622 		vnext = vq->v_specnext;
1623 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1624 			continue;
1625 		/*
1626 		 * Alias, but not in use, so flush it out.
1627 		 */
1628 		if (vq->v_usecount == 0 && vq != vp) {
1629 			simple_unlock(&spechash_slock);
1630 			vgone(vq);
1631 			goto loop;
1632 		}
1633 		count += vq->v_usecount;
1634 	}
1635 	simple_unlock(&spechash_slock);
1636 	return (count);
1637 }
1638 
1639 /*
1640  * Print out a description of a vnode.
1641  */
1642 static char *typename[] =
1643 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
1644 
1645 void
1646 vprint(label, vp)
1647 	char *label;
1648 	register struct vnode *vp;
1649 {
1650 	char buf[64];
1651 
1652 	if (label != NULL)
1653 		printf("%s: %x: ", label, vp);
1654 	else
1655 		printf("%x: ", vp);
1656 	printf("type %s, usecount %d, writecount %d, refcount %ld,",
1657 	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1658 	    vp->v_holdcnt);
1659 	buf[0] = '\0';
1660 	if (vp->v_flag & VROOT)
1661 		strcat(buf, "|VROOT");
1662 	if (vp->v_flag & VTEXT)
1663 		strcat(buf, "|VTEXT");
1664 	if (vp->v_flag & VSYSTEM)
1665 		strcat(buf, "|VSYSTEM");
1666 	if (vp->v_flag & VXLOCK)
1667 		strcat(buf, "|VXLOCK");
1668 	if (vp->v_flag & VXWANT)
1669 		strcat(buf, "|VXWANT");
1670 	if (vp->v_flag & VBWAIT)
1671 		strcat(buf, "|VBWAIT");
1672 	if (vp->v_flag & VALIASED)
1673 		strcat(buf, "|VALIASED");
1674 	if (buf[0] != '\0')
1675 		printf(" flags (%s)", &buf[1]);
1676 	if (vp->v_data == NULL) {
1677 		printf("\n");
1678 	} else {
1679 		printf("\n\t");
1680 		VOP_PRINT(vp);
1681 	}
1682 }
1683 
1684 #ifdef DDB
1685 /*
1686  * List all of the locked vnodes in the system.
1687  * Called when debugging the kernel.
1688  */
1689 void
1690 printlockedvnodes()
1691 {
1692 	struct proc *p = curproc;	/* XXX */
1693 	struct mount *mp, *nmp;
1694 	struct vnode *vp;
1695 
1696 	printf("Locked vnodes\n");
1697 	simple_lock(&mountlist_slock);
1698 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
1699 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
1700 			nmp = mp->mnt_list.cqe_next;
1701 			continue;
1702 		}
1703 		for (vp = mp->mnt_vnodelist.lh_first;
1704 		     vp != NULL;
1705 		     vp = vp->v_mntvnodes.le_next) {
1706 			if (VOP_ISLOCKED(vp))
1707 				vprint((char *)0, vp);
1708 		}
1709 		simple_lock(&mountlist_slock);
1710 		nmp = mp->mnt_list.cqe_next;
1711 		vfs_unbusy(mp, p);
1712 	}
1713 	simple_unlock(&mountlist_slock);
1714 }
1715 #endif
1716 
1717 /*
1718  * Top level filesystem related information gathering.
1719  */
1720 static int	sysctl_ovfs_conf __P(SYSCTL_HANDLER_ARGS);
1721 
1722 static int
1723 vfs_sysctl SYSCTL_HANDLER_ARGS
1724 {
1725 	int *name = (int *)arg1 - 1;	/* XXX */
1726 	u_int namelen = arg2 + 1;	/* XXX */
1727 	struct vfsconf *vfsp;
1728 
1729 #ifndef NO_COMPAT_PRELITE2
1730 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
1731 	if (namelen == 1)
1732 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
1733 #endif
1734 
1735 #ifdef notyet
1736 	/* all sysctl names at this level are at least name and field */
1737 	if (namelen < 2)
1738 		return (ENOTDIR);		/* overloaded */
1739 	if (name[0] != VFS_GENERIC) {
1740 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1741 			if (vfsp->vfc_typenum == name[0])
1742 				break;
1743 		if (vfsp == NULL)
1744 			return (EOPNOTSUPP);
1745 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1746 		    oldp, oldlenp, newp, newlen, p));
1747 	}
1748 #endif
1749 	switch (name[1]) {
1750 	case VFS_MAXTYPENUM:
1751 		if (namelen != 2)
1752 			return (ENOTDIR);
1753 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
1754 	case VFS_CONF:
1755 		if (namelen != 3)
1756 			return (ENOTDIR);	/* overloaded */
1757 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1758 			if (vfsp->vfc_typenum == name[2])
1759 				break;
1760 		if (vfsp == NULL)
1761 			return (EOPNOTSUPP);
1762 		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
1763 	}
1764 	return (EOPNOTSUPP);
1765 }
1766 
1767 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
1768 	"Generic filesystem");
1769 
1770 #ifndef NO_COMPAT_PRELITE2
1771 
1772 static int
1773 sysctl_ovfs_conf SYSCTL_HANDLER_ARGS
1774 {
1775 	int error;
1776 	struct vfsconf *vfsp;
1777 	struct ovfsconf ovfs;
1778 
1779 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
1780 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
1781 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
1782 		ovfs.vfc_index = vfsp->vfc_typenum;
1783 		ovfs.vfc_refcount = vfsp->vfc_refcount;
1784 		ovfs.vfc_flags = vfsp->vfc_flags;
1785 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
1786 		if (error)
1787 			return error;
1788 	}
1789 	return 0;
1790 }
1791 
1792 #endif /* !NO_COMPAT_PRELITE2 */
1793 
1794 int kinfo_vdebug = 1;
1795 int kinfo_vgetfailed;
1796 
1797 #define KINFO_VNODESLOP	10
1798 /*
1799  * Dump vnode list (via sysctl).
1800  * Copyout address of vnode followed by vnode.
1801  */
1802 /* ARGSUSED */
1803 static int
1804 sysctl_vnode SYSCTL_HANDLER_ARGS
1805 {
1806 	struct proc *p = curproc;	/* XXX */
1807 	struct mount *mp, *nmp;
1808 	struct vnode *nvp, *vp;
1809 	int error;
1810 
1811 #define VPTRSZ	sizeof (struct vnode *)
1812 #define VNODESZ	sizeof (struct vnode)
1813 
1814 	req->lock = 0;
1815 	if (!req->oldptr) /* Make an estimate */
1816 		return (SYSCTL_OUT(req, 0,
1817 			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
1818 
1819 	simple_lock(&mountlist_slock);
1820 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
1821 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
1822 			nmp = mp->mnt_list.cqe_next;
1823 			continue;
1824 		}
1825 again:
1826 		simple_lock(&mntvnode_slock);
1827 		for (vp = mp->mnt_vnodelist.lh_first;
1828 		     vp != NULL;
1829 		     vp = nvp) {
1830 			/*
1831 			 * Check that the vp is still associated with
1832 			 * this filesystem.  RACE: could have been
1833 			 * recycled onto the same filesystem.
1834 			 */
1835 			if (vp->v_mount != mp) {
1836 				simple_unlock(&mntvnode_slock);
1837 				if (kinfo_vdebug)
1838 					printf("kinfo: vp changed\n");
1839 				goto again;
1840 			}
1841 			nvp = vp->v_mntvnodes.le_next;
1842 			simple_unlock(&mntvnode_slock);
1843 			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
1844 			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
1845 				return (error);
1846 			simple_lock(&mntvnode_slock);
1847 		}
1848 		simple_unlock(&mntvnode_slock);
1849 		simple_lock(&mountlist_slock);
1850 		nmp = mp->mnt_list.cqe_next;
1851 		vfs_unbusy(mp, p);
1852 	}
1853 	simple_unlock(&mountlist_slock);
1854 
1855 	return (0);
1856 }
1857 
1858 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
1859 	0, 0, sysctl_vnode, "S,vnode", "");
1860 
1861 /*
1862  * Check to see if a filesystem is mounted on a block device.
1863  */
1864 int
1865 vfs_mountedon(vp)
1866 	struct vnode *vp;
1867 {
1868 	struct vnode *vq;
1869 	int error = 0;
1870 
1871 	if (vp->v_specflags & SI_MOUNTEDON)
1872 		return (EBUSY);
1873 	if (vp->v_flag & VALIASED) {
1874 		simple_lock(&spechash_slock);
1875 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1876 			if (vq->v_rdev != vp->v_rdev ||
1877 			    vq->v_type != vp->v_type)
1878 				continue;
1879 			if (vq->v_specflags & SI_MOUNTEDON) {
1880 				error = EBUSY;
1881 				break;
1882 			}
1883 		}
1884 		simple_unlock(&spechash_slock);
1885 	}
1886 	return (error);
1887 }
1888 
1889 /*
1890  * Unmount all filesystems. The list is traversed in reverse order
1891  * of mounting to avoid dependencies.
1892  */
1893 void
1894 vfs_unmountall()
1895 {
1896 	struct mount *mp, *nmp;
1897 	struct proc *p = initproc;	/* XXX XXX should this be proc0? */
1898 	int error;
1899 
1900 	/*
1901 	 * Since this only runs when rebooting, it is not interlocked.
1902 	 */
1903 	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
1904 		nmp = mp->mnt_list.cqe_prev;
1905 		error = dounmount(mp, MNT_FORCE, p);
1906 		if (error) {
1907 			printf("unmount of %s failed (",
1908 			    mp->mnt_stat.f_mntonname);
1909 			if (error == EBUSY)
1910 				printf("BUSY)\n");
1911 			else
1912 				printf("%d)\n", error);
1913 		}
1914 	}
1915 }
1916 
1917 /*
1918  * Build hash lists of net addresses and hang them off the mount point.
1919  * Called by ufs_mount() to set up the lists of export addresses.
1920  */
1921 static int
1922 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1923 	struct export_args *argp)
1924 {
1925 	register struct netcred *np;
1926 	register struct radix_node_head *rnh;
1927 	register int i;
1928 	struct radix_node *rn;
1929 	struct sockaddr *saddr, *smask = 0;
1930 	struct domain *dom;
1931 	int error;
1932 
1933 	if (argp->ex_addrlen == 0) {
1934 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1935 			return (EPERM);
1936 		np = &nep->ne_defexported;
1937 		np->netc_exflags = argp->ex_flags;
1938 		np->netc_anon = argp->ex_anon;
1939 		np->netc_anon.cr_ref = 1;
1940 		mp->mnt_flag |= MNT_DEFEXPORTED;
1941 		return (0);
1942 	}
1943 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1944 	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
1945 	bzero((caddr_t) np, i);
1946 	saddr = (struct sockaddr *) (np + 1);
1947 	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
1948 		goto out;
1949 	if (saddr->sa_len > argp->ex_addrlen)
1950 		saddr->sa_len = argp->ex_addrlen;
1951 	if (argp->ex_masklen) {
1952 		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
1953 		error = copyin(argp->ex_addr, (caddr_t) smask, argp->ex_masklen);
1954 		if (error)
1955 			goto out;
1956 		if (smask->sa_len > argp->ex_masklen)
1957 			smask->sa_len = argp->ex_masklen;
1958 	}
1959 	i = saddr->sa_family;
1960 	if ((rnh = nep->ne_rtable[i]) == 0) {
1961 		/*
1962 		 * It seems silly to initialize every AF when most are not
1963 		 * used, so do so on demand here.
1964 		 */
1965 		for (dom = domains; dom; dom = dom->dom_next)
1966 			if (dom->dom_family == i && dom->dom_rtattach) {
1967 				dom->dom_rtattach((void **) &nep->ne_rtable[i],
1968 				    dom->dom_rtoffset);
1969 				break;
1970 			}
1971 		if ((rnh = nep->ne_rtable[i]) == 0) {
1972 			error = ENOBUFS;
1973 			goto out;
1974 		}
1975 	}
1976 	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
1977 	    np->netc_rnodes);
1978 	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
1979 		error = EPERM;
1980 		goto out;
1981 	}
1982 	np->netc_exflags = argp->ex_flags;
1983 	np->netc_anon = argp->ex_anon;
1984 	np->netc_anon.cr_ref = 1;
1985 	return (0);
1986 out:
1987 	free(np, M_NETADDR);
1988 	return (error);
1989 }
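
/*
 * Editorial layout note: the single malloc above carves one buffer into
 * | struct netcred | export address | export mask |, so saddr is
 * (struct sockaddr *)(np + 1) and smask sits ex_addrlen bytes past it.
 * The radix nodes embedded in netc_rnodes are what rnh_addaddr() links
 * into the per-address-family tree.
 */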
1990 
1991 /* ARGSUSED */
1992 static int
1993 vfs_free_netcred(struct radix_node *rn, void *w)
1994 {
1995 	register struct radix_node_head *rnh = (struct radix_node_head *) w;
1996 
1997 	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
1998 	free((caddr_t) rn, M_NETADDR);
1999 	return (0);
2000 }
2001 
2002 /*
2003  * Free the net address hash lists that are hanging off the mount points.
2004  */
2005 static void
2006 vfs_free_addrlist(struct netexport *nep)
2007 {
2008 	register int i;
2009 	register struct radix_node_head *rnh;
2010 
2011 	for (i = 0; i <= AF_MAX; i++)
2012 		if ((rnh = nep->ne_rtable[i])) {
2013 			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
2014 			    (caddr_t) rnh);
2015 			free((caddr_t) rnh, M_RTABLE);
2016 			nep->ne_rtable[i] = 0;
2017 		}
2018 }
2019 
2020 int
2021 vfs_export(mp, nep, argp)
2022 	struct mount *mp;
2023 	struct netexport *nep;
2024 	struct export_args *argp;
2025 {
2026 	int error;
2027 
2028 	if (argp->ex_flags & MNT_DELEXPORT) {
2029 		vfs_free_addrlist(nep);
2030 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2031 	}
2032 	if (argp->ex_flags & MNT_EXPORTED) {
2033 		if ((error = vfs_hang_addrlist(mp, nep, argp)))
2034 			return (error);
2035 		mp->mnt_flag |= MNT_EXPORTED;
2036 	}
2037 	return (0);
2038 }
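
/*
 * Editorial usage sketch (illustrative only): a filesystem's mount
 * routine forwards export arguments here, e.g.
 *
 *	error = vfs_export(mp, &ump->um_export, &args.export);
 *
 * where um_export names the per-mount struct netexport; the field and
 * argument names are placeholders.
 */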
2039 
2040 struct netcred *
2041 vfs_export_lookup(mp, nep, nam)
2042 	register struct mount *mp;
2043 	struct netexport *nep;
2044 	struct mbuf *nam;
2045 {
2046 	register struct netcred *np;
2047 	register struct radix_node_head *rnh;
2048 	struct sockaddr *saddr;
2049 
2050 	np = NULL;
2051 	if (mp->mnt_flag & MNT_EXPORTED) {
2052 		/*
2053 		 * Lookup in the export list first.
2054 		 */
2055 		if (nam != NULL) {
2056 			saddr = mtod(nam, struct sockaddr *);
2057 			rnh = nep->ne_rtable[saddr->sa_family];
2058 			if (rnh != NULL) {
2059 				np = (struct netcred *)
2060 					(*rnh->rnh_matchaddr)((caddr_t)saddr,
2061 							      rnh);
2062 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2063 					np = NULL;
2064 			}
2065 		}
2066 		/*
2067 		 * If no address match, use the default if it exists.
2068 		 */
2069 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
2070 			np = &nep->ne_defexported;
2071 	}
2072 	return (np);
2073 }
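
/*
 * Editorial note: callers such as the NFS server use this to map a
 * client address (the mbuf nam) to the credentials it should receive.
 * A specific radix-tree match wins, otherwise the MNT_DEFEXPORTED
 * default entry is used, and a NULL return means the host has no
 * access to this export.
 */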
2074 
2075 /*
2076  * Perform msync on all vnodes under a mount point.
2077  * The mount point must be locked.
2078  */
2079 void
2080 vfs_msync(struct mount *mp, int flags) {
2081 	struct vnode *vp, *nvp;
2082 loop:
2083 	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
2084 
2085 		if (vp->v_mount != mp)
2086 			goto loop;
2087 		nvp = vp->v_mntvnodes.le_next;
2088 		if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
2089 			continue;
2090 		if (vp->v_object &&
2091 		   (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
2092 			vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
2093 		}
2094 	}
2095 }
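
/*
 * Editorial note: this walks mnt_vnodelist and pushes out any VM object
 * flagged OBJ_MIGHTBEDIRTY.  With MNT_WAIT even locked vnodes are
 * processed; otherwise they are skipped, per the VOP_ISLOCKED test above.
 */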
2096 
2097 /*
2098  * Create the VM object needed for VMIO and mmap support.  This
2099  * is done for all VREG files in the system.  Some filesystems can
2100  * take advantage of the additional metadata buffering provided by
2101  * the VMIO code by putting the device node into VMIO mode as well.
2102  */
2103 int
2104 vfs_object_create(vp, p, cred, waslocked)
2105 	struct vnode *vp;
2106 	struct proc *p;
2107 	struct ucred *cred;
2108 	int waslocked;
2109 {
2110 	struct vattr vat;
2111 	vm_object_t object;
2112 	int error = 0;
2113 
2114 retry:
2115 	if ((object = vp->v_object) == NULL) {
2116 		if (vp->v_type == VREG) {
2117 			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
2118 				goto retn;
2119 			(void) vnode_pager_alloc(vp,
2120 				OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
2121 		} else {
2122 			/*
2123 			 * This simply allocates the biggest object possible
2124 			 * for a VBLK vnode.  This should be fixed, but doesn't
2125 			 * cause any problems (yet).
2126 			 */
2127 			(void) vnode_pager_alloc(vp, INT_MAX, 0, 0);
2128 		}
2129 		vp->v_object->flags |= OBJ_VFS_REF;
2130 	} else {
2131 		if (object->flags & OBJ_DEAD) {
2132 			if (waslocked)
2133 				VOP_UNLOCK(vp, 0, p);
2134 			tsleep(object, PVM, "vodead", 0);
2135 			if (waslocked)
2136 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
2137 			goto retry;
2138 		}
2139 		if ((object->flags & OBJ_VFS_REF) == 0) {
2140 			object->flags |= OBJ_VFS_REF;
2141 			vm_object_reference(object);
2142 		}
2143 	}
2144 	if (vp->v_object)
2145 		vp->v_flag |= VVMIO;
2146 
2147 retn:
2148 	return error;
2149 }
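
/*
 * Editorial note: vget() and vref() above call
 * vfs_object_create(vp, curproc, curproc->p_ucred, 0) whenever a VREG
 * vnode is referenced without a VFS-referenced VM object, which is how
 * every regular file acquires the object needed for VMIO and mmap.
 */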
2150