xref: /freebsd/sys/kern/vfs_subr.c (revision ef5d438ed4bc17ad7ece3e40fe4d1f9baf3aadf7)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
39  * $Id: vfs_subr.c,v 1.51 1996/01/04 21:12:26 wollman Exp $
40  */
41 
42 /*
43  * External virtual filesystem routines
44  */
45 #include "opt_ddb.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/file.h>
51 #include <sys/proc.h>
52 #include <sys/mount.h>
53 #include <sys/time.h>
54 #include <sys/vnode.h>
55 #include <sys/stat.h>
56 #include <sys/namei.h>
57 #include <sys/ucred.h>
58 #include <sys/buf.h>
59 #include <sys/errno.h>
60 #include <sys/malloc.h>
61 #include <sys/domain.h>
62 #include <sys/mbuf.h>
63 
64 #include <vm/vm.h>
65 #include <vm/vm_param.h>
66 #include <vm/vm_object.h>
67 #include <vm/vm_extern.h>
68 #include <sys/sysctl.h>
69 
70 #include <miscfs/specfs/specdev.h>
71 
72 #ifdef DDB
73 extern void	printlockedvnodes __P((void));
74 #endif
75 extern void	vclean __P((struct vnode *vp, int flags));
76 extern void	vfs_unmountroot __P((struct mount *rootfs));
77 
/*
 * Conversion tables between on-disk inode type bits (IFMT >> 12) and
 * vnode types.  iftovt_tab maps IFMT nibble -> enum vtype; unassigned
 * slots map to VNON (or VBAD for the all-ones pattern).
 */
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
/* Reverse map: indexed by enum vtype value, yields the S_IF* mode bits. */
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
86 
/*
 * Insq/Remq for the vnode usage lists.
 * bufinsvn: put bp at the head of a vnode buffer list (clean or dirty).
 * bufremvn: take bp off its list and mark it off-list by setting its
 * forward link to NOLIST, which is what brelvp()/reassignbuf() test.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {  \
	LIST_REMOVE(bp, b_vnbufs); \
	(bp)->b_vnbufs.le_next = NOLIST; \
}
95 
TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
u_long freevnodes	= 0;	/* count of vnodes on the free list */

struct mntlist mountlist;	/* mounted filesystem list */

int desiredvnodes;		/* target size of the vnode pool */
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RD, &desiredvnodes, 0, "");

/* Forward declarations for export address-list management helpers. */
static void	vfs_free_addrlist __P((struct netexport *nep));
static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
				       struct export_args *argp));
108 
/*
 * Initialize the vnode management data structures.
 * Called once at boot, before any vnodes are allocated.
 */
void
vntblinit()
{
	/* Size the vnode pool from the process limit plus the VM cache. */
	desiredvnodes = maxproc + vm_object_cache_max;

	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
}
120 
121 /*
122  * Lock a filesystem.
123  * Used to prevent access to it while mounting and unmounting.
124  */
125 int
126 vfs_lock(mp)
127 	register struct mount *mp;
128 {
129 
130 	while (mp->mnt_flag & MNT_MLOCK) {
131 		mp->mnt_flag |= MNT_MWAIT;
132 		(void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
133 	}
134 	mp->mnt_flag |= MNT_MLOCK;
135 	return (0);
136 }
137 
138 /*
139  * Unlock a locked filesystem.
140  * Panic if filesystem is not locked.
141  */
142 void
143 vfs_unlock(mp)
144 	register struct mount *mp;
145 {
146 
147 	if ((mp->mnt_flag & MNT_MLOCK) == 0)
148 		panic("vfs_unlock: not locked");
149 	mp->mnt_flag &= ~MNT_MLOCK;
150 	if (mp->mnt_flag & MNT_MWAIT) {
151 		mp->mnt_flag &= ~MNT_MWAIT;
152 		wakeup((caddr_t) mp);
153 	}
154 }
155 
156 /*
157  * Mark a mount point as busy.
158  * Used to synchronize access and to delay unmounting.
159  */
160 int
161 vfs_busy(mp)
162 	register struct mount *mp;
163 {
164 
165 	while (mp->mnt_flag & MNT_MPBUSY) {
166 		mp->mnt_flag |= MNT_MPWANT;
167 		(void) tsleep((caddr_t) &mp->mnt_flag, PVFS, "vfsbsy", 0);
168 	}
169 	if (mp->mnt_flag & MNT_UNMOUNT)
170 		return (1);
171 	mp->mnt_flag |= MNT_MPBUSY;
172 	return (0);
173 }
174 
175 /*
176  * Free a busy filesystem.
177  * Panic if filesystem is not busy.
178  */
179 void
180 vfs_unbusy(mp)
181 	register struct mount *mp;
182 {
183 
184 	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
185 		panic("vfs_unbusy: not busy");
186 	mp->mnt_flag &= ~MNT_MPBUSY;
187 	if (mp->mnt_flag & MNT_MPWANT) {
188 		mp->mnt_flag &= ~MNT_MPWANT;
189 		wakeup((caddr_t) &mp->mnt_flag);
190 	}
191 }
192 
/*
 * Forcibly unmount the root filesystem at halt time: busy and lock it,
 * flush its cached vnodes and name-cache entries, sync it, then unmount
 * with MNT_FORCE.  Failures are reported to the console but not fatal.
 */
void
vfs_unmountroot(struct mount *rootfs)
{
	struct mount *mp = rootfs;
	int error;

	if (vfs_busy(mp)) {
		printf("failed to unmount root\n");
		return;
	}
	/* Signal an unmount in progress so vfs_busy() callers back off. */
	mp->mnt_flag |= MNT_UNMOUNT;
	if ((error = vfs_lock(mp))) {
		printf("lock of root filesystem failed (%d)\n", error);
		return;
	}
	vnode_pager_umount(mp);	/* release cached vnodes */
	cache_purgevfs(mp);	/* remove cache entries for this file sys */

	/* Push dirty data to disk before tearing the mount down. */
	if ((error = VFS_SYNC(mp, MNT_WAIT, initproc->p_ucred, initproc)))
		printf("sync of root filesystem failed (%d)\n", error);

	if ((error = VFS_UNMOUNT(mp, MNT_FORCE, initproc))) {
		printf("unmount of root filesystem failed (");
		if (error == EBUSY)
			printf("BUSY)\n");
		else
			printf("%d)\n", error);
	}
	mp->mnt_flag &= ~MNT_UNMOUNT;
	vfs_unbusy(mp);
}
224 
/*
 * Unmount all filesystems.  Should only be called by halt().
 * Walks the mount list backwards (reverse of mount order) so nested
 * mounts come down before their parents; the root is saved for last
 * and handed to vfs_unmountroot().
 */
void
vfs_unmountall()
{
	struct mount *mp, *nmp, *rootfs = NULL;
	int error;

	/* unmount all but rootfs */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		/* Capture the predecessor before dounmount() unlinks mp. */
		nmp = mp->mnt_list.cqe_prev;

		if (mp->mnt_flag & MNT_ROOTFS) {
			rootfs = mp;
			continue;
		}
		error = dounmount(mp, MNT_FORCE, initproc);
		if (error) {
			printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		}
	}

	/* and finally... */
	if (rootfs) {
		vfs_unmountroot(rootfs);
	} else {
		printf("no root filesystem\n");
	}
}
259 
260 /*
261  * Lookup a mount point by filesystem identifier.
262  */
263 struct mount *
264 getvfs(fsid)
265 	fsid_t *fsid;
266 {
267 	register struct mount *mp;
268 
269 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
270 	    mp = mp->mnt_list.cqe_next) {
271 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
272 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
273 			return (mp);
274 	}
275 	return ((struct mount *) 0);
276 }
277 
/*
 * Get a new unique fsid
 *
 * val[0] is built from a pseudo device number (nblkdev + mtype as the
 * major, a per-boot counter as the minor); val[1] is the filesystem
 * type.  The candidate is probed against existing mounts via getvfs()
 * and bumped until it no longer collides.
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	/* Monotonic minor-number source; shared across all mounts. */
	static u_short xxxfs_mntid;

	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	/* Minor number 0 is reserved; never hand it out. */
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		/* Walk forward until the candidate fsid is unused. */
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}
304 
305 /*
306  * Set vnode attributes to VNOVAL
307  */
308 void
309 vattr_null(vap)
310 	register struct vattr *vap;
311 {
312 
313 	vap->va_type = VNON;
314 	vap->va_size = VNOVAL;
315 	vap->va_bytes = VNOVAL;
316 	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
317 	    vap->va_fsid = vap->va_fileid =
318 	    vap->va_blocksize = vap->va_rdev =
319 	    vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
320 	    vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
321 	    vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
322 	    vap->va_flags = vap->va_gen = VNOVAL;
323 	vap->va_vaflags = 0;
324 }
325 
326 /*
327  * Routines having to do with the management of the vnode table.
328  */
329 extern vop_t **dead_vnodeop_p;
330 
/*
 * Return the next vnode from the free list.
 * Allocates a fresh vnode when the pool should grow; otherwise
 * recycles the least-recently-freed vnode.  On success *vpp holds a
 * vnode with usecount 1, type VNON, and the given tag/ops/mount.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	register struct vnode *vp;

retry:
	vp = vnode_free_list.tqh_first;
	/*
	 * we allocate a new vnode if
	 * 	1. we don't have any free
	 *		Pretty obvious, we actually used to panic, but that
	 *		is a silly thing to do.
	 *	2. we havn't filled our pool yet
	 *		We don't want to trash the incore (VM-)vnodecache.
	 *	3. if less that 1/4th of our vnodes are free.
	 *		We don't want to trash the namei cache either.
	 */
	if (freevnodes < (numvnodes >> 2) ||
	    numvnodes < desiredvnodes ||
	    vp == NULL) {
		vp = (struct vnode *) malloc((u_long) sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		numvnodes++;
	} else {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		if (vp->v_usage > 0) {
			/*
			 * Recently-used vnode: decay its usage count,
			 * rotate it to the tail, and try the next one.
			 */
			--vp->v_usage;
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			goto retry;
		}
		freevnodes--;

		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
		vp->v_lease = NULL;
		/* Detach the old identity unless vgone already ran. */
		if (vp->v_type != VBAD)
			vgone(vp);
		if (vp->v_usecount)
			panic("free vnode isn't");

#ifdef DIAGNOSTIC
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		/* Reset per-vnode bookkeeping for the new identity. */
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
		vp->v_usage = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	/* Caller receives the sole reference. */
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}
413 
414 /*
415  * Move a vnode from one mount queue to another.
416  */
417 void
418 insmntque(vp, mp)
419 	register struct vnode *vp;
420 	register struct mount *mp;
421 {
422 
423 	/*
424 	 * Delete from old mount point vnode list, if on one.
425 	 */
426 	if (vp->v_mount != NULL)
427 		LIST_REMOVE(vp, v_mntvnodes);
428 	/*
429 	 * Insert into list of vnodes for the new mount point, if available.
430 	 */
431 	if ((vp->v_mount = mp) == NULL)
432 		return;
433 	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
434 }
435 
436 /*
437  * Update outstanding I/O count and do wakeup if requested.
438  */
439 void
440 vwakeup(bp)
441 	register struct buf *bp;
442 {
443 	register struct vnode *vp;
444 
445 	bp->b_flags &= ~B_WRITEINPROG;
446 	if ((vp = bp->b_vp)) {
447 		vp->v_numoutput--;
448 		if (vp->v_numoutput < 0)
449 			panic("vwakeup: neg numoutput");
450 		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
451 			vp->v_flag &= ~VBWAIT;
452 			wakeup((caddr_t) &vp->v_numoutput);
453 		}
454 	}
455 }
456 
/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 *
 * V_SAVE: sync dirty data out first instead of discarding it.
 * V_SAVEMETA: keep metadata buffers (negative logical block numbers).
 * slpflag/slptimeo are passed to tsleep when waiting on busy buffers.
 * Returns 0 on success or the tsleep/VOP_FSYNC error.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		/* Write dirty data out before invalidating anything. */
		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		/*
		 * Choose a starting buffer: clean list first, then dirty;
		 * with V_SAVEMETA, skip past metadata buffers.
		 */
		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				/* Buffer in use: wait, then rescan lists. */
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 1), "vinvalbuf",
				    slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
			brelse(bp);
		}
	}

	/* Wait for any outstanding writes on the vnode to drain. */
	s = splbio();
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}
	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, object->size,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}
544 
545 /*
546  * Associate a buffer with a vnode.
547  */
548 void
549 bgetvp(vp, bp)
550 	register struct vnode *vp;
551 	register struct buf *bp;
552 {
553 	int s;
554 
555 	if (bp->b_vp)
556 		panic("bgetvp: not free");
557 	VHOLD(vp);
558 	bp->b_vp = vp;
559 	if (vp->v_type == VBLK || vp->v_type == VCHR)
560 		bp->b_dev = vp->v_rdev;
561 	else
562 		bp->b_dev = NODEV;
563 	/*
564 	 * Insert onto list for new vnode.
565 	 */
566 	s = splbio();
567 	bufinsvn(bp, &vp->v_cleanblkhd);
568 	splx(s);
569 }
570 
571 /*
572  * Disassociate a buffer from a vnode.
573  */
574 void
575 brelvp(bp)
576 	register struct buf *bp;
577 {
578 	struct vnode *vp;
579 	int s;
580 
581 	if (bp->b_vp == (struct vnode *) 0)
582 		panic("brelvp: NULL");
583 	/*
584 	 * Delete from old vnode list, if on one.
585 	 */
586 	s = splbio();
587 	if (bp->b_vnbufs.le_next != NOLIST)
588 		bufremvn(bp);
589 	splx(s);
590 
591 	vp = bp->b_vp;
592 	bp->b_vp = (struct vnode *) 0;
593 	HOLDRELE(vp);
594 }
595 
596 /*
597  * Associate a p-buffer with a vnode.
598  */
599 void
600 pbgetvp(vp, bp)
601 	register struct vnode *vp;
602 	register struct buf *bp;
603 {
604 	if (bp->b_vp)
605 		panic("pbgetvp: not free");
606 	VHOLD(vp);
607 	bp->b_vp = vp;
608 	if (vp->v_type == VBLK || vp->v_type == VCHR)
609 		bp->b_dev = vp->v_rdev;
610 	else
611 		bp->b_dev = NODEV;
612 }
613 
614 /*
615  * Disassociate a p-buffer from a vnode.
616  */
617 void
618 pbrelvp(bp)
619 	register struct buf *bp;
620 {
621 	struct vnode *vp;
622 
623 	if (bp->b_vp == (struct vnode *) 0)
624 		panic("brelvp: NULL");
625 
626 	vp = bp->b_vp;
627 	bp->b_vp = (struct vnode *) 0;
628 	HOLDRELE(vp);
629 }
630 
/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 *
 * Dirty buffers are inserted in ascending logical-block order so the
 * dirty list stays sorted; clean buffers go to the head of the clean
 * list.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buflists *listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		tbp = newvp->v_dirtyblkhd.lh_first;
		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
			/* Empty list or new head position. */
			bufinsvn(bp, &newvp->v_dirtyblkhd);
		} else {
			/* Walk to the last entry with a smaller lblkno. */
			while (tbp->b_vnbufs.le_next &&
				(tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
				tbp = tbp->b_vnbufs.le_next;
			}
			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
		}
	} else {
		listheadp = &newvp->v_cleanblkhd;
		bufinsvn(bp, listheadp);
	}
}
674 
/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 *
 * On success *vpp holds a VBLK vnode for dev (possibly a pre-existing
 * alias found by checkalias()); returns 0, or the getnewvnode() error
 * with *vpp set to 0.  A NODEV argument is silently accepted.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	/* Prefer an existing alias vnode for the same device, if any. */
	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
705 
/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 *
 * Returns NULLVP when nvp itself should be used (it is linked onto
 * the spec hash chain here), or the reclaimed alias vnode otherwise.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	/* Only special-device vnodes can have aliases. */
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		/* vget may sleep; restart the scan if it fails. */
		if (vget(vp, 1))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		/*
		 * No reusable alias: give nvp its specinfo and link it
		 * onto the hash chain; mark both vnodes VALIASED when an
		 * in-use alias exists alongside.
		 */
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	/* Reuse the VT_NON alias: clean it and adopt nvp's identity. */
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
765 
/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 *
 * Returns 0 with a reference held (and the vnode locked when
 * lockflag is set), or 1 if the vnode was being cleaned.
 */
int
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for another
	 * use, we wait for the cleaning to finish and then return failure.
	 * Cleaning is determined either by checking that the VXLOCK flag is
	 * set, or that the use count is zero with the back pointer set to
	 * show that it has been removed from the free list by getnewvnode.
	 * The VXLOCK flag may not have been set yet because vclean is blocked
	 * in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
		vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vget", 0);
		return (1);
	}
	/* First reference reactivates the vnode: pull it off the free list. */
	if (vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
	}
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}
805 
806 /*
807  * Vnode reference, just increment the count
808  */
809 void
810 vref(vp)
811 	struct vnode *vp;
812 {
813 
814 	if (vp->v_usecount <= 0)
815 		panic("vref used where vget required");
816 	vp->v_usecount++;
817 }
818 
/*
 * vput(), just unlock and vrele()
 * Releases the vnode lock, then drops one reference.
 */
void
vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}
830 
/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 * VAGE vnodes are placed at the head of the free list (reclaimed
 * sooner); others go to the tail.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
	if (vp->v_usecount < 0 /* || vp->v_writecount < 0 */ ) {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
#endif
		panic("vrele: negative reference cnt");
	}
	/* Last reference gone: park the vnode on the free list. */
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		vp->v_flag &= ~VAGE;
		vp->v_usage = 0;
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;

	VOP_INACTIVE(vp);
}
864 
#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 * Diagnostic version of the VHOLD() macro: bumps the hold count.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}
876 
/*
 * Page or buffer structure frees a reference.
 * Diagnostic version of the HOLDRELE() macro: drops the hold count,
 * panicking on underflow.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
#endif /* DIAGNOSTIC */
890 
891 /*
892  * Remove any vnodes in the vnode table belonging to mount point mp.
893  *
894  * If MNT_NOFORCE is specified, there should not be any active ones,
895  * return error if any are found (nb: this is a user error, not a
896  * system error). If MNT_FORCE is specified, detach any active vnodes
897  * that are found.
898  */
#ifdef DIAGNOSTIC
/* debug.busyprt sysctl: when set, vflush() prints each busy vnode. */
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, 1, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif
903 
/*
 * Flush vnodes belonging to mount point mp; see the block comment
 * above.  Returns 0 on success or EBUSY if active vnodes remain and
 * FORCECLOSE was not specified.  The mount must already be busied.
 */
int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over a vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
974 
/*
 * Disassociate the underlying file system from a vnode.
 *
 * flags may include DOCLOSE, which flushes buffers and closes an
 * active vnode before reclaiming it.  On return the vnode has the
 * dead vnode ops, tag VT_NON, and VXLOCK cleared; waiters on VXWANT
 * are awakened.
 */
void
vclean(struct vnode *vp, int flags)
{
	int active;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still have
	 * the object locked while it cleans it out. The VOP_LOCK ensures that
	 * the VOP_INACTIVE routine is done with its work. For active vnodes,
	 * it ensures that no other activity can occur while the underlying
	 * object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first wait for
	 * VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and deactivated
	 * before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	/* Drop the reference taken above, if any. */
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}
1043 
/*
 * Eliminate all activity associated with  the requested vnode
 * and with all vnodes aliased to the requested vnode.
 * Each alias on vp's spec hash chain is vgone'd before vp itself.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress, wait until
		 * it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			(void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we are eliminating
		 * its aliases.
		 */
		vp->v_flag |= VXLOCK;
		/* vgone() clears VALIASED once only one alias remains. */
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will really eliminate
		 * the vnode after which time vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}
1086 
/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 * Cleans the vnode, detaches it from its mount and (for devices) the
 * spec alias chain, moves it to the head of the free list when
 * appropriate, and marks it VBAD.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress, wait until it is
	 * done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		LIST_REMOVE(vp, v_mntvnodes);
		vp->v_mount = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		/* Unlink vp from its spec hash chain. */
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			/*
			 * Count the remaining aliases; if exactly one is
			 * left, it is no longer aliased.
			 */
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head, move it to
	 * the head of the list. The test of the back pointer and the
	 * reference count of zero is because it will be removed from the free
	 * list by getnewvnode, but will not have its reference count
	 * incremented until after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to close the
	 * previous instance of the underlying object. So, the back pointer is
	 * explicitly set to `0xdeadb' in getnewvnode after removing it from
	 * the freelist to ensure that we do not try to move it here.
	 */
	if (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
	    vnode_free_list.tqh_first != vp) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	}
	vp->v_type = VBAD;
}
1172 
1173 /*
1174  * Lookup a vnode by device number.
1175  */
1176 int
1177 vfinddev(dev, type, vpp)
1178 	dev_t dev;
1179 	enum vtype type;
1180 	struct vnode **vpp;
1181 {
1182 	register struct vnode *vp;
1183 
1184 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1185 		if (dev != vp->v_rdev || type != vp->v_type)
1186 			continue;
1187 		*vpp = vp;
1188 		return (1);
1189 	}
1190 	return (0);
1191 }
1192 
1193 /*
1194  * Calculate the total number of references to a special device.
1195  */
1196 int
1197 vcount(vp)
1198 	register struct vnode *vp;
1199 {
1200 	register struct vnode *vq, *vnext;
1201 	int count;
1202 
1203 loop:
1204 	if ((vp->v_flag & VALIASED) == 0)
1205 		return (vp->v_usecount);
1206 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1207 		vnext = vq->v_specnext;
1208 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1209 			continue;
1210 		/*
1211 		 * Alias, but not in use, so flush it out.
1212 		 */
1213 		if (vq->v_usecount == 0 && vq != vp) {
1214 			vgone(vq);
1215 			goto loop;
1216 		}
1217 		count += vq->v_usecount;
1218 	}
1219 	return (count);
1220 }
1221 
1222 /*
1223  * Print out a description of a vnode.
1224  */
1225 static char *typename[] =
1226 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
1227 
1228 void
1229 vprint(label, vp)
1230 	char *label;
1231 	register struct vnode *vp;
1232 {
1233 	char buf[64];
1234 
1235 	if (label != NULL)
1236 		printf("%s: ", label);
1237 	printf("type %s, usecount %d, writecount %d, refcount %ld,",
1238 	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1239 	    vp->v_holdcnt);
1240 	buf[0] = '\0';
1241 	if (vp->v_flag & VROOT)
1242 		strcat(buf, "|VROOT");
1243 	if (vp->v_flag & VTEXT)
1244 		strcat(buf, "|VTEXT");
1245 	if (vp->v_flag & VSYSTEM)
1246 		strcat(buf, "|VSYSTEM");
1247 	if (vp->v_flag & VXLOCK)
1248 		strcat(buf, "|VXLOCK");
1249 	if (vp->v_flag & VXWANT)
1250 		strcat(buf, "|VXWANT");
1251 	if (vp->v_flag & VBWAIT)
1252 		strcat(buf, "|VBWAIT");
1253 	if (vp->v_flag & VALIASED)
1254 		strcat(buf, "|VALIASED");
1255 	if (buf[0] != '\0')
1256 		printf(" flags (%s)", &buf[1]);
1257 	if (vp->v_data == NULL) {
1258 		printf("\n");
1259 	} else {
1260 		printf("\n\t");
1261 		VOP_PRINT(vp);
1262 	}
1263 }
1264 
#ifdef DDB
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 *
 * Walks every mount point on the global mount list and, within each,
 * every vnode on that mount's vnode list, vprint()ing those that
 * VOP_ISLOCKED() reports as locked.  DDB-only; takes no locks itself.
 */
void
printlockedvnodes(void)
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	/* The mount list is circular; the head sentinel terminates the walk. */
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *) 0, vp);
	}
}
#endif
1287 
int kinfo_vdebug = 1;	/* enable "vp changed" diagnostics in sysctl_vnode */
int kinfo_vgetfailed;	/* NOTE(review): not referenced in this chunk; presumably counts failed vnode fetches — confirm against rest of file */
1290 
/* Slop added to the size estimate to absorb vnodes created mid-copyout. */
#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 *
 * For each vnode on each busied mount, emits the vnode's kernel address
 * (VPTRSZ bytes) followed by the vnode structure itself (VNODESZ bytes).
 * A NULL oldptr request returns only an estimate of the space needed.
 */
/* ARGSUSED */
static int
sysctl_vnode SYSCTL_HANDLER_ARGS
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		/* Grab the successor before possibly skipping this mount. */
		nmp = mp->mnt_list.cqe_next;
		/* Skip mounts being unmounted rather than blocking on them. */
		if (vfs_busy(mp))
			continue;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with this
			 * filesystem.  RACE: could have been recycled onto
			 * the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				goto again;
			}
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ))) {
				/* Drop the busy reference before bailing out. */
				vfs_unbusy(mp);
				return (error);
			}
		}
		vfs_unbusy(mp);
	}

	return (0);
}
1341 
/* Export the vnode table as the read-only, opaque kern.vnode sysctl. */
SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_vnode, "S,vnode", "");
1344 
1345 /*
1346  * Check to see if a filesystem is mounted on a block device.
1347  */
1348 int
1349 vfs_mountedon(vp)
1350 	register struct vnode *vp;
1351 {
1352 	register struct vnode *vq;
1353 
1354 	if (vp->v_specflags & SI_MOUNTEDON)
1355 		return (EBUSY);
1356 	if (vp->v_flag & VALIASED) {
1357 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1358 			if (vq->v_rdev != vp->v_rdev ||
1359 			    vq->v_type != vp->v_type)
1360 				continue;
1361 			if (vq->v_specflags & SI_MOUNTEDON)
1362 				return (EBUSY);
1363 		}
1364 	}
1365 	return (0);
1366 }
1367 
1368 /*
1369  * Build hash lists of net addresses and hang them off the mount point.
1370  * Called by ufs_mount() to set up the lists of export addresses.
1371  */
1372 static int
1373 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1374 	struct export_args *argp)
1375 {
1376 	register struct netcred *np;
1377 	register struct radix_node_head *rnh;
1378 	register int i;
1379 	struct radix_node *rn;
1380 	struct sockaddr *saddr, *smask = 0;
1381 	struct domain *dom;
1382 	int error;
1383 
1384 	if (argp->ex_addrlen == 0) {
1385 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1386 			return (EPERM);
1387 		np = &nep->ne_defexported;
1388 		np->netc_exflags = argp->ex_flags;
1389 		np->netc_anon = argp->ex_anon;
1390 		np->netc_anon.cr_ref = 1;
1391 		mp->mnt_flag |= MNT_DEFEXPORTED;
1392 		return (0);
1393 	}
1394 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1395 	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
1396 	bzero((caddr_t) np, i);
1397 	saddr = (struct sockaddr *) (np + 1);
1398 	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
1399 		goto out;
1400 	if (saddr->sa_len > argp->ex_addrlen)
1401 		saddr->sa_len = argp->ex_addrlen;
1402 	if (argp->ex_masklen) {
1403 		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
1404 		error = copyin(argp->ex_addr, (caddr_t) smask, argp->ex_masklen);
1405 		if (error)
1406 			goto out;
1407 		if (smask->sa_len > argp->ex_masklen)
1408 			smask->sa_len = argp->ex_masklen;
1409 	}
1410 	i = saddr->sa_family;
1411 	if ((rnh = nep->ne_rtable[i]) == 0) {
1412 		/*
1413 		 * Seems silly to initialize every AF when most are not used,
1414 		 * do so on demand here
1415 		 */
1416 		for (dom = domains; dom; dom = dom->dom_next)
1417 			if (dom->dom_family == i && dom->dom_rtattach) {
1418 				dom->dom_rtattach((void **) &nep->ne_rtable[i],
1419 				    dom->dom_rtoffset);
1420 				break;
1421 			}
1422 		if ((rnh = nep->ne_rtable[i]) == 0) {
1423 			error = ENOBUFS;
1424 			goto out;
1425 		}
1426 	}
1427 	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
1428 	    np->netc_rnodes);
1429 	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
1430 		error = EPERM;
1431 		goto out;
1432 	}
1433 	np->netc_exflags = argp->ex_flags;
1434 	np->netc_anon = argp->ex_anon;
1435 	np->netc_anon.cr_ref = 1;
1436 	return (0);
1437 out:
1438 	free(np, M_NETADDR);
1439 	return (error);
1440 }
1441 
1442 /* ARGSUSED */
1443 static int
1444 vfs_free_netcred(struct radix_node *rn, void *w)
1445 {
1446 	register struct radix_node_head *rnh = (struct radix_node_head *) w;
1447 
1448 	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
1449 	free((caddr_t) rn, M_NETADDR);
1450 	return (0);
1451 }
1452 
1453 /*
1454  * Free the net address hash lists that are hanging off the mount points.
1455  */
1456 static void
1457 vfs_free_addrlist(struct netexport *nep)
1458 {
1459 	register int i;
1460 	register struct radix_node_head *rnh;
1461 
1462 	for (i = 0; i <= AF_MAX; i++)
1463 		if ((rnh = nep->ne_rtable[i])) {
1464 			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
1465 			    (caddr_t) rnh);
1466 			free((caddr_t) rnh, M_RTABLE);
1467 			nep->ne_rtable[i] = 0;
1468 		}
1469 }
1470 
1471 int
1472 vfs_export(mp, nep, argp)
1473 	struct mount *mp;
1474 	struct netexport *nep;
1475 	struct export_args *argp;
1476 {
1477 	int error;
1478 
1479 	if (argp->ex_flags & MNT_DELEXPORT) {
1480 		vfs_free_addrlist(nep);
1481 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
1482 	}
1483 	if (argp->ex_flags & MNT_EXPORTED) {
1484 		if ((error = vfs_hang_addrlist(mp, nep, argp)))
1485 			return (error);
1486 		mp->mnt_flag |= MNT_EXPORTED;
1487 	}
1488 	return (0);
1489 }
1490 
1491 struct netcred *
1492 vfs_export_lookup(mp, nep, nam)
1493 	register struct mount *mp;
1494 	struct netexport *nep;
1495 	struct mbuf *nam;
1496 {
1497 	register struct netcred *np;
1498 	register struct radix_node_head *rnh;
1499 	struct sockaddr *saddr;
1500 
1501 	np = NULL;
1502 	if (mp->mnt_flag & MNT_EXPORTED) {
1503 		/*
1504 		 * Lookup in the export list first.
1505 		 */
1506 		if (nam != NULL) {
1507 			saddr = mtod(nam, struct sockaddr *);
1508 			rnh = nep->ne_rtable[saddr->sa_family];
1509 			if (rnh != NULL) {
1510 				np = (struct netcred *)
1511 				    (*rnh->rnh_matchaddr) ((caddr_t) saddr,
1512 				    rnh);
1513 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
1514 					np = NULL;
1515 			}
1516 		}
1517 		/*
1518 		 * If no address match, use the default if it exists.
1519 		 */
1520 		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
1521 			np = &nep->ne_defexported;
1522 	}
1523 	return (np);
1524 }
1525 
1526 
1527 /*
1528  * perform msync on all vnodes under a mount point
1529  * the mount point must be locked.
1530  */
1531 void
1532 vfs_msync(struct mount *mp, int flags) {
1533 	struct vnode *vp, *nvp;
1534 loop:
1535 	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
1536 
1537 		if (vp->v_mount != mp)
1538 			goto loop;
1539 		nvp = vp->v_mntvnodes.le_next;
1540 		if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
1541 			continue;
1542 		if (vp->v_object &&
1543 		   (((vm_object_t) vp->v_object)->flags & OBJ_MIGHTBEDIRTY)) {
1544 			vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
1545 		}
1546 	}
1547 }
1548