xref: /freebsd/sys/kern/vfs_subr.c (revision 48991a368427cadb9cdac39581d1676c29619c52)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 * $Id: vfs_subr.c,v 1.43 1995/11/20 12:42:11 phk Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

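/*
 * Conversion tables between stat-style S_IF* mode bits and in-core
 * vnode types; they back the IFTOVT() and VTTOIF() macros in
 * <sys/vnode.h> (iftovt_tab is indexed by the S_IFMT bits shifted
 * right 12 bits).
 */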
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {  \
	LIST_REMOVE(bp, b_vnbufs); \
	(bp)->b_vnbufs.le_next = NOLIST; \
}

TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
u_long freevnodes	= 0;

struct mntlist mountlist;	/* mounted filesystem list */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RD, &desiredvnodes, 0, "");

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{
	desiredvnodes = maxproc + vm_object_cache_max;

	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
}

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
int
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		(void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t) mp);
	}
}

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
int
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		(void) tsleep((caddr_t) &mp->mnt_flag, PVFS, "vfsbsy", 0);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
void
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t) &mp->mnt_flag);
	}
}
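
/*
 * A sketch of the usual pattern (sysctl_vnode below does exactly
 * this): bracket any walk of a mount point's vnode list with
 * vfs_busy()/vfs_unbusy() so the filesystem cannot be unmounted
 * underneath the traversal:
 *
 *	if (vfs_busy(mp))
 *		continue;	(mp is being unmounted, skip it)
 *	... walk mp->mnt_vnodelist ...
 *	vfs_unbusy(mp);
 */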

void
vfs_unmountroot(struct mount *rootfs)
{
	struct mount *mp = rootfs;
	int error;

	if (vfs_busy(mp)) {
		printf("failed to unmount root\n");
		return;
	}
	mp->mnt_flag |= MNT_UNMOUNT;
	if ((error = vfs_lock(mp))) {
		printf("lock of root filesystem failed (%d)\n", error);
		return;
	}
	vnode_pager_umount(mp);	/* release cached vnodes */
	cache_purgevfs(mp);	/* remove cache entries for this file sys */

	if ((error = VFS_SYNC(mp, MNT_WAIT, initproc->p_ucred, initproc)))
		printf("sync of root filesystem failed (%d)\n", error);

	if ((error = VFS_UNMOUNT(mp, MNT_FORCE, initproc))) {
		printf("unmount of root filesystem failed (");
		if (error == EBUSY)
			printf("BUSY)\n");
		else
			printf("%d)\n", error);
	}
	mp->mnt_flag &= ~MNT_UNMOUNT;
	vfs_unbusy(mp);
}

/*
 * Unmount all filesystems.  Should only be called by halt().
 */
void
vfs_unmountall()
{
	struct mount *mp, *nmp, *rootfs = NULL;
	int error;

	/* unmount all but rootfs */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;

		if (mp->mnt_flag & MNT_ROOTFS) {
			rootfs = mp;
			continue;
		}
		error = dounmount(mp, MNT_FORCE, initproc);
		if (error) {
			printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		}
	}

	/* and finally... */
	if (rootfs) {
		vfs_unmountroot(rootfs);
	} else {
		printf("no root filesystem\n");
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
			return (mp);
	}
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid.  The id is synthesized from a fake device
 * number (nblkdev + mtype, which cannot clash with any real block
 * device) and a per-boot mount counter; getvfs() verifies uniqueness.
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.ts_sec = vap->va_atime.ts_nsec =
	    vap->va_mtime.ts_sec = vap->va_mtime.ts_nsec =
	    vap->va_ctime.ts_sec = vap->va_ctime.ts_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
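
/*
 * Usage sketch (assumed caller, not from this file): to change a
 * single attribute through VOP_SETATTR(), null the vattr first so
 * every field the caller does not set stays at VNOVAL and is
 * ignored by the filesystem:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_mode = 0644;
 *	error = VOP_SETATTR(vp, &va, cred, p);
 */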

/*
 * Routines having to do with the management of the vnode table.
 */
extern vop_t **dead_vnodeop_p;
extern void vclean();

/*
 * Return a new vnode, either freshly allocated or recycled from the
 * head of the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	register struct vnode *vp;

	vp = vnode_free_list.tqh_first;
	/*
	 * We allocate a new vnode if
	 *	1. we don't have any free
	 *		Pretty obvious, we actually used to panic, but that
	 *		is a silly thing to do.
	 *	2. we haven't filled our pool yet
	 *		We don't want to trash the incore (VM-)vnodecache.
	 *	3. less than 1/4th of our vnodes are free.
	 *		We don't want to trash the namei cache either.
	 */
	if (freevnodes < (numvnodes >> 2) ||
	    numvnodes < desiredvnodes ||
	    vp == NULL) {
		vp = (struct vnode *) malloc((u_long) sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		numvnodes++;
	} else {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;

		if (vp->v_usecount)
			panic("free vnode isn't");

		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);
#ifdef DIAGNOSTIC
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_ralen = 0;
		vp->v_maxra = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}
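
/*
 * Usage sketch (hypothetical filesystem, names assumed): a
 * filesystem's node-allocation path obtains the vnode here and
 * then hangs its private per-file structure off v_data:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = getnewvnode(VT_UFS, mp, myfs_vnodeop_p, &vp);
 *	if (error)
 *		return (error);
 *	vp->v_data = (caddr_t) mynode;	(filesystem-private data)
 */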

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL)
		return;
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
				continue;
			s = splbio();
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 1), "vinvalbuf",
				    slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			splx(s);
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
			brelse(bp);
		}
	}

	s = splbio();
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}
	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, object->size,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}
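
/*
 * Usage sketch (assumed callers): a truncate-to-zero path discards
 * everything after writing it back, while a metadata-preserving
 * flush keeps indirect blocks (buffers with negative b_lblkno):
 *
 *	error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0);
 *	error = vinvalbuf(vp, V_SAVEMETA, NOCRED, p, 0, 0);
 */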

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	s = splbio();
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	splx(s);

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Associate a p-buffer with a vnode.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	if (bp->b_vp)
		panic("pbgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("pbrelvp: NULL");

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	register struct buflists *listheadp;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto
	 * list of clean buffers.  The dirty list is kept sorted by
	 * ascending b_lblkno so the buffers can be written out in
	 * sequential order.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		tbp = newvp->v_dirtyblkhd.lh_first;
		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
			bufinsvn(bp, &newvp->v_dirtyblkhd);
		} else {
			while (tbp->b_vnbufs.le_next &&
			    (tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
				tbp = tbp->b_vnbufs.le_next;
			}
			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
		}
	} else {
		listheadp = &newvp->v_cleanblkhd;
		bufinsvn(bp, listheadp);
	}
}

/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
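
/*
 * Usage sketch (assumed caller, in the style of the root-mount
 * code): the root and swap device vnodes are created this way
 * before the root filesystem is mounted:
 *
 *	if (bdevvp(rootdev, &rootvp))
 *		panic("cannot set up rootvp");
 */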

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp, 1))
			goto loop;
		break;
	}
	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set if the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for another
	 * use, we wait for the cleaning to finish and then return failure.
	 * Cleaning is determined either by checking that the VXLOCK flag is
	 * set, or that the use count is zero with the back pointer set to
	 * show that it has been removed from the free list by getnewvnode.
	 * The VXLOCK flag may not have been set yet because vclean is blocked
	 * in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
		vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vget", 0);
		return (1);
	}
	if (vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
	}
	vp->v_usecount++;
	if (lockflag)
		VOP_LOCK(vp);
	return (0);
}

/*
 * Vnode reference, just increment the count.  Unlike vget(), this
 * requires that the caller already hold a reference; vref() of an
 * unreferenced vnode panics.
 */
void
vref(vp)
	struct vnode *vp;
{

	if (vp->v_usecount <= 0)
		panic("vref used where vget required");
	vp->v_usecount++;
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0)
		return;
#ifdef DIAGNOSTIC
	if (vp->v_usecount < 0 /* || vp->v_writecount < 0 */ ) {
		vprint("vrele: negative ref count", vp);
		panic("vrele: negative reference cnt");
	}
#endif
	if (vp->v_flag & VAGE) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		vp->v_flag &= ~VAGE;
	} else {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;

	VOP_INACTIVE(vp);
}
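
/*
 * Reference-count lifecycle, summarizing the routines above:
 * getnewvnode() hands out a vnode with v_usecount 1; vref()/vget()
 * add references and vrele()/vput() drop them.  When the count
 * reaches zero the vnode is placed back on vnode_free_list (at the
 * head if VAGE is set, else at the tail) and VOP_INACTIVE notifies
 * the filesystem that the last reference is gone.
 */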

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, 1, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;
		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}
		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
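
/*
 * Usage sketch (assumed caller, modeled on a filesystem unmount
 * routine): flush every vnode on the mount, forcibly if MNT_FORCE
 * was given:
 *
 *	flags = (mntflags & MNT_FORCE) ? FORCECLOSE : 0;
 *	if ((error = vflush(mp, NULLVP, flags)))
 *		return (error);
 */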

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags)
{
	int active;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still have
	 * the object locked while it cleans it out. The VOP_LOCK ensures that
	 * the VOP_INACTIVE routine is done with its work. For active vnodes,
	 * it ensures that no other activity can occur while the underlying
	 * object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first wait for
	 * VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and deactivated
	 * before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress, wait until
		 * it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			(void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we are eliminating
		 * its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will really eliminate
		 * the vnode after which time vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}

/*
 * Eliminate all activity associated with a vnode
 * in preparation for reuse.
 */
void
vgone(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;
	struct vnode *vx;

	/*
	 * If a vgone (or vclean) is already in progress, wait until it is
	 * done and return.
	 */
	if (vp->v_flag & VXLOCK) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
		return;
	}
	/*
	 * Clean out the filesystem specific data.
	 */
	vclean(vp, DOCLOSE);
	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		LIST_REMOVE(vp, v_mntvnodes);
		vp->v_mount = NULL;
	}
	/*
	 * If special device, remove it from special device alias list.
	 */
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		if (*vp->v_hashchain == vp) {
			*vp->v_hashchain = vp->v_specnext;
		} else {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_specnext != vp)
					continue;
				vq->v_specnext = vp->v_specnext;
				break;
			}
			if (vq == NULL)
				panic("missing bdev");
		}
		if (vp->v_flag & VALIASED) {
			vx = NULL;
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type)
					continue;
				if (vx)
					break;
				vx = vq;
			}
			if (vx == NULL)
				panic("missing alias");
			if (vq == NULL)
				vx->v_flag &= ~VALIASED;
			vp->v_flag &= ~VALIASED;
		}
		FREE(vp->v_specinfo, M_VNODE);
		vp->v_specinfo = NULL;
	}
	/*
	 * If it is on the freelist and not already at the head, move it to
	 * the head of the list. The test of the back pointer and the
	 * reference count of zero is because it will be removed from the free
	 * list by getnewvnode, but will not have its reference count
	 * incremented until after calling vgone. If the reference count were
	 * incremented first, vgone would (incorrectly) try to close the
	 * previous instance of the underlying object. So, the back pointer is
	 * explicitly set to `0xdeadb' in getnewvnode after removing it from
	 * the freelist to ensure that we do not try to move it here.
	 */
	if (vp->v_usecount == 0 &&
	    vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
	    vnode_free_list.tqh_first != vp) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	}
	vp->v_type = VBAD;
}

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (1);
	}
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %ld,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *) 0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;

#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode SYSCTL_HANDLER_ARGS
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_next;
		if (vfs_busy(mp))
			continue;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with this
			 * filesystem.  RACE: could have been recycled onto
			 * the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				goto again;
			}
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ))) {
				vfs_unbusy(mp);
				return (error);
			}
		}
		vfs_unbusy(mp);
	}

	return (0);
}

SYSCTL_NODE(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	sysctl_vnode, "");


/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
	struct export_args *argp)
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t) np, i);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	register struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			free((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
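
/*
 * Usage sketch (assumed caller, in the style of a filesystem mount
 * routine): the filesystem keeps a struct netexport beside its
 * private mount data and forwards the user's export_args here when
 * a mount update carries export information:
 *
 *	return (vfs_export(mp, &ump->um_export, &args.export));
 */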

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr) ((caddr_t) saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}
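
/*
 * Usage sketch (assumed caller, NFS-server style): given the
 * client's address in an mbuf, look up its export entry and reject
 * the request if the filesystem is not exported to that host:
 *
 *	np = vfs_export_lookup(mp, &ump->um_export, nam);
 *	if (np == NULL)
 *		return (EACCES);
 */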

/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp;
loop:
	for (vp = mp->mnt_vnodelist.lh_first;
	     vp != NULL;
	     vp = vp->v_mntvnodes.le_next) {

		if (vp->v_mount != mp)
			goto loop;
		if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
			continue;
		if (vp->v_object &&
		   (((vm_object_t) vp->v_object)->flags & OBJ_MIGHTBEDIRTY)) {
			vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
		}
	}
}