xref: /freebsd/sys/kern/vfs_subr.c (revision 61afd5bb22d787b0641523e7b9b95c964d669bd5)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 * $Id: vfs_subr.c,v 1.65 1996/11/12 09:24:31 bde Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"
#include "opt_devfs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/time.h>
#include <sys/vnode.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/mbuf.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#ifdef DDB
extern void	printlockedvnodes __P((void));
#endif
extern void	vclean __P((struct vnode *vp, int flags));
extern void	vfs_unmountroot __P((struct mount *rootfs));

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};

/*
 * Insq/Remq for the vnode usage lists.
 */
#define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
#define	bufremvn(bp) {  \
	LIST_REMOVE(bp, b_vnbufs); \
	(bp)->b_vnbufs.le_next = NOLIST; \
}

TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
static u_long freevnodes = 0;

struct mntlist mountlist;	/* mounted filesystem list */

int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, &desiredvnodes, 0, "");

static void	vfs_free_addrlist __P((struct netexport *nep));
static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
				       struct export_args *argp));

/*
 * Initialize the vnode management data structures.
 */
void
vntblinit()
{
	desiredvnodes = maxproc + vm_object_cache_max + extravnodes;

	TAILQ_INIT(&vnode_free_list);
	CIRCLEQ_INIT(&mountlist);
}
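
/*
 * Usage sketch (assumed boot ordering, for illustration): vntblinit() is
 * expected to run once from VFS initialization, before the root
 * filesystem is mounted, so that the free list and mount list exist
 * before the first getnewvnode()/insmntque() calls.
 */
#if 0
	vntblinit();		/* vnode free list and mount list */
	/* ...name cache and per-filesystem initialization follow... */
#endif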

/*
 * Lock a filesystem.
 * Used to prevent access to it while mounting and unmounting.
 */
int
vfs_lock(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MLOCK) {
		mp->mnt_flag |= MNT_MWAIT;
		(void) tsleep((caddr_t) mp, PVFS, "vfslck", 0);
	}
	mp->mnt_flag |= MNT_MLOCK;
	return (0);
}

/*
 * Unlock a locked filesystem.
 * Panic if filesystem is not locked.
 */
void
vfs_unlock(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MLOCK) == 0)
		panic("vfs_unlock: not locked");
	mp->mnt_flag &= ~MNT_MLOCK;
	if (mp->mnt_flag & MNT_MWAIT) {
		mp->mnt_flag &= ~MNT_MWAIT;
		wakeup((caddr_t) mp);
	}
}

/*
 * Mark a mount point as busy.
 * Used to synchronize access and to delay unmounting.
 */
int
vfs_busy(mp)
	register struct mount *mp;
{

	while (mp->mnt_flag & MNT_MPBUSY) {
		mp->mnt_flag |= MNT_MPWANT;
		(void) tsleep((caddr_t) &mp->mnt_flag, PVFS, "vfsbsy", 0);
	}
	if (mp->mnt_flag & MNT_UNMOUNT)
		return (1);
	mp->mnt_flag |= MNT_MPBUSY;
	return (0);
}

/*
 * Free a busy filesystem.
 * Panic if filesystem is not busy.
 */
void
vfs_unbusy(mp)
	register struct mount *mp;
{

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vfs_unbusy: not busy");
	mp->mnt_flag &= ~MNT_MPBUSY;
	if (mp->mnt_flag & MNT_MPWANT) {
		mp->mnt_flag &= ~MNT_MPWANT;
		wakeup((caddr_t) &mp->mnt_flag);
	}
}
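
/*
 * Typical use of the busy/unbusy pair (the same pattern appears in
 * sysctl_vnode() below): busy each mount point before walking its vnode
 * list, skip it if it is being unmounted, and unbusy it when done.
 */
#if 0
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_next;
		if (vfs_busy(mp))	/* returns 1 if being unmounted */
			continue;
		/* ...examine mp->mnt_vnodelist safely... */
		vfs_unbusy(mp);
	}
#endif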

void
vfs_unmountroot(struct mount *rootfs)
{
	struct mount *mp = rootfs;
	int error;

	if (vfs_busy(mp)) {
		printf("failed to unmount root\n");
		return;
	}
	mp->mnt_flag |= MNT_UNMOUNT;
	if ((error = vfs_lock(mp))) {
		printf("lock of root filesystem failed (%d)\n", error);
		return;
	}
	vnode_pager_umount(mp);	/* release cached vnodes */
	cache_purgevfs(mp);	/* remove cache entries for this file sys */

	if ((error = VFS_SYNC(mp, MNT_WAIT, initproc->p_ucred, initproc)))
		printf("sync of root filesystem failed (%d)\n", error);

	if ((error = VFS_UNMOUNT(mp, MNT_FORCE, initproc))) {
		printf("unmount of root filesystem failed (");
		if (error == EBUSY)
			printf("BUSY)\n");
		else
			printf("%d)\n", error);
	}
	mp->mnt_flag &= ~MNT_UNMOUNT;
	vfs_unbusy(mp);
}

/*
 * Unmount all filesystems.  Should only be called by halt().
 */
void
vfs_unmountall()
{
	struct mount *mp, *nmp, *rootfs = NULL;
	int error;

	/* unmount all but rootfs */
	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_prev;

		if (mp->mnt_flag & MNT_ROOTFS) {
			rootfs = mp;
			continue;
		}
		error = dounmount(mp, MNT_FORCE, initproc);
		if (error) {
			printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
			if (error == EBUSY)
				printf("BUSY)\n");
			else
				printf("%d)\n", error);
		}
	}

	/* and finally... */
	if (rootfs) {
		vfs_unmountroot(rootfs);
	} else {
		printf("no root filesystem\n");
	}
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
getvfs(fsid)
	fsid_t *fsid;
{
	register struct mount *mp;

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
			return (mp);
	}
	return ((struct mount *) 0);
}

/*
 * Get a new unique fsid
 */
void
getnewfsid(mp, mtype)
	struct mount *mp;
	int mtype;
{
	static u_short xxxfs_mntid;

	fsid_t tfsid;

	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
	mp->mnt_stat.f_fsid.val[1] = mtype;
	if (xxxfs_mntid == 0)
		++xxxfs_mntid;
	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
	tfsid.val[1] = mtype;
	if (mountlist.cqh_first != (void *)&mountlist) {
		while (getvfs(&tfsid)) {
			tfsid.val[0]++;
			xxxfs_mntid++;
		}
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
}
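
/*
 * Layout note (illustrative): the resulting fsid packs a pseudo device
 * number in val[0], built from nblkdev + the filesystem type plus a
 * per-boot sequence number, and the filesystem type itself in val[1],
 * which is exactly what getvfs() above compares against.
 */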

/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(vap)
	register struct vattr *vap;
{

	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
	    vap->va_fsid = vap->va_fileid =
	    vap->va_blocksize = vap->va_rdev =
	    vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
	    vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
	    vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
	    vap->va_flags = vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
}
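
/*
 * Usage sketch (the standard VOP_SETATTR convention, shown for
 * illustration): clear every attribute to VNOVAL, then set only the
 * fields being changed so the filesystem can tell them apart.
 */
#if 0
	struct vattr va;

	vattr_null(&va);
	va.va_size = 0;				/* e.g. truncate on open */
	error = VOP_SETATTR(vp, &va, cred, p);
#endif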

/*
 * Routines having to do with the management of the vnode table.
 */
extern vop_t **dead_vnodeop_p;

/*
 * Return the next vnode from the free list.
 */
int
getnewvnode(tag, mp, vops, vpp)
	enum vtagtype tag;
	struct mount *mp;
	vop_t **vops;
	struct vnode **vpp;
{
	register struct vnode *vp;

retry:
	vp = vnode_free_list.tqh_first;
	/*
	 * We allocate a new vnode if
	 * 	1. we don't have any free
	 *		Pretty obvious; we actually used to panic, but that
	 *		is a silly thing to do.
	 *	2. we haven't filled our pool yet
	 *		We don't want to trash the incore (VM-)vnodecache.
	 *	3. less than 1/4th of our vnodes are free.
	 *		We don't want to trash the namei cache either.
	 */
	if (freevnodes < (numvnodes >> 2) ||
	    numvnodes < desiredvnodes ||
	    vp == NULL) {
		vp = (struct vnode *) malloc((u_long) sizeof *vp,
		    M_VNODE, M_WAITOK);
		bzero((char *) vp, sizeof *vp);
		numvnodes++;
	} else {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		if (vp->v_usage > 0) {
			--vp->v_usage;
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
			goto retry;
		}
		freevnodes--;
		if (vp->v_usecount)
			panic("free vnode isn't");

		/* see comment on why 0xdeadb is set at end of vgone (below) */
		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
		vp->v_lease = NULL;
		if (vp->v_type != VBAD)
			vgone(vp);

#ifdef DIAGNOSTIC
		{
			int s;

			if (vp->v_data)
				panic("cleaned vnode isn't");
			s = splbio();
			if (vp->v_numoutput)
				panic("Clean vnode has pending I/O's");
			splx(s);
		}
#endif
		vp->v_flag = 0;
		vp->v_lastr = 0;
		vp->v_lastw = 0;
		vp->v_lasta = 0;
		vp->v_cstart = 0;
		vp->v_clen = 0;
		vp->v_socket = 0;
		vp->v_writecount = 0;	/* XXX */
		vp->v_usage = 0;
	}
	vp->v_type = VNON;
	cache_purge(vp);
	vp->v_tag = tag;
	vp->v_op = vops;
	insmntque(vp, mp);
	*vpp = vp;
	vp->v_usecount = 1;
	vp->v_data = 0;
	return (0);
}
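
/*
 * Usage sketch (modeled on a typical VFS_VGET implementation such as
 * UFS's; the inode handling is assumed): a filesystem allocates a fresh
 * vnode, attaches its per-fs node as v_data, and sets v_type once the
 * on-disk data has been read.
 */
#if 0
	struct vnode *vp;

	error = getnewvnode(VT_UFS, mp, ufs_vnodeop_p, &vp);
	if (error)
		return (error);
	vp->v_data = ip;	/* per-filesystem inode (assumed) */
	/* ...read the inode, then derive vp->v_type from its mode... */
#endif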

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(vp, mp)
	register struct vnode *vp;
	register struct mount *mp;
{

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL)
		LIST_REMOVE(vp, v_mntvnodes);
	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 */
	if ((vp->v_mount = mp) == NULL)
		return;
	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(bp)
	register struct buf *bp;
{
	register struct vnode *vp;

	bp->b_flags &= ~B_WRITEINPROG;
	if ((vp = bp->b_vp)) {
		vp->v_numoutput--;
		if (vp->v_numoutput < 0)
			panic("vwakeup: neg numoutput");
		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
			vp->v_flag &= ~VBWAIT;
			wakeup((caddr_t) &vp->v_numoutput);
		}
	}
}

/*
 * Flush out and invalidate all buffers associated with a vnode.
 * Called with the underlying object locked.
 */
int
vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
	register struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int slpflag, slptimeo;
{
	register struct buf *bp;
	struct buf *nbp, *blist;
	int s, error;
	vm_object_t object;

	if (flags & V_SAVE) {
		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
			return (error);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			panic("vinvalbuf: dirty bufs");
	}

	s = splbio();
	for (;;) {
		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
		    (flags & V_SAVEMETA))
			while (blist && blist->b_lblkno < 0)
				blist = blist->b_vnbufs.le_next;
		if (!blist)
			break;

		for (bp = blist; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
				continue;
			if (bp->b_flags & B_BUSY) {
				bp->b_flags |= B_WANTED;
				error = tsleep((caddr_t) bp,
				    slpflag | (PRIBIO + 1), "vinvalbuf",
				    slptimeo);
				splx(s);
				if (error)
					return (error);
				break;
			}
			bremfree(bp);
			bp->b_flags |= B_BUSY;
			/*
			 * XXX Since there are no node locks for NFS, I
			 * believe there is a slight chance that a delayed
			 * write will occur while sleeping just above, so
			 * check for it.
			 */
			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
				(void) VOP_BWRITE(bp);
				break;
			}
			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
			brelse(bp);
		}
	}
	splx(s);

	s = splbio();
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
	}
	splx(s);

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	object = vp->v_object;
	if (object != NULL) {
		vm_object_page_remove(object, 0, object->size,
		    (flags & V_SAVE) ? TRUE : FALSE);
	}
	if (!(flags & V_SAVEMETA) &&
	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
		panic("vinvalbuf: flush failed");
	return (0);
}
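
/*
 * Usage note (illustrative): V_SAVE writes dirty buffers back before
 * invalidating, V_SAVEMETA spares the indirect-block buffers (negative
 * logical block numbers).  A truncate-to-zero path would typically
 * flush and discard everything:
 */
#if 0
	error = vinvalbuf(vp, V_SAVE, cred, p, 0, 0);
#endif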

/*
 * Associate a buffer with a vnode.
 */
void
bgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	int s;

	if (bp->b_vp)
		panic("bgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
	/*
	 * Insert onto list for new vnode.
	 */
	s = splbio();
	bufinsvn(bp, &vp->v_cleanblkhd);
	splx(s);
}

/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;
	int s;

	if (bp->b_vp == (struct vnode *) 0)
		panic("brelvp: NULL");
	/*
	 * Delete from old vnode list, if on one.
	 */
	s = splbio();
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	splx(s);

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Associate a p-buffer with a vnode.
 */
void
pbgetvp(vp, bp)
	register struct vnode *vp;
	register struct buf *bp;
{
	if (bp->b_vp)
		panic("pbgetvp: not free");
	VHOLD(vp);
	bp->b_vp = vp;
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		bp->b_dev = vp->v_rdev;
	else
		bp->b_dev = NODEV;
}

/*
 * Disassociate a p-buffer from a vnode.
 */
void
pbrelvp(bp)
	register struct buf *bp;
{
	struct vnode *vp;

	if (bp->b_vp == (struct vnode *) 0)
		panic("pbrelvp: NULL");

	vp = bp->b_vp;
	bp->b_vp = (struct vnode *) 0;
	HOLDRELE(vp);
}

/*
 * Reassign a buffer from one vnode to another.
 * Used to assign file specific control information
 * (indirect blocks) to the vnode to which they belong.
 */
void
reassignbuf(bp, newvp)
	register struct buf *bp;
	register struct vnode *newvp;
{
	int s;

	if (newvp == NULL) {
		printf("reassignbuf: NULL");
		return;
	}

	s = splbio();
	/*
	 * Delete from old vnode list, if on one.
	 */
	if (bp->b_vnbufs.le_next != NOLIST)
		bufremvn(bp);
	/*
	 * If dirty, put on list of dirty buffers; otherwise insert onto list
	 * of clean buffers.
	 */
	if (bp->b_flags & B_DELWRI) {
		struct buf *tbp;

		tbp = newvp->v_dirtyblkhd.lh_first;
		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
			bufinsvn(bp, &newvp->v_dirtyblkhd);
		} else {
			while (tbp->b_vnbufs.le_next &&
				(tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
				tbp = tbp->b_vnbufs.le_next;
			}
			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
		}
	} else {
		bufinsvn(bp, &newvp->v_cleanblkhd);
	}
	splx(s);
}
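
/*
 * Design note (an inference offered for illustration): keeping
 * v_dirtyblkhd sorted by b_lblkno lets the fsync and clustering code
 * issue writes in ascending logical block order; the clean list needs
 * no ordering, so bufinsvn() simply inserts at the head.
 */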

#ifndef DEVFS_ROOT
/*
 * Create a vnode for a block device.
 * Used for root filesystem, argdev, and swap areas.
 * Also used for memory file system special devices.
 */
int
bdevvp(dev, vpp)
	dev_t dev;
	struct vnode **vpp;
{
	register struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NODEV)
		return (0);
	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
	if (error) {
		*vpp = 0;
		return (error);
	}
	vp = nvp;
	vp->v_type = VBLK;
	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
		vput(vp);
		vp = nvp;
	}
	*vpp = vp;
	return (0);
}
#endif /* !DEVFS_ROOT */

/*
 * Check to see if the new vnode represents a special device
 * for which we already have a vnode (either because of
 * bdevvp() or because of a different vnode representing
 * the same block device). If such an alias exists, deallocate
 * the existing contents and return the aliased vnode. The
 * caller is responsible for filling it with its new contents.
 */
struct vnode *
checkalias(nvp, nvp_rdev, mp)
	register struct vnode *nvp;
	dev_t nvp_rdev;
	struct mount *mp;
{
	register struct vnode *vp;
	struct vnode **vpp;

	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		return (NULLVP);

	vpp = &speclisth[SPECHASH(nvp_rdev)];
loop:
	for (vp = *vpp; vp; vp = vp->v_specnext) {
		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			goto loop;
		}
		if (vget(vp, 1))
			goto loop;
		break;
	}

	if (vp == NULL || vp->v_tag != VT_NON) {
		MALLOC(nvp->v_specinfo, struct specinfo *,
		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
		nvp->v_rdev = nvp_rdev;
		nvp->v_hashchain = vpp;
		nvp->v_specnext = *vpp;
		nvp->v_specflags = 0;
		*vpp = nvp;
		if (vp != NULL) {
			nvp->v_flag |= VALIASED;
			vp->v_flag |= VALIASED;
			vput(vp);
		}
		return (NULLVP);
	}
	VOP_UNLOCK(vp);
	vclean(vp, 0);
	vp->v_op = nvp->v_op;
	vp->v_tag = nvp->v_tag;
	nvp->v_type = VNON;
	insmntque(vp, mp);
	return (vp);
}
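
/*
 * Caller's contract (restated for illustration): a non-NULL return
 * means an existing vnode for the same device was found, cleaned, and
 * handed back; the caller adopts it and releases its own new vnode, as
 * bdevvp() above does with vput().  A NULL return means nvp itself is
 * now the (possibly aliased) vnode for the device.
 */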

/*
 * Grab a particular vnode from the free list, increment its
 * reference count and lock it. The vnode lock bit is set while the
 * vnode is being eliminated in vgone. The process is awakened
 * when the transition is completed, and an error is returned to
 * indicate that the vnode is no longer usable (possibly having
 * been changed to a new file system type).
 */
int
vget(vp, lockflag)
	register struct vnode *vp;
	int lockflag;
{

	/*
	 * If the vnode is in the process of being cleaned out for another
	 * use, we wait for the cleaning to finish and then return failure.
	 * Cleaning is determined either by checking that the VXLOCK flag is
	 * set, or that the use count is zero with the back pointer set to
	 * show that it has been removed from the free list by getnewvnode.
	 * The VXLOCK flag may not have been set yet because vclean is blocked
	 * in the VOP_LOCK call waiting for the VOP_INACTIVE to complete.
	 */
	if ((vp->v_flag & VXLOCK) ||
	    (vp->v_usecount == 0 &&
		vp->v_freelist.tqe_prev == (struct vnode **) 0xdeadb)) {
		vp->v_flag |= VXWANT;
		(void) tsleep((caddr_t) vp, PINOD, "vget", 0);
		return (1);
	}
	if (vp->v_usecount == 0) {
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		freevnodes--;
	}
	vp->v_usecount++;

	/*
	 * Create the VM object, if needed
	 */
	if ((vp->v_type == VREG) &&
		((vp->v_object == NULL) ||
			(vp->v_object->flags & OBJ_VFS_REF) == 0)) {
		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
	}
	if (lockflag)
		VOP_LOCK(vp);

	return (0);
}

/*
 * Vnode reference, just increment the count
 */
void
vref(vp)
	struct vnode *vp;
{
	if (vp->v_usecount <= 0)
		panic("vref used where vget required");

	vp->v_usecount++;

	if ((vp->v_type == VREG) &&
		((vp->v_object == NULL) ||
			((vp->v_object->flags & OBJ_VFS_REF) == 0)) ) {
		/*
		 * We need to lock the VP while the object is
		 * created.  This is necessary to keep the system
		 * from re-entrantly doing it multiple times.
		 */
		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
	}
}

/*
 * vput(), just unlock and vrele()
 */
void
vput(vp)
	register struct vnode *vp;
{
	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Vnode release.
 * If count drops to zero, call inactive routine and return to freelist.
 */
void
vrele(vp)
	register struct vnode *vp;
{

#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vrele: null vp");
#endif

	vp->v_usecount--;

	if ((vp->v_usecount == 1) &&
		vp->v_object &&
		(vp->v_object->flags & OBJ_VFS_REF)) {
		vp->v_object->flags &= ~OBJ_VFS_REF;
		vm_object_deallocate(vp->v_object);
		return;
	}

	if (vp->v_usecount > 0)
		return;

	if (vp->v_usecount < 0) {
#ifdef DIAGNOSTIC
		vprint("vrele: negative ref count", vp);
#endif
		panic("vrele: negative reference cnt");
	}
	if (vp->v_flag & VAGE) {
		if (vp->v_tag != VT_TFS)
			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
		vp->v_flag &= ~VAGE;
		vp->v_usage = 0;
	} else {
		if (vp->v_tag != VT_TFS)
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	freevnodes++;

	VOP_INACTIVE(vp);
}
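
/*
 * Reference protocol sketch (illustration only; assumes the standard
 * 4.4BSD namei interface): a lookup returns a referenced, locked vnode
 * which the holder eventually drops with vput() or VOP_UNLOCK+vrele().
 */
#if 0
	struct nameidata nd;

	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF, UIO_USERSPACE, path, p);
	if ((error = namei(&nd)))
		return (error);
	vp = nd.ni_vp;		/* referenced and locked */
	/* ...use vp... */
	vput(vp);		/* unlock and release in one call */
#endif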

#ifdef DIAGNOSTIC
/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vp)
	register struct vnode *vp;
{

	vp->v_holdcnt++;
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vp)
	register struct vnode *vp;
{

	if (vp->v_holdcnt <= 0)
		panic("holdrele: holdcnt");
	vp->v_holdcnt--;
}
#endif /* DIAGNOSTIC */

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If MNT_NOFORCE is specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If MNT_FORCE is specified, detach any active vnodes
 * that are found.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, 1, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

int
vflush(mp, skipvp, flags)
	struct mount *mp;
	struct vnode *skipvp;
	int flags;
{
	register struct vnode *vp, *nvp;
	int busy = 0;

	if ((mp->mnt_flag & MNT_MPBUSY) == 0)
		panic("vflush: not busy");
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
		/*
		 * Make sure this vnode wasn't reclaimed in getnewvnode().
		 * Start over if it has (it won't be on the list anymore).
		 */
		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Skip over a selected vnode.
		 */
		if (vp == skipvp)
			continue;
		/*
		 * Skip over vnodes marked VSYSTEM.
		 */
		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM))
			continue;
		/*
		 * If WRITECLOSE is set, only flush out regular file vnodes
		 * open for writing.
		 */
		if ((flags & WRITECLOSE) &&
		    (vp->v_writecount == 0 || vp->v_type != VREG))
			continue;

		if (vp->v_object && (vp->v_object->flags & OBJ_VFS_REF)) {
			vm_object_reference(vp->v_object);
			pager_cache(vp->v_object, FALSE);
			vp->v_object->flags &= ~OBJ_VFS_REF;
			vm_object_deallocate(vp->v_object);
		}

		/*
		 * With v_usecount == 0, all we need to do is clear out the
		 * vnode data structures and we are done.
		 */
		if (vp->v_usecount == 0) {
			vgone(vp);
			continue;
		}

		/*
		 * If FORCECLOSE is set, forcibly close the vnode. For block
		 * or character devices, revert to an anonymous device. For
		 * all other files, just kill them.
		 */
		if (flags & FORCECLOSE) {
			if (vp->v_type != VBLK && vp->v_type != VCHR) {
				vgone(vp);
			} else {
				vclean(vp, 0);
				vp->v_op = spec_vnodeop_p;
				insmntque(vp, (struct mount *) 0);
			}
			continue;
		}
#ifdef DIAGNOSTIC
		if (busyprt)
			vprint("vflush: busy vnode", vp);
#endif
		busy++;
	}
	if (busy)
		return (EBUSY);
	return (0);
}
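
/*
 * Usage sketch (modeled on a typical VFS_UNMOUNT implementation; the
 * details are assumed): flush every vnode on the mount, forcibly if
 * MNT_FORCE was given, before tearing down the filesystem state.
 */
#if 0
	int flags = 0;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = vflush(mp, NULLVP, flags)))
		return (error);
#endif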

/*
 * Disassociate the underlying file system from a vnode.
 */
void
vclean(struct vnode *vp, int flags)
{
	int active;

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	if ((active = vp->v_usecount))
		VREF(vp);
	/*
	 * Even if the count is zero, the VOP_INACTIVE routine may still have
	 * the object locked while it cleans it out. The VOP_LOCK ensures that
	 * the VOP_INACTIVE routine is done with its work. For active vnodes,
	 * it ensures that no other activity can occur while the underlying
	 * object is being cleaned out.
	 */
	VOP_LOCK(vp);
	/*
	 * Prevent the vnode from being recycled or brought into use while we
	 * clean it out.
	 */
	if (vp->v_flag & VXLOCK)
		panic("vclean: deadlock");
	vp->v_flag |= VXLOCK;
	/*
	 * Clean out any buffers associated with the vnode.
	 */
	if (flags & DOCLOSE)
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	/*
	 * Any other processes trying to obtain this lock must first wait for
	 * VXLOCK to clear, then call the new lock operation.
	 */
	VOP_UNLOCK(vp);
	/*
	 * If purging an active vnode, it must be closed and deactivated
	 * before being reclaimed.
	 */
	if (active) {
		if (flags & DOCLOSE)
			VOP_CLOSE(vp, FNONBLOCK, NOCRED, NULL);
		VOP_INACTIVE(vp);
	}
	/*
	 * Reclaim the vnode.
	 */
	if (VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");
	if (active)
		vrele(vp);

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	vp->v_flag &= ~VXLOCK;
	if (vp->v_flag & VXWANT) {
		vp->v_flag &= ~VXWANT;
		wakeup((caddr_t) vp);
	}
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vgoneall(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress, wait until
		 * it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			(void) tsleep((caddr_t) vp, PINOD, "vgall", 0);
			return;
		}
		/*
		 * Ensure that vp will not be vgone'd while we are eliminating
		 * its aliases.
		 */
		vp->v_flag |= VXLOCK;
		while (vp->v_flag & VALIASED) {
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				vgone(vq);
				break;
			}
		}
		/*
		 * Remove the lock so that vgone below will really eliminate
		 * the vnode after which time vgone will awaken any sleepers.
		 */
		vp->v_flag &= ~VXLOCK;
	}
	vgone(vp);
}
1136 
1137 /*
1138  * Eliminate all activity associated with a vnode
1139  * in preparation for reuse.
1140  */
1141 void
1142 vgone(vp)
1143 	register struct vnode *vp;
1144 {
1145 	register struct vnode *vq;
1146 	struct vnode *vx;
1147 
1148 	/*
1149 	 * If a vgone (or vclean) is already in progress, wait until it is
1150 	 * done and return.
1151 	 */
1152 	if (vp->v_flag & VXLOCK) {
1153 		vp->v_flag |= VXWANT;
1154 		(void) tsleep((caddr_t) vp, PINOD, "vgone", 0);
1155 		return;
1156 	}
1157 
1158 	if (vp->v_object) {
1159 		vp->v_object->flags |= OBJ_VNODE_GONE;
1160 	}
1161 
1162 	/*
1163 	 * Clean out the filesystem specific data.
1164 	 */
1165 	vclean(vp, DOCLOSE);
1166 	/*
1167 	 * Delete from old mount point vnode list, if on one.
1168 	 */
1169 	if (vp->v_mount != NULL) {
1170 		LIST_REMOVE(vp, v_mntvnodes);
1171 		vp->v_mount = NULL;
1172 	}
1173 	/*
1174 	 * If special device, remove it from special device alias list.
1175 	 */
1176 	if (vp->v_type == VBLK || vp->v_type == VCHR) {
1177 		if (*vp->v_hashchain == vp) {
1178 			*vp->v_hashchain = vp->v_specnext;
1179 		} else {
1180 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1181 				if (vq->v_specnext != vp)
1182 					continue;
1183 				vq->v_specnext = vp->v_specnext;
1184 				break;
1185 			}
1186 			if (vq == NULL)
1187 				panic("missing bdev");
1188 		}
1189 		if (vp->v_flag & VALIASED) {
1190 			vx = NULL;
1191 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1192 				if (vq->v_rdev != vp->v_rdev ||
1193 				    vq->v_type != vp->v_type)
1194 					continue;
1195 				if (vx)
1196 					break;
1197 				vx = vq;
1198 			}
1199 			if (vx == NULL)
1200 				panic("missing alias");
1201 			if (vq == NULL)
1202 				vx->v_flag &= ~VALIASED;
1203 			vp->v_flag &= ~VALIASED;
1204 		}
1205 		FREE(vp->v_specinfo, M_VNODE);
1206 		vp->v_specinfo = NULL;
1207 	}
1208 	/*
1209 	 * If it is on the freelist and not already at the head, move it to
1210 	 * the head of the list. The test of the back pointer and the
1211 	 * reference count of zero is because it will be removed from the free
1212 	 * list by getnewvnode, but will not have its reference count
1213 	 * incremented until after calling vgone. If the reference count were
1214 	 * incremented first, vgone would (incorrectly) try to close the
1215 	 * previous instance of the underlying object. So, the back pointer is
1216 	 * explicitly set to `0xdeadb' in getnewvnode after removing it from
1217 	 * the freelist to ensure that we do not try to move it here.
1218 	 */
1219 	if (vp->v_usecount == 0 &&
1220 	    vp->v_freelist.tqe_prev != (struct vnode **) 0xdeadb &&
1221 	    vnode_free_list.tqh_first != vp) {
1222 		if(vp->v_tag != VT_TFS) {
1223 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1224 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1225 		}
1226 	}
1227 	vp->v_type = VBAD;
1228 }

/*
 * Lookup a vnode by device number.
 */
int
vfinddev(dev, type, vpp)
	dev_t dev;
	enum vtype type;
	struct vnode **vpp;
{
	register struct vnode *vp;

	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
		if (dev != vp->v_rdev || type != vp->v_type)
			continue;
		*vpp = vp;
		return (1);
	}
	return (0);
}

/*
 * Calculate the total number of references to a special device.
 */
int
vcount(vp)
	register struct vnode *vp;
{
	register struct vnode *vq, *vnext;
	int count;

loop:
	if ((vp->v_flag & VALIASED) == 0)
		return (vp->v_usecount);
	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
		vnext = vq->v_specnext;
		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
			continue;
		/*
		 * Alias, but not in use, so flush it out.
		 */
		if (vq->v_usecount == 0 && vq != vp) {
			vgone(vq);
			goto loop;
		}
		count += vq->v_usecount;
	}
	return (count);
}

/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(label, vp)
	char *label;
	register struct vnode *vp;
{
	char buf[64];

	if (label != NULL)
		printf("%s: ", label);
	printf("type %s, usecount %d, writecount %d, refcount %ld,",
	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
	    vp->v_holdcnt);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VXLOCK)
		strcat(buf, "|VXLOCK");
	if (vp->v_flag & VXWANT)
		strcat(buf, "|VXWANT");
	if (vp->v_flag & VBWAIT)
		strcat(buf, "|VBWAIT");
	if (vp->v_flag & VALIASED)
		strcat(buf, "|VALIASED");
	if (buf[0] != '\0')
		printf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		printf("\n");
	} else {
		printf("\n\t");
		VOP_PRINT(vp);
	}
}

#ifdef DDB
/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
void
printlockedvnodes(void)
{
	register struct mount *mp;
	register struct vnode *vp;

	printf("Locked vnodes\n");
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
	    mp = mp->mnt_list.cqe_next) {
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next)
			if (VOP_ISLOCKED(vp))
				vprint((char *) 0, vp);
	}
}
#endif

int kinfo_vdebug = 1;
int kinfo_vgetfailed;

#define KINFO_VNODESLOP	10
/*
 * Dump vnode list (via sysctl).
 * Copyout address of vnode followed by vnode.
 */
/* ARGSUSED */
static int
sysctl_vnode SYSCTL_HANDLER_ARGS
{
	register struct mount *mp, *nmp;
	struct vnode *vp;
	int error;

#define VPTRSZ	sizeof (struct vnode *)
#define VNODESZ	sizeof (struct vnode)

	req->lock = 0;
	if (!req->oldptr) /* Make an estimate */
		return (SYSCTL_OUT(req, 0,
			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));

	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		nmp = mp->mnt_list.cqe_next;
		if (vfs_busy(mp))
			continue;
again:
		for (vp = mp->mnt_vnodelist.lh_first;
		    vp != NULL;
		    vp = vp->v_mntvnodes.le_next) {
			/*
			 * Check that the vp is still associated with this
			 * filesystem.  RACE: could have been recycled onto
			 * the same filesystem.
			 */
			if (vp->v_mount != mp) {
				if (kinfo_vdebug)
					printf("kinfo: vp changed\n");
				goto again;
			}
			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
			    (error = SYSCTL_OUT(req, vp, VNODESZ))) {
				vfs_unbusy(mp);
				return (error);
			}
		}
		vfs_unbusy(mp);
	}

	return (0);
}

SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_vnode, "S,vnode", "");
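
/*
 * Userland view (illustrative): the handler above exports pairs of
 * (struct vnode *, struct vnode) through sysctl(3).  A reader first
 * passes a NULL buffer to hit the estimate branch, then retries:
 */
#if 0
	int mib[2] = { CTL_KERN, KERN_VNODE };
	size_t len;
	char *buf;

	if (sysctl(mib, 2, NULL, &len, NULL, 0) == -1)	/* size estimate */
		err(1, "sysctl");
	buf = malloc(len);
	if (sysctl(mib, 2, buf, &len, NULL, 0) == -1)
		err(1, "sysctl");
#endif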

/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(vp)
	register struct vnode *vp;
{
	register struct vnode *vq;

	if (vp->v_specflags & SI_MOUNTEDON)
		return (EBUSY);
	if (vp->v_flag & VALIASED) {
		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
			if (vq->v_rdev != vp->v_rdev ||
			    vq->v_type != vp->v_type)
				continue;
			if (vq->v_specflags & SI_MOUNTEDON)
				return (EBUSY);
		}
	}
	return (0);
}

/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
	struct export_args *argp)
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	register int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}
	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
	bzero((caddr_t) np, i);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		for (dom = domains; dom; dom = dom->dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	free(np, M_NETADDR);
	return (error);
}

/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	register struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	free((caddr_t) rn, M_NETADDR);
	return (0);
}

/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	register int i;
	register struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			free((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}

int
vfs_export(mp, nep, argp)
	struct mount *mp;
	struct netexport *nep;
	struct export_args *argp;
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}

struct netcred *
vfs_export_lookup(mp, nep, nam)
	register struct mount *mp;
	struct netexport *nep;
	struct mbuf *nam;
{
	register struct netcred *np;
	register struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = mtod(nam, struct sockaddr *);
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
				    (*rnh->rnh_matchaddr) ((caddr_t) saddr,
				    rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}
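
/*
 * Usage sketch (modeled on the NFS server's export check; the um_export
 * field name is assumed): the exporting filesystem looks up the client
 * address to recover the flags and anonymous credential it was exported
 * with, refusing service when no entry matches.
 */
#if 0
	struct netcred *np;

	np = vfs_export_lookup(mp, &ump->um_export, nam);
	if (np == NULL)
		return (EACCES);
	/* np->netc_exflags, np->netc_anon now govern this client */
#endif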


/*
 * Perform msync on all vnodes under a mount point.
 * The mount point must be locked.
 */
void
vfs_msync(struct mount *mp, int flags)
{
	struct vnode *vp, *nvp;
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {

		if (vp->v_mount != mp)
			goto loop;
		nvp = vp->v_mntvnodes.le_next;
		if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
			continue;
		if (vp->v_object &&
		   (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
			vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
		}
	}
}
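
/*
 * Usage note (illustrative): this is the VM half of a sync.  The sync
 * and unmount paths are expected to call vfs_msync(mp, MNT_NOWAIT) so
 * that dirty mmap()ed pages are pushed to their vnodes before the
 * buffer-level VFS_SYNC runs.
 */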

/*
 * Create the VM object needed for VMIO and mmap support.  This
 * is done for all VREG files in the system.  Some filesystems can
 * take advantage of the additional metadata buffering capability
 * of the VMIO code by making the device node VMIO mode as well.
 */
int
vfs_object_create(vp, p, cred, waslocked)
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	int waslocked;
{
	struct vattr vat;
	vm_object_t object;
	int error = 0;

retry:
	if ((object = vp->v_object) == NULL) {
		if (vp->v_type == VREG) {
			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
				goto retn;
			(void) vnode_pager_alloc(vp,
				OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
		} else {
			/*
			 * This simply allocates the biggest object possible
			 * for a VBLK vnode.  This should be fixed, but doesn't
			 * cause any problems (yet).
			 */
			(void) vnode_pager_alloc(vp, INT_MAX, 0, 0);
		}
		vp->v_object->flags |= OBJ_VFS_REF;
	} else {
		if (object->flags & OBJ_DEAD) {
			if (waslocked)
				VOP_UNLOCK(vp);
			tsleep(object, PVM, "vodead", 0);
			if (waslocked)
				VOP_LOCK(vp);
			goto retry;
		}
		if ((object->flags & OBJ_VFS_REF) == 0) {
			object->flags |= OBJ_VFS_REF;
			vm_object_reference(object);
		}
	}
	if (vp->v_object)
		vp->v_flag |= VVMIO;

retn:
	return error;
}
1659