xref: /freebsd/sys/kern/vfs_export.c (revision ce834215a70ff69e7e222827437116eee2f9ac6f)
1 /*
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
39  * $Id: vfs_subr.c,v 1.87 1997/06/10 02:48:08 davidg Exp $
40  */
41 
42 /*
43  * External virtual filesystem routines
44  */
45 #include "opt_ddb.h"
46 #include "opt_devfs.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/file.h>
52 #include <sys/proc.h>
53 #include <sys/mount.h>
54 #include <sys/time.h>
55 #include <sys/vnode.h>
56 #include <sys/stat.h>
57 #include <sys/namei.h>
58 #include <sys/ucred.h>
59 #include <sys/buf.h>
60 #include <sys/errno.h>
61 #include <sys/malloc.h>
62 #include <sys/domain.h>
63 #include <sys/mbuf.h>
64 
65 #include <vm/vm.h>
66 #include <vm/vm_param.h>
67 #include <vm/vm_object.h>
68 #include <vm/vm_extern.h>
69 #include <vm/vm_pager.h>
70 #include <vm/vnode_pager.h>
71 #include <sys/sysctl.h>
72 
73 #include <miscfs/specfs/specdev.h>
74 
75 #ifdef DDB
76 extern void	printlockedvnodes __P((void));
77 #endif
78 static void	vclean __P((struct vnode *vp, int flags, struct proc *p));
79 static void	vgonel __P((struct vnode *vp, struct proc *p));
80 unsigned long	numvnodes;
81 SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
82 static void	vputrele __P((struct vnode *vp, int put));
83 
84 enum vtype iftovt_tab[16] = {
85 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
86 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
87 };
88 int vttoif_tab[9] = {
89 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
90 	S_IFSOCK, S_IFIFO, S_IFMT,
91 };
92 
93 /*
94  * Insq/Remq for the vnode usage lists.
95  */
96 #define	bufinsvn(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_vnbufs)
97 #define	bufremvn(bp) {							\
98 	LIST_REMOVE(bp, b_vnbufs);					\
99 	(bp)->b_vnbufs.le_next = NOLIST;				\
100 }
101 TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* vnode free list */
102 static u_long freevnodes = 0;
103 
104 struct mntlist mountlist;	/* mounted filesystem list */
105 struct simplelock mountlist_slock;
106 static struct simplelock mntid_slock;
107 struct simplelock mntvnode_slock;
108 struct simplelock vnode_free_list_slock;
109 static struct simplelock spechash_slock;
110 
111 int desiredvnodes;
112 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW, &desiredvnodes, 0, "");
113 
114 static void	vfs_free_addrlist __P((struct netexport *nep));
115 static int	vfs_free_netcred __P((struct radix_node *rn, void *w));
116 static int	vfs_hang_addrlist __P((struct mount *mp, struct netexport *nep,
117 				       struct export_args *argp));
118 
119 /*
120  * Initialize the vnode management data structures.
121  */
122 void
123 vntblinit()
124 {
125 
126 	desiredvnodes = maxproc + vm_object_cache_max;
127 	simple_lock_init(&mntvnode_slock);
128 	simple_lock_init(&mntid_slock);
129 	simple_lock_init(&spechash_slock);
130 	TAILQ_INIT(&vnode_free_list);
131 	simple_lock_init(&vnode_free_list_slock);
132 	CIRCLEQ_INIT(&mountlist);
133 }
134 
135 /*
136  * Mark a mount point as busy. Used to synchronize access and to delay
137  * unmounting. Interlock is not released on failure.
138  */
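 *
 * The interlock is typically the simple lock protecting the list being
 * traversed (e.g. mountlist_slock, as in the mount list walkers below);
 * it is released once the busy lock has been obtained.  A successful
 * call must eventually be balanced by vfs_unbusy().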
139 int
140 vfs_busy(mp, flags, interlkp, p)
141 	struct mount *mp;
142 	int flags;
143 	struct simplelock *interlkp;
144 	struct proc *p;
145 {
146 	int lkflags;
147 
148 	if (mp->mnt_flag & MNT_UNMOUNT) {
149 		if (flags & LK_NOWAIT)
150 			return (ENOENT);
151 		mp->mnt_flag |= MNT_MWAIT;
152 		if (interlkp) {
153 			simple_unlock(interlkp);
154 		}
155 		/*
156 		 * Since all busy locks are shared except the exclusive
157 		 * lock granted when unmounting, the only place that a
158 		 * wakeup needs to be done is at the release of the
159 		 * exclusive lock at the end of dounmount.
160 		 */
161 		tsleep((caddr_t)mp, PVFS, "vfs_busy", 0);
162 		if (interlkp) {
163 			simple_lock(interlkp);
164 		}
165 		return (ENOENT);
166 	}
167 	lkflags = LK_SHARED;
168 	if (interlkp)
169 		lkflags |= LK_INTERLOCK;
170 	if (lockmgr(&mp->mnt_lock, lkflags, interlkp, p))
171 		panic("vfs_busy: unexpected lock failure");
172 	return (0);
173 }
174 
175 /*
176  * Free a busy filesystem.
177  */
178 void
179 vfs_unbusy(mp, p)
180 	struct mount *mp;
181 	struct proc *p;
182 {
183 
184 	lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, p);
185 }
186 
187 /*
188  * Lookup a filesystem type, and if found allocate and initialize
189  * a mount structure for it.
190  *
191  * Devname is usually updated by mount(8) after booting.
192  */
193 int
194 vfs_rootmountalloc(fstypename, devname, mpp)
195 	char *fstypename;
196 	char *devname;
197 	struct mount **mpp;
198 {
199 	struct proc *p = curproc;	/* XXX */
200 	struct vfsconf *vfsp;
201 	struct mount *mp;
202 
203 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
204 		if (!strcmp(vfsp->vfc_name, fstypename))
205 			break;
206 	if (vfsp == NULL)
207 		return (ENODEV);
208 	mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK);
209 	bzero((char *)mp, (u_long)sizeof(struct mount));
210 	lockinit(&mp->mnt_lock, PVFS, "vfslock", 0, 0);
211 	(void)vfs_busy(mp, LK_NOWAIT, 0, p);
212 	LIST_INIT(&mp->mnt_vnodelist);
213 	mp->mnt_vfc = vfsp;
214 	mp->mnt_op = vfsp->vfc_vfsops;
215 	mp->mnt_flag = MNT_RDONLY;
216 	mp->mnt_vnodecovered = NULLVP;
217 	vfsp->vfc_refcount++;
218 	mp->mnt_stat.f_type = vfsp->vfc_typenum;
219 	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
220 	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
221 	mp->mnt_stat.f_mntonname[0] = '/';
222 	mp->mnt_stat.f_mntonname[1] = 0;
223 	(void) copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
224 	*mpp = mp;
225 	return (0);
226 }
227 
228 /*
229  * Find an appropriate filesystem to use for the root. If a filesystem
230  * has not been preselected, walk through the list of known filesystems
231  * trying those that have mountroot routines, and try them until one
232  * works or we have tried them all.
233  */
234 #ifdef notdef	/* XXX JH */
235 int
236 lite2_vfs_mountroot(void)
237 {
238 	struct vfsconf *vfsp;
239 	extern int (*lite2_mountroot)(void);
240 	int error;
241 
242 	if (lite2_mountroot != NULL)
243 		return ((*lite2_mountroot)());
244 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
245 		if (vfsp->vfc_mountroot == NULL)
246 			continue;
247 		if ((error = (*vfsp->vfc_mountroot)()) == 0)
248 			return (0);
249 		printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
250 	}
251 	return (ENODEV);
252 }
253 #endif
254 
255 /*
256  * Lookup a mount point by filesystem identifier.
257  */
258 struct mount *
259 vfs_getvfs(fsid)
260 	fsid_t *fsid;
261 {
262 	register struct mount *mp;
263 
264 	simple_lock(&mountlist_slock);
265 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist;
266 	    mp = mp->mnt_list.cqe_next) {
267 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
268 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
269 			simple_unlock(&mountlist_slock);
270 			return (mp);
271 		}
272 	}
273 	simple_unlock(&mountlist_slock);
274 	return ((struct mount *) 0);
275 }
276 
277 /*
278  * Get a new unique fsid
279  */
280 void
281 vfs_getnewfsid(mp)
282 	struct mount *mp;
283 {
284 	static u_short xxxfs_mntid;
285 
286 	fsid_t tfsid;
287 	int mtype;
288 
289 	simple_lock(&mntid_slock);
290 	mtype = mp->mnt_vfc->vfc_typenum;
291 	mp->mnt_stat.f_fsid.val[0] = makedev(nblkdev + mtype, 0);
292 	mp->mnt_stat.f_fsid.val[1] = mtype;
293 	if (xxxfs_mntid == 0)
294 		++xxxfs_mntid;
295 	tfsid.val[0] = makedev(nblkdev + mtype, xxxfs_mntid);
296 	tfsid.val[1] = mtype;
297 	if (mountlist.cqh_first != (void *)&mountlist) {
298 		while (vfs_getvfs(&tfsid)) {
299 			tfsid.val[0]++;
300 			xxxfs_mntid++;
301 		}
302 	}
303 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
304 	simple_unlock(&mntid_slock);
305 }
306 
307 /*
308  * Set vnode attributes to VNOVAL
309  */
310 void
311 vattr_null(vap)
312 	register struct vattr *vap;
313 {
314 
315 	vap->va_type = VNON;
316 	vap->va_size = VNOVAL;
317 	vap->va_bytes = VNOVAL;
318 	vap->va_mode = vap->va_nlink = vap->va_uid = vap->va_gid =
319 	    vap->va_fsid = vap->va_fileid =
320 	    vap->va_blocksize = vap->va_rdev =
321 	    vap->va_atime.tv_sec = vap->va_atime.tv_nsec =
322 	    vap->va_mtime.tv_sec = vap->va_mtime.tv_nsec =
323 	    vap->va_ctime.tv_sec = vap->va_ctime.tv_nsec =
324 	    vap->va_flags = vap->va_gen = VNOVAL;
325 	vap->va_vaflags = 0;
326 }
327 
328 /*
329  * Routines having to do with the management of the vnode table.
330  */
331 extern vop_t **dead_vnodeop_p;
332 
333 /*
334  * Return the next vnode from the free list.
335  */
336 int
337 getnewvnode(tag, mp, vops, vpp)
338 	enum vtagtype tag;
339 	struct mount *mp;
340 	vop_t **vops;
341 	struct vnode **vpp;
342 {
343 	struct proc *p = curproc;	/* XXX */
344 	struct vnode *vp;
345 
346 	/*
347 	 * We take the least recently used vnode from the freelist
348 	 * if we can get it and it has no cached pages, and no
349 	 * namecache entries are relative to it.
350 	 * Otherwise we allocate a new vnode.
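	 *
	 * The scan holds vnode_free_list_slock and uses simple_lock_try()
	 * on each candidate's interlock, so a contended vnode is simply
	 * skipped rather than waited for.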
351 	 */
352 
353 	simple_lock(&vnode_free_list_slock);
354 
355 	TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
356 		if (!simple_lock_try(&vp->v_interlock))
357 			continue;
358 		if (vp->v_usecount)
359 			panic("free vnode isn't");
360 
361 		if (vp->v_object && vp->v_object->resident_page_count) {
362 			/* Don't recycle if it's caching some pages */
363 			simple_unlock(&vp->v_interlock);
364 			continue;
365 		} else if (LIST_FIRST(&vp->v_cache_src)) {
366 			/* Don't recycle if active in the namecache */
367 			simple_unlock(&vp->v_interlock);
368 			continue;
369 		} else {
370 			break;
371 		}
372 	}
373 	if (vp) {
374 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
375 		freevnodes--;
376 		/* see comment on why 0xdeadb is set at end of vgone (below) */
377 		vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
378 		simple_unlock(&vnode_free_list_slock);
379 		vp->v_lease = NULL;
380 		if (vp->v_type != VBAD)
381 			vgonel(vp, p);
382 		else {
383 			simple_unlock(&vp->v_interlock);
384 		}
385 
386 #ifdef DIAGNOSTIC
387 		{
388 			int s;
389 
390 			if (vp->v_data)
391 				panic("cleaned vnode isn't");
392 			s = splbio();
393 			if (vp->v_numoutput)
394 				panic("Clean vnode has pending I/O's");
395 			splx(s);
396 		}
397 #endif
398 		vp->v_flag = 0;
399 		vp->v_lastr = 0;
400 		vp->v_lastw = 0;
401 		vp->v_lasta = 0;
402 		vp->v_cstart = 0;
403 		vp->v_clen = 0;
404 		vp->v_socket = 0;
405 		vp->v_writecount = 0;	/* XXX */
406 	} else {
407 		simple_unlock(&vnode_free_list_slock);
408 		vp = (struct vnode *) malloc((u_long) sizeof *vp,
409 		    M_VNODE, M_WAITOK);
410 		bzero((char *) vp, sizeof *vp);
411 		vp->v_dd = vp;
412 		LIST_INIT(&vp->v_cache_src);
413 		TAILQ_INIT(&vp->v_cache_dst);
414 		numvnodes++;
415 	}
416 
417 	vp->v_type = VNON;
418 	cache_purge(vp);
419 	vp->v_tag = tag;
420 	vp->v_op = vops;
421 	insmntque(vp, mp);
422 	*vpp = vp;
423 	vp->v_usecount = 1;
424 	vp->v_data = 0;
425 	return (0);
426 }
427 
428 /*
429  * Move a vnode from one mount queue to another.
430  */
431 void
432 insmntque(vp, mp)
433 	register struct vnode *vp;
434 	register struct mount *mp;
435 {
436 
437 	simple_lock(&mntvnode_slock);
438 	/*
439 	 * Delete from old mount point vnode list, if on one.
440 	 */
441 	if (vp->v_mount != NULL)
442 		LIST_REMOVE(vp, v_mntvnodes);
443 	/*
444 	 * Insert into list of vnodes for the new mount point, if available.
445 	 */
446 	if ((vp->v_mount = mp) == NULL) {
447 		simple_unlock(&mntvnode_slock);
448 		return;
449 	}
450 	LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
451 	simple_unlock(&mntvnode_slock);
452 }
453 
454 /*
455  * Update outstanding I/O count and do wakeup if requested.
456  */
457 void
458 vwakeup(bp)
459 	register struct buf *bp;
460 {
461 	register struct vnode *vp;
462 
463 	bp->b_flags &= ~B_WRITEINPROG;
464 	if ((vp = bp->b_vp)) {
465 		vp->v_numoutput--;
466 		if (vp->v_numoutput < 0)
467 			panic("vwakeup: neg numoutput");
468 		if ((vp->v_numoutput == 0) && (vp->v_flag & VBWAIT)) {
469 			vp->v_flag &= ~VBWAIT;
470 			wakeup((caddr_t) &vp->v_numoutput);
471 		}
472 	}
473 }
474 
475 /*
476  * Flush out and invalidate all buffers associated with a vnode.
477  * Called with the underlying object locked.
478  */
479 int
480 vinvalbuf(vp, flags, cred, p, slpflag, slptimeo)
481 	register struct vnode *vp;
482 	int flags;
483 	struct ucred *cred;
484 	struct proc *p;
485 	int slpflag, slptimeo;
486 {
487 	register struct buf *bp;
488 	struct buf *nbp, *blist;
489 	int s, error;
490 	vm_object_t object;
491 
492 	if (flags & V_SAVE) {
493 		if ((error = VOP_FSYNC(vp, cred, MNT_WAIT, p)))
494 			return (error);
495 		if (vp->v_dirtyblkhd.lh_first != NULL)
496 			panic("vinvalbuf: dirty bufs");
497 	}
498 
499 	s = splbio();
500 	for (;;) {
501 		if ((blist = vp->v_cleanblkhd.lh_first) && (flags & V_SAVEMETA))
502 			while (blist && blist->b_lblkno < 0)
503 				blist = blist->b_vnbufs.le_next;
504 		if (!blist && (blist = vp->v_dirtyblkhd.lh_first) &&
505 		    (flags & V_SAVEMETA))
506 			while (blist && blist->b_lblkno < 0)
507 				blist = blist->b_vnbufs.le_next;
508 		if (!blist)
509 			break;
510 
511 		for (bp = blist; bp; bp = nbp) {
512 			nbp = bp->b_vnbufs.le_next;
513 			if ((flags & V_SAVEMETA) && bp->b_lblkno < 0)
514 				continue;
515 			if (bp->b_flags & B_BUSY) {
516 				bp->b_flags |= B_WANTED;
517 				error = tsleep((caddr_t) bp,
518 				    slpflag | (PRIBIO + 1), "vinvalbuf",
519 				    slptimeo);
520 				if (error) {
521 					splx(s);
522 					return (error);
523 				}
524 				break;
525 			}
526 			bremfree(bp);
527 			bp->b_flags |= B_BUSY;
528 			/*
529 			 * XXX Since there are no node locks for NFS, I
530 			 * believe there is a slight chance that a delayed
531 			 * write will occur while sleeping just above, so
532 			 * check for it.
533 			 */
534 			if ((bp->b_flags & B_DELWRI) && (flags & V_SAVE)) {
535 				(void) VOP_BWRITE(bp);
536 				break;
537 			}
538 			bp->b_flags |= (B_INVAL|B_NOCACHE|B_RELBUF);
539 			brelse(bp);
540 		}
541 	}
542 
543 	while (vp->v_numoutput > 0) {
544 		vp->v_flag |= VBWAIT;
545 		tsleep(&vp->v_numoutput, PVM, "vnvlbv", 0);
546 	}
547 
548 	splx(s);
549 
550 	/*
551 	 * Destroy the copy in the VM cache, too.
552 	 */
553 	object = vp->v_object;
554 	if (object != NULL) {
555 		vm_object_page_remove(object, 0, object->size,
556 		    (flags & V_SAVE) ? TRUE : FALSE);
557 	}
558 	if (!(flags & V_SAVEMETA) &&
559 	    (vp->v_dirtyblkhd.lh_first || vp->v_cleanblkhd.lh_first))
560 		panic("vinvalbuf: flush failed");
561 	return (0);
562 }
563 
564 /*
565  * Associate a buffer with a vnode.
566  */
567 void
568 bgetvp(vp, bp)
569 	register struct vnode *vp;
570 	register struct buf *bp;
571 {
572 	int s;
573 
574 	if (bp->b_vp)
575 		panic("bgetvp: not free");
576 	VHOLD(vp);
577 	bp->b_vp = vp;
578 	if (vp->v_type == VBLK || vp->v_type == VCHR)
579 		bp->b_dev = vp->v_rdev;
580 	else
581 		bp->b_dev = NODEV;
582 	/*
583 	 * Insert onto list for new vnode.
584 	 */
585 	s = splbio();
586 	bufinsvn(bp, &vp->v_cleanblkhd);
587 	splx(s);
588 }
589 
590 /*
591  * Disassociate a buffer from a vnode.
592  */
593 void
594 brelvp(bp)
595 	register struct buf *bp;
596 {
597 	struct vnode *vp;
598 	int s;
599 
600 	if (bp->b_vp == (struct vnode *) 0)
601 		panic("brelvp: NULL");
602 	/*
603 	 * Delete from old vnode list, if on one.
604 	 */
605 	s = splbio();
606 	if (bp->b_vnbufs.le_next != NOLIST)
607 		bufremvn(bp);
608 	splx(s);
609 
610 	vp = bp->b_vp;
611 	bp->b_vp = (struct vnode *) 0;
612 	HOLDRELE(vp);
613 }
614 
615 /*
616  * Associate a p-buffer with a vnode.
617  */
618 void
619 pbgetvp(vp, bp)
620 	register struct vnode *vp;
621 	register struct buf *bp;
622 {
623 #if defined(DIAGNOSTIC)
624 	if (bp->b_vp)
625 		panic("pbgetvp: not free");
626 #endif
627 	bp->b_vp = vp;
628 	if (vp->v_type == VBLK || vp->v_type == VCHR)
629 		bp->b_dev = vp->v_rdev;
630 	else
631 		bp->b_dev = NODEV;
632 }
633 
634 /*
635  * Disassociate a p-buffer from a vnode.
636  */
637 void
638 pbrelvp(bp)
639 	register struct buf *bp;
640 {
641 	struct vnode *vp;
642 
643 #if defined(DIAGNOSTIC)
644 	if (bp->b_vp == (struct vnode *) 0)
645 		panic("pbrelvp: NULL");
646 #endif
647 
648 	bp->b_vp = (struct vnode *) 0;
649 }
650 
651 /*
652  * Reassign a buffer from one vnode to another.
653  * Used to assign file specific control information
654  * (indirect blocks) to the vnode to which they belong.
655  */
656 void
657 reassignbuf(bp, newvp)
658 	register struct buf *bp;
659 	register struct vnode *newvp;
660 {
661 	int s;
662 
663 	if (newvp == NULL) {
664 		printf("reassignbuf: NULL\n");
665 		return;
666 	}
667 
668 	s = splbio();
669 	/*
670 	 * Delete from old vnode list, if on one.
671 	 */
672 	if (bp->b_vnbufs.le_next != NOLIST)
673 		bufremvn(bp);
674 	/*
675 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
676 	 * of clean buffers.
677 	 */
678 	if (bp->b_flags & B_DELWRI) {
679 		struct buf *tbp;
680 
681 		tbp = newvp->v_dirtyblkhd.lh_first;
682 		if (!tbp || (tbp->b_lblkno > bp->b_lblkno)) {
683 			bufinsvn(bp, &newvp->v_dirtyblkhd);
684 		} else {
685 			while (tbp->b_vnbufs.le_next &&
686 				(tbp->b_vnbufs.le_next->b_lblkno < bp->b_lblkno)) {
687 				tbp = tbp->b_vnbufs.le_next;
688 			}
689 			LIST_INSERT_AFTER(tbp, bp, b_vnbufs);
690 		}
691 	} else {
692 		bufinsvn(bp, &newvp->v_cleanblkhd);
693 	}
694 	splx(s);
695 }
696 
697 #ifndef DEVFS_ROOT
698 /*
699  * Create a vnode for a block device.
700  * Used for root filesystem, argdev, and swap areas.
701  * Also used for memory file system special devices.
702  */
703 int
704 bdevvp(dev, vpp)
705 	dev_t dev;
706 	struct vnode **vpp;
707 {
708 	register struct vnode *vp;
709 	struct vnode *nvp;
710 	int error;
711 
712 	if (dev == NODEV)
713 		return (0);
714 	error = getnewvnode(VT_NON, (struct mount *) 0, spec_vnodeop_p, &nvp);
715 	if (error) {
716 		*vpp = 0;
717 		return (error);
718 	}
719 	vp = nvp;
720 	vp->v_type = VBLK;
721 	if ((nvp = checkalias(vp, dev, (struct mount *) 0))) {
722 		vput(vp);
723 		vp = nvp;
724 	}
725 	*vpp = vp;
726 	return (0);
727 }
728 #endif /* !DEVFS_ROOT */
729 
730 /*
731  * Check to see if the new vnode represents a special device
732  * for which we already have a vnode (either because of
733  * bdevvp() or because of a different vnode representing
734  * the same block device). If such an alias exists, deallocate
735  * the existing contents and return the aliased vnode. The
736  * caller is responsible for filling it with its new contents.
737  */
738 struct vnode *
739 checkalias(nvp, nvp_rdev, mp)
740 	register struct vnode *nvp;
741 	dev_t nvp_rdev;
742 	struct mount *mp;
743 {
744 	struct proc *p = curproc;	/* XXX */
745 	struct vnode *vp;
746 	struct vnode **vpp;
747 
748 	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
749 		return (NULLVP);
750 
751 	vpp = &speclisth[SPECHASH(nvp_rdev)];
752 loop:
753 	simple_lock(&spechash_slock);
754 	for (vp = *vpp; vp; vp = vp->v_specnext) {
755 		if (nvp_rdev != vp->v_rdev || nvp->v_type != vp->v_type)
756 			continue;
757 		/*
758 		 * Alias, but not in use, so flush it out.
759 		 */
760 		simple_lock(&vp->v_interlock);
761 		if (vp->v_usecount == 0) {
762 			simple_unlock(&spechash_slock);
763 			vgonel(vp, p);
764 			goto loop;
765 		}
766 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) {
767 			simple_unlock(&spechash_slock);
768 			goto loop;
769 		}
770 		break;
771 	}
772 	if (vp == NULL || vp->v_tag != VT_NON) {
773 		MALLOC(nvp->v_specinfo, struct specinfo *,
774 		    sizeof(struct specinfo), M_VNODE, M_WAITOK);
775 		nvp->v_rdev = nvp_rdev;
776 		nvp->v_hashchain = vpp;
777 		nvp->v_specnext = *vpp;
778 		nvp->v_specflags = 0;
779 		simple_unlock(&spechash_slock);
780 		*vpp = nvp;
781 		if (vp != NULLVP) {
782 			nvp->v_flag |= VALIASED;
783 			vp->v_flag |= VALIASED;
784 			vput(vp);
785 		}
786 		return (NULLVP);
787 	}
788 	simple_unlock(&spechash_slock);
789 	VOP_UNLOCK(vp, 0, p);
790 	simple_lock(&vp->v_interlock);
791 	vclean(vp, 0, p);
792 	vp->v_op = nvp->v_op;
793 	vp->v_tag = nvp->v_tag;
794 	nvp->v_type = VNON;
795 	insmntque(vp, mp);
796 	return (vp);
797 }
798 
799 /*
800  * Grab a particular vnode from the free list, increment its
801  * reference count and lock it. The vnode lock bit is set if the
802  * vnode is being eliminated in vgone. The process is awakened
803  * when the transition is completed, and an error returned to
804  * indicate that the vnode is no longer usable (possibly having
805  * been changed to a new file system type).
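 *
 * If LK_INTERLOCK is set in flags, the caller already holds the vnode
 * interlock.  If any LK_TYPE_MASK bits are set, the vnode is also locked
 * with vn_lock(); should that fail, the reference just gained is dropped
 * again with vrele() and the error is returned.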
806  */
807 int
808 vget(vp, flags, p)
809 	register struct vnode *vp;
810 	int flags;
811 	struct proc *p;
812 {
813 	int error;
814 
815 	/*
816 	 * If the vnode is in the process of being cleaned out for
817 	 * another use, we wait for the cleaning to finish and then
818 	 * return failure. Cleaning is determined by checking that
819 	 * the VXLOCK flag is set.
820 	 */
821 	if ((flags & LK_INTERLOCK) == 0) {
822 		simple_lock(&vp->v_interlock);
823 	}
824 	if (vp->v_flag & VXLOCK) {
825 		vp->v_flag |= VXWANT;
826 		simple_unlock(&vp->v_interlock);
827 		tsleep((caddr_t)vp, PINOD, "vget", 0);
828 		return (ENOENT);
829 	}
830 	if (vp->v_usecount == 0) {
831 		simple_lock(&vnode_free_list_slock);
832 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
833 		simple_unlock(&vnode_free_list_slock);
834 		freevnodes--;
835 	}
836 	vp->v_usecount++;
837 	/*
838 	 * Create the VM object, if needed
839 	 */
840 	if ((vp->v_type == VREG) &&
841 		((vp->v_object == NULL) ||
842 			(vp->v_object->flags & OBJ_VFS_REF) == 0 ||
843 			(vp->v_object->flags & OBJ_DEAD))) {
844 		/*
845 		 * XXX vfs_object_create probably needs the interlock.
846 		 */
847 		simple_unlock(&vp->v_interlock);
848 		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
849 		simple_lock(&vp->v_interlock);
850 	}
851 	if (flags & LK_TYPE_MASK) {
852 		if ((error = vn_lock(vp, flags | LK_INTERLOCK, p)))
853 			vrele(vp);
854 		return (error);
855 	}
856 	simple_unlock(&vp->v_interlock);
857 	return (0);
858 }
859 
860 /*
861  * Stubs to use when there is no locking to be done on the underlying object.
862  * A minimal shared lock is necessary to ensure that the underlying object
863  * is not revoked while an operation is in progress. So, an active shared
864  * count is maintained in an auxiliary vnode lock structure.
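 *
 * A filesystem that does no locking of its own would typically use this
 * routine as its VOP_LOCK entry (with vop_nounlock and vop_noislocked
 * below as the matching unlock/islocked entries); the shared count is
 * kept in vp->v_vnlock, which is allocated here on first use.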
865  */
866 int
867 vop_sharedlock(ap)
868 	struct vop_lock_args /* {
869 		struct vnode *a_vp;
870 		int a_flags;
871 		struct proc *a_p;
872 	} */ *ap;
873 {
874 	/*
875 	 * This code cannot be used until all the non-locking filesystems
876 	 * (notably NFS) are converted to properly lock and release nodes.
877 	 * Also, certain vnode operations change the locking state within
878 	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
879 	 * and symlink). Ideally these operations should not change the
880 	 * lock state, but should be changed to let the caller of the
881 	 * function unlock them. Otherwise all intermediate vnode layers
882 	 * (such as union, umapfs, etc) must catch these functions to do
883 	 * the necessary locking at their layer. Note that the inactive
884 	 * and lookup operations also change their lock state, but this
885 	 * cannot be avoided, so these two operations will always need
886 	 * to be handled in intermediate layers.
887 	 */
888 	struct vnode *vp = ap->a_vp;
889 	int vnflags, flags = ap->a_flags;
890 
891 	if (vp->v_vnlock == NULL) {
892 		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
893 			return (0);
894 		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
895 		    M_VNODE, M_WAITOK);
896 		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
897 	}
898 	switch (flags & LK_TYPE_MASK) {
899 	case LK_DRAIN:
900 		vnflags = LK_DRAIN;
901 		break;
902 	case LK_EXCLUSIVE:
903 #ifdef DEBUG_VFS_LOCKS
904 		/*
905 		 * Normally, we use shared locks here, but that confuses
906 		 * the locking assertions.
907 		 */
908 		vnflags = LK_EXCLUSIVE;
909 		break;
910 #endif
911 	case LK_SHARED:
912 		vnflags = LK_SHARED;
913 		break;
914 	case LK_UPGRADE:
915 	case LK_EXCLUPGRADE:
916 	case LK_DOWNGRADE:
917 		return (0);
918 	case LK_RELEASE:
919 	default:
920 		panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
921 	}
922 	if (flags & LK_INTERLOCK)
923 		vnflags |= LK_INTERLOCK;
924 	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
925 }
926 
927 /*
928  * Stubs to use when there is no locking to be done on the underlying object.
929  * A minimal shared lock is necessary to ensure that the underlying object
930  * is not revoked while an operation is in progress. So, an active shared
931  * count is maintained in an auxiliary vnode lock structure.
932  */
933 int
934 vop_nolock(ap)
935 	struct vop_lock_args /* {
936 		struct vnode *a_vp;
937 		int a_flags;
938 		struct proc *a_p;
939 	} */ *ap;
940 {
941 #ifdef notyet
942 	/*
943 	 * This code cannot be used until all the non-locking filesystems
944 	 * (notably NFS) are converted to properly lock and release nodes.
945 	 * Also, certain vnode operations change the locking state within
946 	 * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
947 	 * and symlink). Ideally these operations should not change the
948 	 * lock state, but should be changed to let the caller of the
949 	 * function unlock them. Otherwise all intermediate vnode layers
950 	 * (such as union, umapfs, etc) must catch these functions to do
951 	 * the necessary locking at their layer. Note that the inactive
952 	 * and lookup operations also change their lock state, but this
953 	 * cannot be avoided, so these two operations will always need
954 	 * to be handled in intermediate layers.
955 	 */
956 	struct vnode *vp = ap->a_vp;
957 	int vnflags, flags = ap->a_flags;
958 
959 	if (vp->v_vnlock == NULL) {
960 		if ((flags & LK_TYPE_MASK) == LK_DRAIN)
961 			return (0);
962 		MALLOC(vp->v_vnlock, struct lock *, sizeof(struct lock),
963 		    M_VNODE, M_WAITOK);
964 		lockinit(vp->v_vnlock, PVFS, "vnlock", 0, 0);
965 	}
966 	switch (flags & LK_TYPE_MASK) {
967 	case LK_DRAIN:
968 		vnflags = LK_DRAIN;
969 		break;
970 	case LK_EXCLUSIVE:
971 	case LK_SHARED:
972 		vnflags = LK_SHARED;
973 		break;
974 	case LK_UPGRADE:
975 	case LK_EXCLUPGRADE:
976 	case LK_DOWNGRADE:
977 		return (0);
978 	case LK_RELEASE:
979 	default:
980 		panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
981 	}
982 	if (flags & LK_INTERLOCK)
983 		vnflags |= LK_INTERLOCK;
984 	return(lockmgr(vp->v_vnlock, vnflags, &vp->v_interlock, ap->a_p));
985 #else /* for now */
986 	/*
987 	 * Since we are not using the lock manager, we must clear
988 	 * the interlock here.
989 	 */
990 	if (ap->a_flags & LK_INTERLOCK) {
991 		simple_unlock(&ap->a_vp->v_interlock);
992 	}
993 	return (0);
994 #endif
995 }
996 
997 /*
998  * Do the inverse of vop_nolock, handling the interlock in a compatible way.
999  */
1000 int
1001 vop_nounlock(ap)
1002 	struct vop_unlock_args /* {
1003 		struct vnode *a_vp;
1004 		int a_flags;
1005 		struct proc *a_p;
1006 	} */ *ap;
1007 {
1008 	struct vnode *vp = ap->a_vp;
1009 
1010 	if (vp->v_vnlock == NULL) {
1011 		if (ap->a_flags & LK_INTERLOCK)
1012 			simple_unlock(&ap->a_vp->v_interlock);
1013 		return (0);
1014 	}
1015 	return (lockmgr(vp->v_vnlock, LK_RELEASE | ap->a_flags,
1016 		&ap->a_vp->v_interlock, ap->a_p));
1017 }
1018 
1019 /*
1020  * Return whether or not the node is locked.
1021  */
1022 int
1023 vop_noislocked(ap)
1024 	struct vop_islocked_args /* {
1025 		struct vnode *a_vp;
1026 	} */ *ap;
1027 {
1028 	struct vnode *vp = ap->a_vp;
1029 
1030 	if (vp->v_vnlock == NULL)
1031 		return (0);
1032 	return (lockstatus(vp->v_vnlock));
1033 }
1034 
1035 /* #ifdef DIAGNOSTIC */
1036 /*
1037  * Vnode reference, just increment the count
1038  */
1039 void
1040 vref(vp)
1041 	struct vnode *vp;
1042 {
1043 	simple_lock(&vp->v_interlock);
1044 	if (vp->v_usecount <= 0)
1045 		panic("vref used where vget required");
1046 
1047 	vp->v_usecount++;
1048 
1049 	if ((vp->v_type == VREG) &&
1050 		((vp->v_object == NULL) ||
1051 			((vp->v_object->flags & OBJ_VFS_REF) == 0) ||
1052 			(vp->v_object->flags & OBJ_DEAD))) {
1053 		/*
1054 		 * We need to lock the VP during the time that
1055 		 * the object is created.  This is necessary to
1056 		 * keep the system from re-entrantly doing it
1057 		 * multiple times.
1058 		 * XXX vfs_object_create probably needs the interlock?
1059 		 */
1060 		simple_unlock(&vp->v_interlock);
1061 		vfs_object_create(vp, curproc, curproc->p_ucred, 0);
1062 		return;
1063 	}
1064 	simple_unlock(&vp->v_interlock);
1065 }
1066 
1067 /*
1068  * Vnode put/release.
1069  * If count drops to zero, call inactive routine and return to freelist.
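 *
 * The "put" argument distinguishes the callers: vput() passes 1 and
 * expects the vnode to be locked on entry (the lock is released here),
 * while vrele() passes 0 and locks the vnode itself before calling
 * VOP_INACTIVE.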
1070  */
1071 static void
1072 vputrele(vp, put)
1073 	struct vnode *vp;
1074 	int put;
1075 {
1076 	struct proc *p = curproc;	/* XXX */
1077 
1078 #ifdef DIAGNOSTIC
1079 	if (vp == NULL)
1080 		panic("vputrele: null vp");
1081 #endif
1082 	simple_lock(&vp->v_interlock);
1083 	vp->v_usecount--;
1084 
1085 	if ((vp->v_usecount == 1) &&
1086 		vp->v_object &&
1087 		(vp->v_object->flags & OBJ_VFS_REF)) {
1088 		vp->v_object->flags &= ~OBJ_VFS_REF;
1089 		if (put) {
1090 			VOP_UNLOCK(vp, LK_INTERLOCK, p);
1091 		} else {
1092 			simple_unlock(&vp->v_interlock);
1093 		}
1094 		vm_object_deallocate(vp->v_object);
1095 		return;
1096 	}
1097 
1098 	if (vp->v_usecount > 0) {
1099 		if (put) {
1100 			VOP_UNLOCK(vp, LK_INTERLOCK, p);
1101 		} else {
1102 			simple_unlock(&vp->v_interlock);
1103 		}
1104 		return;
1105 	}
1106 
1107 	if (vp->v_usecount < 0) {
1108 #ifdef DIAGNOSTIC
1109 		vprint("vputrele: negative ref count", vp);
1110 #endif
1111 		panic("vputrele: negative ref cnt");
1112 	}
1113 	simple_lock(&vnode_free_list_slock);
1114 	if (vp->v_flag & VAGE) {
1115 		vp->v_flag &= ~VAGE;
1116 		if (vp->v_tag != VT_TFS)
1117 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1118 	} else {
1119 		if (vp->v_tag != VT_TFS)
1120 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
1121 	}
1122 	freevnodes++;
1123 	simple_unlock(&vnode_free_list_slock);
1124 
1125 	/*
1126 	 * If we are doing a vput, the node is already locked, and we must
1127 	 * call VOP_INACTIVE with the node locked.  So, in the case of
1128 	 * vrele, we explicitly lock the vnode before calling VOP_INACTIVE.
1129 	 */
1130 	if (put) {
1131 		simple_unlock(&vp->v_interlock);
1132 		VOP_INACTIVE(vp, p);
1133 	} else if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK, p) == 0) {
1134 		VOP_INACTIVE(vp, p);
1135 	}
1136 }
1137 
1138 /*
1139  * vput(), just unlock and vrele()
1140  */
1141 void
1142 vput(vp)
1143 	struct vnode *vp;
1144 {
1145 	vputrele(vp, 1);
1146 }
1147 
1148 void
1149 vrele(vp)
1150 	struct vnode *vp;
1151 {
1152 	vputrele(vp, 0);
1153 }
1154 
1155 #ifdef DIAGNOSTIC
1156 /*
1157  * Page or buffer structure gets a reference.
1158  */
1159 void
1160 vhold(vp)
1161 	register struct vnode *vp;
1162 {
1163 
1164 	simple_lock(&vp->v_interlock);
1165 	vp->v_holdcnt++;
1166 	simple_unlock(&vp->v_interlock);
1167 }
1168 
1169 /*
1170  * Page or buffer structure frees a reference.
1171  */
1172 void
1173 holdrele(vp)
1174 	register struct vnode *vp;
1175 {
1176 
1177 	simple_lock(&vp->v_interlock);
1178 	if (vp->v_holdcnt <= 0)
1179 		panic("holdrele: holdcnt");
1180 	vp->v_holdcnt--;
1181 	simple_unlock(&vp->v_interlock);
1182 }
1183 #endif /* DIAGNOSTIC */
1184 
1185 /*
1186  * Remove any vnodes in the vnode table belonging to mount point mp.
1187  *
1188  * If MNT_NOFORCE is specified, there should not be any active vnodes;
1189  * an error is returned if any are found (nb: this is a user error, not a
1190  * system error). If MNT_FORCE is specified, detach any active vnodes
1191  * that are found.
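 *
 * `skipvp' names a vnode to be left untouched.  In `flags', SKIPSYSTEM
 * skips vnodes marked VSYSTEM, WRITECLOSE restricts the flush to regular
 * files open for writing, and FORCECLOSE forcibly closes active vnodes,
 * reverting block and character devices to anonymous devices.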
1192  */
1193 #ifdef DIAGNOSTIC
1194 static int busyprt = 0;		/* print out busy vnodes */
1195 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
1196 #endif
1197 
1198 int
1199 vflush(mp, skipvp, flags)
1200 	struct mount *mp;
1201 	struct vnode *skipvp;
1202 	int flags;
1203 {
1204 	struct proc *p = curproc;	/* XXX */
1205 	struct vnode *vp, *nvp;
1206 	int busy = 0;
1207 
1208 	simple_lock(&mntvnode_slock);
1209 loop:
1210 	for (vp = mp->mnt_vnodelist.lh_first; vp; vp = nvp) {
1211 		/*
1212 		 * Make sure this vnode wasn't reclaimed in getnewvnode().
1213 		 * Start over if it has (it won't be on the list anymore).
1214 		 */
1215 		if (vp->v_mount != mp)
1216 			goto loop;
1217 		nvp = vp->v_mntvnodes.le_next;
1218 		/*
1219 		 * Skip over a selected vnode.
1220 		 */
1221 		if (vp == skipvp)
1222 			continue;
1223 
1224 		simple_lock(&vp->v_interlock);
1225 		/*
1226 		 * Skip over vnodes marked VSYSTEM.
1227 		 */
1228 		if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
1229 			simple_unlock(&vp->v_interlock);
1230 			continue;
1231 		}
1232 		/*
1233 		 * If WRITECLOSE is set, only flush out regular file vnodes
1234 		 * open for writing.
1235 		 */
1236 		if ((flags & WRITECLOSE) &&
1237 		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
1238 			simple_unlock(&vp->v_interlock);
1239 			continue;
1240 		}
1241 
1242 		/*
1243 		 * With v_usecount == 0, all we need to do is clear out the
1244 		 * vnode data structures and we are done.
1245 		 */
1246 		if (vp->v_usecount == 0) {
1247 			simple_unlock(&mntvnode_slock);
1248 			vgonel(vp, p);
1249 			simple_lock(&mntvnode_slock);
1250 			continue;
1251 		}
1252 
1253 		/*
1254 		 * If FORCECLOSE is set, forcibly close the vnode. For block
1255 		 * or character devices, revert to an anonymous device. For
1256 		 * all other files, just kill them.
1257 		 */
1258 		if (flags & FORCECLOSE) {
1259 			simple_unlock(&mntvnode_slock);
1260 			if (vp->v_type != VBLK && vp->v_type != VCHR) {
1261 				vgonel(vp, p);
1262 			} else {
1263 				vclean(vp, 0, p);
1264 				vp->v_op = spec_vnodeop_p;
1265 				insmntque(vp, (struct mount *) 0);
1266 			}
1267 			simple_lock(&mntvnode_slock);
1268 			continue;
1269 		}
1270 #ifdef DIAGNOSTIC
1271 		if (busyprt)
1272 			vprint("vflush: busy vnode", vp);
1273 #endif
1274 		simple_unlock(&vp->v_interlock);
1275 		busy++;
1276 	}
1277 	simple_unlock(&mntvnode_slock);
1278 	if (busy)
1279 		return (EBUSY);
1280 	return (0);
1281 }
1282 
1283 /*
1284  * Disassociate the underlying file system from a vnode.
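 *
 * The vnode is marked VXLOCK for the duration of the cleaning.  Its
 * buffers are flushed (and VOP_CLOSE called) when DOCLOSE is given,
 * VOP_INACTIVE and VOP_RECLAIM are invoked, and the vnode is finally
 * switched to the dead filesystem operations vector before any VXWANT
 * sleepers are awakened.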
1285  */
1286 static void
1287 vclean(struct vnode *vp, int flags, struct proc *p)
1288 {
1289 	int active, irefed;
1290 	vm_object_t object;
1291 
1292 	/*
1293 	 * Check to see if the vnode is in use. If so we have to reference it
1294 	 * before we clean it out so that its count cannot fall to zero and
1295 	 * generate a race against ourselves to recycle it.
1296 	 */
1297 	if ((active = vp->v_usecount))
1298 		vp->v_usecount++;
1299 	/*
1300 	 * Prevent the vnode from being recycled or brought into use while we
1301 	 * clean it out.
1302 	 */
1303 	if (vp->v_flag & VXLOCK)
1304 		panic("vclean: deadlock");
1305 	vp->v_flag |= VXLOCK;
1306 	/*
1307 	 * Even if the count is zero, the VOP_INACTIVE routine may still
1308 	 * have the object locked while it cleans it out. The VOP_LOCK
1309 	 * ensures that the VOP_INACTIVE routine is done with its work.
1310 	 * For active vnodes, it ensures that no other activity can
1311 	 * occur while the underlying object is being cleaned out.
1312 	 */
1313 	VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, p);
1314 
1315 	object = vp->v_object;
1316 	irefed = 0;
1317 	if (object && ((object->flags & OBJ_DEAD) == 0)) {
1318 		if (object->ref_count == 0) {
1319 			vm_object_reference(object);
1320 			irefed = 1;
1321 		}
1322 		++object->ref_count;
1323 		pager_cache(object, FALSE);
1324 	}
1325 
1326 	/*
1327 	 * Clean out any buffers associated with the vnode.
1328 	 */
1329 	if (flags & DOCLOSE)
1330 		vinvalbuf(vp, V_SAVE, NOCRED, p, 0, 0);
1331 
1332 	if (irefed) {
1333 		vm_object_deallocate(object);
1334 	}
1335 
1336 	/*
1337 	 * If purging an active vnode, it must be closed and
1338 	 * deactivated before being reclaimed. Note that the
1339 	 * VOP_INACTIVE will unlock the vnode.
1340 	 */
1341 	if (active) {
1342 		if (flags & DOCLOSE)
1343 			VOP_CLOSE(vp, IO_NDELAY, NOCRED, p);
1344 		VOP_INACTIVE(vp, p);
1345 	} else {
1346 		/*
1347 		 * Any other processes trying to obtain this lock must first
1348 		 * wait for VXLOCK to clear, then call the new lock operation.
1349 		 */
1350 		VOP_UNLOCK(vp, 0, p);
1351 	}
1352 	/*
1353 	 * Reclaim the vnode.
1354 	 */
1355 	if (VOP_RECLAIM(vp, p))
1356 		panic("vclean: cannot reclaim");
1357 	if (active)
1358 		vrele(vp);
1359 	cache_purge(vp);
1360 	if (vp->v_vnlock) {
1361 #ifdef DIAGNOSTIC
1362 		if ((vp->v_vnlock->lk_flags & LK_DRAINED) == 0)
1363 			vprint("vclean: lock not drained", vp);
1364 #endif
1365 		FREE(vp->v_vnlock, M_VNODE);
1366 		vp->v_vnlock = NULL;
1367 	}
1368 
1369 	/*
1370 	 * Done with purge, notify sleepers of the grim news.
1371 	 */
1372 	vp->v_op = dead_vnodeop_p;
1373 	vp->v_tag = VT_NON;
1374 	vp->v_flag &= ~VXLOCK;
1375 	if (vp->v_flag & VXWANT) {
1376 		vp->v_flag &= ~VXWANT;
1377 		wakeup((caddr_t) vp);
1378 	}
1379 }
1380 
1381 /*
1382  * Eliminate all activity associated with the requested vnode
1383  * and with all vnodes aliased to the requested vnode.
1384  */
1385 int
1386 vop_revoke(ap)
1387 	struct vop_revoke_args /* {
1388 		struct vnode *a_vp;
1389 		int a_flags;
1390 	} */ *ap;
1391 {
1392 	struct vnode *vp, *vq;
1393 	struct proc *p = curproc;	/* XXX */
1394 
1395 #ifdef DIAGNOSTIC
1396 	if ((ap->a_flags & REVOKEALL) == 0)
1397 		panic("vop_revoke");
1398 #endif
1399 
1400 	vp = ap->a_vp;
1401 	simple_lock(&vp->v_interlock);
1402 
1403 	if (vp->v_flag & VALIASED) {
1404 		/*
1405 		 * If a vgone (or vclean) is already in progress,
1406 		 * wait until it is done and return.
1407 		 */
1408 		if (vp->v_flag & VXLOCK) {
1409 			vp->v_flag |= VXWANT;
1410 			simple_unlock(&vp->v_interlock);
1411 			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
1412 			return (0);
1413 		}
1414 		/*
1415 		 * Ensure that vp will not be vgone'd while we
1416 		 * are eliminating its aliases.
1417 		 */
1418 		vp->v_flag |= VXLOCK;
1419 		simple_unlock(&vp->v_interlock);
1420 		while (vp->v_flag & VALIASED) {
1421 			simple_lock(&spechash_slock);
1422 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1423 				if (vq->v_rdev != vp->v_rdev ||
1424 				    vq->v_type != vp->v_type || vp == vq)
1425 					continue;
1426 				simple_unlock(&spechash_slock);
1427 				vgone(vq);
1428 				break;
1429 			}
1430 			if (vq == NULLVP) {
1431 				simple_unlock(&spechash_slock);
1432 			}
1433 		}
1434 		/*
1435 		 * Remove the lock so that vgone below will
1436 		 * really eliminate the vnode after which time
1437 		 * vgone will awaken any sleepers.
1438 		 */
1439 		simple_lock(&vp->v_interlock);
1440 		vp->v_flag &= ~VXLOCK;
1441 	}
1442 	vgonel(vp, p);
1443 	return (0);
1444 }
1445 
1446 /*
1447  * Recycle an unused vnode to the front of the free list.
1448  * Release the passed interlock if the vnode will be recycled.
1449  */
1450 int
1451 vrecycle(vp, inter_lkp, p)
1452 	struct vnode *vp;
1453 	struct simplelock *inter_lkp;
1454 	struct proc *p;
1455 {
1456 
1457 	simple_lock(&vp->v_interlock);
1458 	if (vp->v_usecount == 0) {
1459 		if (inter_lkp) {
1460 			simple_unlock(inter_lkp);
1461 		}
1462 		vgonel(vp, p);
1463 		return (1);
1464 	}
1465 	simple_unlock(&vp->v_interlock);
1466 	return (0);
1467 }
1468 
1469 /*
1470  * Eliminate all activity associated with a vnode
1471  * in preparation for reuse.
1472  */
1473 void
1474 vgone(vp)
1475 	register struct vnode *vp;
1476 {
1477 	struct proc *p = curproc;	/* XXX */
1478 
1479 	simple_lock(&vp->v_interlock);
1480 	vgonel(vp, p);
1481 }
1482 
1483 /*
1484  * vgone, with the vp interlock held.
1485  */
1486 static void
1487 vgonel(vp, p)
1488 	struct vnode *vp;
1489 	struct proc *p;
1490 {
1491 	struct vnode *vq;
1492 	struct vnode *vx;
1493 
1494 	/*
1495 	 * If a vgone (or vclean) is already in progress,
1496 	 * wait until it is done and return.
1497 	 */
1498 	if (vp->v_flag & VXLOCK) {
1499 		vp->v_flag |= VXWANT;
1500 		simple_unlock(&vp->v_interlock);
1501 		tsleep((caddr_t)vp, PINOD, "vgone", 0);
1502 		return;
1503 	}
1504 
1505 	if (vp->v_object) {
1506 		vp->v_object->flags |= OBJ_VNODE_GONE;
1507 	}
1508 
1509 	/*
1510 	 * Clean out the filesystem specific data.
1511 	 */
1512 	vclean(vp, DOCLOSE, p);
1513 	/*
1514 	 * Delete from old mount point vnode list, if on one.
1515 	 */
1516 	if (vp->v_mount != NULL)
1517 		insmntque(vp, (struct mount *)0);
1518 	/*
1519 	 * If special device, remove it from special device alias list
1520 	 * if it is on one.
1521 	 */
1522 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
1523 		simple_lock(&spechash_slock);
1524 		if (*vp->v_hashchain == vp) {
1525 			*vp->v_hashchain = vp->v_specnext;
1526 		} else {
1527 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1528 				if (vq->v_specnext != vp)
1529 					continue;
1530 				vq->v_specnext = vp->v_specnext;
1531 				break;
1532 			}
1533 			if (vq == NULL)
1534 				panic("missing bdev");
1535 		}
1536 		if (vp->v_flag & VALIASED) {
1537 			vx = NULL;
1538 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1539 				if (vq->v_rdev != vp->v_rdev ||
1540 				    vq->v_type != vp->v_type)
1541 					continue;
1542 				if (vx)
1543 					break;
1544 				vx = vq;
1545 			}
1546 			if (vx == NULL)
1547 				panic("missing alias");
1548 			if (vq == NULL)
1549 				vx->v_flag &= ~VALIASED;
1550 			vp->v_flag &= ~VALIASED;
1551 		}
1552 		simple_unlock(&spechash_slock);
1553 		FREE(vp->v_specinfo, M_VNODE);
1554 		vp->v_specinfo = NULL;
1555 	}
1556 
1557 	/*
1558 	 * If it is on the freelist and not already at the head,
1559 	 * move it to the head of the list. The test of the back
1560 	 * pointer and the reference count of zero is because
1561 	 * it will be removed from the free list by getnewvnode,
1562 	 * but will not have its reference count incremented until
1563 	 * after calling vgone. If the reference count were
1564 	 * incremented first, vgone would (incorrectly) try to
1565 	 * close the previous instance of the underlying object.
1566 	 * So, the back pointer is explicitly set to `0xdeadb' in
1567 	 * getnewvnode after removing it from the freelist to ensure
1568 	 * that we do not try to move it here.
1569 	 */
1570 	if (vp->v_usecount == 0) {
1571 		simple_lock(&vnode_free_list_slock);
1572 		if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) &&
1573 			vnode_free_list.tqh_first != vp) {
1574 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
1575 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
1576 		}
1577 		simple_unlock(&vnode_free_list_slock);
1578 	}
1579 
1580 	vp->v_type = VBAD;
1581 }
1582 
1583 /*
1584  * Lookup a vnode by device number.
1585  */
1586 int
1587 vfinddev(dev, type, vpp)
1588 	dev_t dev;
1589 	enum vtype type;
1590 	struct vnode **vpp;
1591 {
1592 	register struct vnode *vp;
1593 	int rc = 0;
1594 
1595 	simple_lock(&spechash_slock);
1596 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
1597 		if (dev != vp->v_rdev || type != vp->v_type)
1598 			continue;
1599 		*vpp = vp;
1600 		rc = 1;
1601 		break;
1602 	}
1603 	simple_unlock(&spechash_slock);
1604 	return (rc);
1605 }
1606 
1607 /*
1608  * Calculate the total number of references to a special device.
1609  */
1610 int
1611 vcount(vp)
1612 	register struct vnode *vp;
1613 {
1614 	struct vnode *vq, *vnext;
1615 	int count;
1616 
1617 loop:
1618 	if ((vp->v_flag & VALIASED) == 0)
1619 		return (vp->v_usecount);
1620 	simple_lock(&spechash_slock);
1621 	for (count = 0, vq = *vp->v_hashchain; vq; vq = vnext) {
1622 		vnext = vq->v_specnext;
1623 		if (vq->v_rdev != vp->v_rdev || vq->v_type != vp->v_type)
1624 			continue;
1625 		/*
1626 		 * Alias, but not in use, so flush it out.
1627 		 */
1628 		if (vq->v_usecount == 0 && vq != vp) {
1629 			simple_unlock(&spechash_slock);
1630 			vgone(vq);
1631 			goto loop;
1632 		}
1633 		count += vq->v_usecount;
1634 	}
1635 	simple_unlock(&spechash_slock);
1636 	return (count);
1637 }
1638 
1639 /*
1640  * Print out a description of a vnode.
1641  */
1642 static char *typename[] =
1643 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};
1644 
1645 void
1646 vprint(label, vp)
1647 	char *label;
1648 	register struct vnode *vp;
1649 {
1650 	char buf[64];
1651 
1652 	if (label != NULL)
1653 		printf("%s: %x: ", label, vp);
1654 	else
1655 		printf("%x: ", vp);
1656 	printf("type %s, usecount %d, writecount %d, refcount %ld,",
1657 	    typename[vp->v_type], vp->v_usecount, vp->v_writecount,
1658 	    vp->v_holdcnt);
1659 	buf[0] = '\0';
1660 	if (vp->v_flag & VROOT)
1661 		strcat(buf, "|VROOT");
1662 	if (vp->v_flag & VTEXT)
1663 		strcat(buf, "|VTEXT");
1664 	if (vp->v_flag & VSYSTEM)
1665 		strcat(buf, "|VSYSTEM");
1666 	if (vp->v_flag & VXLOCK)
1667 		strcat(buf, "|VXLOCK");
1668 	if (vp->v_flag & VXWANT)
1669 		strcat(buf, "|VXWANT");
1670 	if (vp->v_flag & VBWAIT)
1671 		strcat(buf, "|VBWAIT");
1672 	if (vp->v_flag & VALIASED)
1673 		strcat(buf, "|VALIASED");
1674 	if (buf[0] != '\0')
1675 		printf(" flags (%s)", &buf[1]);
1676 	if (vp->v_data == NULL) {
1677 		printf("\n");
1678 	} else {
1679 		printf("\n\t");
1680 		VOP_PRINT(vp);
1681 	}
1682 }
1683 
1684 #ifdef DDB
1685 /*
1686  * List all of the locked vnodes in the system.
1687  * Called when debugging the kernel.
1688  */
1689 void
1690 printlockedvnodes()
1691 {
1692 	struct proc *p = curproc;	/* XXX */
1693 	struct mount *mp, *nmp;
1694 	struct vnode *vp;
1695 
1696 	printf("Locked vnodes\n");
1697 	simple_lock(&mountlist_slock);
1698 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
1699 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
1700 			nmp = mp->mnt_list.cqe_next;
1701 			continue;
1702 		}
1703 		for (vp = mp->mnt_vnodelist.lh_first;
1704 		     vp != NULL;
1705 		     vp = vp->v_mntvnodes.le_next) {
1706 			if (VOP_ISLOCKED(vp))
1707 				vprint((char *)0, vp);
1708 		}
1709 		simple_lock(&mountlist_slock);
1710 		nmp = mp->mnt_list.cqe_next;
1711 		vfs_unbusy(mp, p);
1712 	}
1713 	simple_unlock(&mountlist_slock);
1714 }
1715 #endif
1716 
1717 /*
1718  * Top level filesystem related information gathering.
1719  */
1720 static int	sysctl_ovfs_conf __P(SYSCTL_HANDLER_ARGS);
1721 
1722 static int
1723 vfs_sysctl SYSCTL_HANDLER_ARGS
1724 {
1725 	int *name = (int *)arg1 - 1;	/* XXX */
1726 	u_int namelen = arg2 + 1;	/* XXX */
1727 	struct vfsconf *vfsp;
1728 
1729 #ifndef NO_COMPAT_PRELITE2
1730 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
1731 	if (namelen == 1)
1732 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
1733 #endif
1734 
1735 #ifdef notyet
1736 	/* all sysctl names at this level are at least name and field */
1737 	if (namelen < 2)
1738 		return (ENOTDIR);		/* overloaded */
1739 	if (name[0] != VFS_GENERIC) {
1740 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1741 			if (vfsp->vfc_typenum == name[0])
1742 				break;
1743 		if (vfsp == NULL)
1744 			return (EOPNOTSUPP);
1745 		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
1746 		    oldp, oldlenp, newp, newlen, p));
1747 	}
1748 #endif
1749 	switch (name[1]) {
1750 	case VFS_MAXTYPENUM:
1751 		if (namelen != 2)
1752 			return (ENOTDIR);
1753 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
1754 	case VFS_CONF:
1755 		if (namelen != 3)
1756 			return (ENOTDIR);	/* overloaded */
1757 		for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next)
1758 			if (vfsp->vfc_typenum == name[2])
1759 				break;
1760 		if (vfsp == NULL)
1761 			return (EOPNOTSUPP);
1762 		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
1763 	}
1764 	return (EOPNOTSUPP);
1765 }
1766 
1767 SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
1768 	"Generic filesystem");
1769 
1770 #ifndef NO_COMPAT_PRELITE2
1771 
1772 static int
1773 sysctl_ovfs_conf SYSCTL_HANDLER_ARGS
1774 {
1775 	int error;
1776 	struct vfsconf *vfsp;
1777 	struct ovfsconf ovfs;
1778 
1779 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
1780 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
1781 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
1782 		ovfs.vfc_index = vfsp->vfc_typenum;
1783 		ovfs.vfc_refcount = vfsp->vfc_refcount;
1784 		ovfs.vfc_flags = vfsp->vfc_flags;
1785 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
1786 		if (error)
1787 			return error;
1788 	}
1789 	return 0;
1790 }
1791 
1792 #endif /* !NO_COMPAT_PRELITE2 */
1793 
1794 int kinfo_vdebug = 1;
1795 int kinfo_vgetfailed;
1796 
1797 #define KINFO_VNODESLOP	10
1798 /*
1799  * Dump vnode list (via sysctl).
1800  * Copyout address of vnode followed by vnode.
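 *
 * For each vnode on each mounted filesystem the output is a pair: the
 * kernel address of the vnode (VPTRSZ bytes) followed by the vnode
 * contents (VNODESZ bytes).  When no old buffer is supplied, only a size
 * estimate of (numvnodes + KINFO_VNODESLOP) such pairs is returned.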
1801  */
1802 /* ARGSUSED */
1803 static int
1804 sysctl_vnode SYSCTL_HANDLER_ARGS
1805 {
1806 	struct proc *p = curproc;	/* XXX */
1807 	struct mount *mp, *nmp;
1808 	struct vnode *nvp, *vp;
1809 	int error;
1810 
1811 #define VPTRSZ	sizeof (struct vnode *)
1812 #define VNODESZ	sizeof (struct vnode)
1813 
1814 	req->lock = 0;
1815 	if (!req->oldptr) /* Make an estimate */
1816 		return (SYSCTL_OUT(req, 0,
1817 			(numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ)));
1818 
1819 	simple_lock(&mountlist_slock);
1820 	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
1821 		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock, p)) {
1822 			nmp = mp->mnt_list.cqe_next;
1823 			continue;
1824 		}
1825 again:
1826 		simple_lock(&mntvnode_slock);
1827 		for (vp = mp->mnt_vnodelist.lh_first;
1828 		     vp != NULL;
1829 		     vp = nvp) {
1830 			/*
1831 			 * Check that the vp is still associated with
1832 			 * this filesystem.  RACE: could have been
1833 			 * recycled onto the same filesystem.
1834 			 */
1835 			if (vp->v_mount != mp) {
1836 				simple_unlock(&mntvnode_slock);
1837 				if (kinfo_vdebug)
1838 					printf("kinfo: vp changed\n");
1839 				goto again;
1840 			}
1841 			nvp = vp->v_mntvnodes.le_next;
1842 			simple_unlock(&mntvnode_slock);
1843 			if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) ||
1844 			    (error = SYSCTL_OUT(req, vp, VNODESZ)))
1845 				return (error);
1846 			simple_lock(&mntvnode_slock);
1847 		}
1848 		simple_unlock(&mntvnode_slock);
1849 		simple_lock(&mountlist_slock);
1850 		nmp = mp->mnt_list.cqe_next;
1851 		vfs_unbusy(mp, p);
1852 	}
1853 	simple_unlock(&mountlist_slock);
1854 
1855 	return (0);
1856 }
1857 
1858 /*
1859  * XXX
1860  * Exporting the vnode list on large systems causes them to crash.
1861  * Exporting the vnode list on medium systems causes sysctl to coredump.
1862  */
1863 #if 0
1864 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
1865 	0, 0, sysctl_vnode, "S,vnode", "");
1866 #endif
1867 
1868 /*
1869  * Check to see if a filesystem is mounted on a block device.
1870  */
1871 int
1872 vfs_mountedon(vp)
1873 	struct vnode *vp;
1874 {
1875 	struct vnode *vq;
1876 	int error = 0;
1877 
1878 	if (vp->v_specflags & SI_MOUNTEDON)
1879 		return (EBUSY);
1880 	if (vp->v_flag & VALIASED) {
1881 		simple_lock(&spechash_slock);
1882 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
1883 			if (vq->v_rdev != vp->v_rdev ||
1884 			    vq->v_type != vp->v_type)
1885 				continue;
1886 			if (vq->v_specflags & SI_MOUNTEDON) {
1887 				error = EBUSY;
1888 				break;
1889 			}
1890 		}
1891 		simple_unlock(&spechash_slock);
1892 	}
1893 	return (error);
1894 }
1895 
1896 /*
1897  * Unmount all filesystems. The list is traversed in reverse order
1898  * of mounting to avoid dependencies.
1899  */
1900 void
1901 vfs_unmountall()
1902 {
1903 	struct mount *mp, *nmp;
1904 	struct proc *p = initproc;	/* XXX XXX should this be proc0? */
1905 	int error;
1906 
1907 	/*
1908 	 * Since this only runs when rebooting, it is not interlocked.
1909 	 */
1910 	for (mp = mountlist.cqh_last; mp != (void *)&mountlist; mp = nmp) {
1911 		nmp = mp->mnt_list.cqe_prev;
1912 		error = dounmount(mp, MNT_FORCE, p);
1913 		if (error) {
1914 			printf("unmount of %s failed (",
1915 			    mp->mnt_stat.f_mntonname);
1916 			if (error == EBUSY)
1917 				printf("BUSY)\n");
1918 			else
1919 				printf("%d)\n", error);
1920 		}
1921 	}
1922 }
1923 
1924 /*
1925  * Build hash lists of net addresses and hang them off the mount point.
1926  * Called by ufs_mount() to set up the lists of export addresses.
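 *
 * An ex_addrlen of zero establishes the default export (MNT_DEFEXPORTED).
 * Otherwise the address and optional mask are copied in from user space
 * and inserted into a per-address-family radix tree, attached on demand;
 * a duplicate entry yields EPERM.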
1927  */
1928 static int
1929 vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
1930 	struct export_args *argp)
1931 {
1932 	register struct netcred *np;
1933 	register struct radix_node_head *rnh;
1934 	register int i;
1935 	struct radix_node *rn;
1936 	struct sockaddr *saddr, *smask = 0;
1937 	struct domain *dom;
1938 	int error;
1939 
1940 	if (argp->ex_addrlen == 0) {
1941 		if (mp->mnt_flag & MNT_DEFEXPORTED)
1942 			return (EPERM);
1943 		np = &nep->ne_defexported;
1944 		np->netc_exflags = argp->ex_flags;
1945 		np->netc_anon = argp->ex_anon;
1946 		np->netc_anon.cr_ref = 1;
1947 		mp->mnt_flag |= MNT_DEFEXPORTED;
1948 		return (0);
1949 	}
1950 	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
1951 	np = (struct netcred *) malloc(i, M_NETADDR, M_WAITOK);
1952 	bzero((caddr_t) np, i);
1953 	saddr = (struct sockaddr *) (np + 1);
1954 	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
1955 		goto out;
1956 	if (saddr->sa_len > argp->ex_addrlen)
1957 		saddr->sa_len = argp->ex_addrlen;
1958 	if (argp->ex_masklen) {
1959 		smask = (struct sockaddr *) ((caddr_t) saddr + argp->ex_addrlen);
1960 		error = copyin(argp->ex_mask, (caddr_t) smask, argp->ex_masklen);
1961 		if (error)
1962 			goto out;
1963 		if (smask->sa_len > argp->ex_masklen)
1964 			smask->sa_len = argp->ex_masklen;
1965 	}
1966 	i = saddr->sa_family;
1967 	if ((rnh = nep->ne_rtable[i]) == 0) {
1968 		/*
1969 		 * Seems silly to initialize every AF when most are not used;
1970 		 * do so on demand here.
1971 		 */
1972 		for (dom = domains; dom; dom = dom->dom_next)
1973 			if (dom->dom_family == i && dom->dom_rtattach) {
1974 				dom->dom_rtattach((void **) &nep->ne_rtable[i],
1975 				    dom->dom_rtoffset);
1976 				break;
1977 			}
1978 		if ((rnh = nep->ne_rtable[i]) == 0) {
1979 			error = ENOBUFS;
1980 			goto out;
1981 		}
1982 	}
1983 	rn = (*rnh->rnh_addaddr) ((caddr_t) saddr, (caddr_t) smask, rnh,
1984 	    np->netc_rnodes);
1985 	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
1986 		error = EPERM;
1987 		goto out;
1988 	}
1989 	np->netc_exflags = argp->ex_flags;
1990 	np->netc_anon = argp->ex_anon;
1991 	np->netc_anon.cr_ref = 1;
1992 	return (0);
1993 out:
1994 	free(np, M_NETADDR);
1995 	return (error);
1996 }
1997 
1998 /* ARGSUSED */
1999 static int
2000 vfs_free_netcred(struct radix_node *rn, void *w)
2001 {
2002 	register struct radix_node_head *rnh = (struct radix_node_head *) w;
2003 
2004 	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
2005 	free((caddr_t) rn, M_NETADDR);
2006 	return (0);
2007 }
2008 
2009 /*
2010  * Free the net address hash lists that are hanging off the mount points.
2011  */
2012 static void
2013 vfs_free_addrlist(struct netexport *nep)
2014 {
2015 	register int i;
2016 	register struct radix_node_head *rnh;
2017 
2018 	for (i = 0; i <= AF_MAX; i++)
2019 		if ((rnh = nep->ne_rtable[i])) {
2020 			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
2021 			    (caddr_t) rnh);
2022 			free((caddr_t) rnh, M_RTABLE);
2023 			nep->ne_rtable[i] = 0;
2024 		}
2025 }
2026 
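/*
 * Update the export information for a mount point.  MNT_DELEXPORT in
 * argp->ex_flags tears down any existing address lists; MNT_EXPORTED
 * hangs a new address list off the mount point via vfs_hang_addrlist().
 */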
2027 int
2028 vfs_export(mp, nep, argp)
2029 	struct mount *mp;
2030 	struct netexport *nep;
2031 	struct export_args *argp;
2032 {
2033 	int error;
2034 
2035 	if (argp->ex_flags & MNT_DELEXPORT) {
2036 		vfs_free_addrlist(nep);
2037 		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
2038 	}
2039 	if (argp->ex_flags & MNT_EXPORTED) {
2040 		if ((error = vfs_hang_addrlist(mp, nep, argp)))
2041 			return (error);
2042 		mp->mnt_flag |= MNT_EXPORTED;
2043 	}
2044 	return (0);
2045 }
2046 
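/*
 * Look up the export credentials that apply to the client address in
 * `nam' (a sockaddr in an mbuf).  The per-address-family radix tree is
 * consulted first; if nothing matches, the default export is used when
 * one exists.  NULL is returned if the filesystem is not exported to
 * that client.
 */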
2047 struct netcred *
2048 vfs_export_lookup(mp, nep, nam)
2049 	register struct mount *mp;
2050 	struct netexport *nep;
2051 	struct mbuf *nam;
2052 {
2053 	register struct netcred *np;
2054 	register struct radix_node_head *rnh;
2055 	struct sockaddr *saddr;
2056 
2057 	np = NULL;
2058 	if (mp->mnt_flag & MNT_EXPORTED) {
2059 		/*
2060 		 * Lookup in the export list first.
2061 		 */
2062 		if (nam != NULL) {
2063 			saddr = mtod(nam, struct sockaddr *);
2064 			rnh = nep->ne_rtable[saddr->sa_family];
2065 			if (rnh != NULL) {
2066 				np = (struct netcred *)
2067 					(*rnh->rnh_matchaddr)((caddr_t)saddr,
2068 							      rnh);
2069 				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
2070 					np = NULL;
2071 			}
2072 		}
2073 		/*
2074 		 * If no address match, use the default if it exists.
2075 		 */
2076 		if (np == NULL && (mp->mnt_flag & MNT_DEFEXPORTED))
2077 			np = &nep->ne_defexported;
2078 	}
2079 	return (np);
2080 }
2081 
2082 /*
2083  * Perform msync on all vnodes under a mount point.
2084  * The mount point must be locked.
2085  */
2086 void
2087 vfs_msync(struct mount *mp, int flags) {
2088 	struct vnode *vp, *nvp;
2089 loop:
2090 	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
2091 
2092 		if (vp->v_mount != mp)
2093 			goto loop;
2094 		nvp = vp->v_mntvnodes.le_next;
2095 		if (VOP_ISLOCKED(vp) && (flags != MNT_WAIT))
2096 			continue;
2097 		if (vp->v_object &&
2098 		   (vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
2099 			vm_object_page_clean(vp->v_object, 0, 0, TRUE, TRUE);
2100 		}
2101 	}
2102 }
2103 
2104 /*
2105  * Create the VM object needed for VMIO and mmap support.  This
2106  * is done for all VREG files in the system.  Some filesystems might
2107  * take advantage of the additional metadata buffering capability of the
2108  * VMIO code by making the device node be VMIO mode also.
2109  */
2110 int
2111 vfs_object_create(vp, p, cred, waslocked)
2112 	struct vnode *vp;
2113 	struct proc *p;
2114 	struct ucred *cred;
2115 	int waslocked;
2116 {
2117 	struct vattr vat;
2118 	vm_object_t object;
2119 	int error = 0;
2120 
2121 retry:
2122 	if ((object = vp->v_object) == NULL) {
2123 		if (vp->v_type == VREG) {
2124 			if ((error = VOP_GETATTR(vp, &vat, cred, p)) != 0)
2125 				goto retn;
2126 			(void) vnode_pager_alloc(vp,
2127 				OFF_TO_IDX(round_page(vat.va_size)), 0, 0);
2128 		} else {
2129 			/*
2130 			 * This simply allocates the biggest object possible
2131 			 * for a VBLK vnode.  This should be fixed, but doesn't
2132 			 * cause any problems (yet).
2133 			 */
2134 			(void) vnode_pager_alloc(vp, INT_MAX, 0, 0);
2135 		}
2136 		vp->v_object->flags |= OBJ_VFS_REF;
2137 	} else {
2138 		if (object->flags & OBJ_DEAD) {
2139 			if (waslocked)
2140 				VOP_UNLOCK(vp, 0, p);
2141 			tsleep(object, PVM, "vodead", 0);
2142 			if (waslocked)
2143 				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
2144 			goto retry;
2145 		}
2146 		if ((object->flags & OBJ_VFS_REF) == 0) {
2147 			object->flags |= OBJ_VFS_REF;
2148 			vm_object_reference(object);
2149 		}
2150 	}
2151 	if (vp->v_object)
2152 		vp->v_flag |= VVMIO;
2153 
2154 retn:
2155 	return error;
2156 }
2157 
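/*
 * Note that a vnode has been touched: if it is unreferenced and still on
 * the free list, move it to the tail so that it becomes the last
 * candidate for reuse by getnewvnode().
 */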
2158 void
2159 vtouch(vp)
2160 	struct vnode *vp;
2161 {
2162 	simple_lock(&vp->v_interlock);
2163 	if (vp->v_usecount) {
2164 		simple_unlock(&vp->v_interlock);
2165 		return;
2166 	}
2167 	if (simple_lock_try(&vnode_free_list_slock)) {
2168 		if (vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb) {
2169 			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
2170 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2171 		}
2172 		simple_unlock(&vnode_free_list_slock);
2173 	}
2174 	simple_unlock(&vp->v_interlock);
2175 }
2176