xref: /freebsd/sys/kern/vfs_subr.c (revision 7de72ac1f832cd9e1c747220c58913b0465b66b3)
1 /*-
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  */
36 
37 /*
38  * External virtual filesystem routines
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_compat.h"
45 #include "opt_ddb.h"
46 #include "opt_watchdog.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/condvar.h>
53 #include <sys/conf.h>
54 #include <sys/dirent.h>
55 #include <sys/event.h>
56 #include <sys/eventhandler.h>
57 #include <sys/extattr.h>
58 #include <sys/file.h>
59 #include <sys/fcntl.h>
60 #include <sys/jail.h>
61 #include <sys/kdb.h>
62 #include <sys/kernel.h>
63 #include <sys/kthread.h>
64 #include <sys/lockf.h>
65 #include <sys/malloc.h>
66 #include <sys/mount.h>
67 #include <sys/namei.h>
68 #include <sys/priv.h>
69 #include <sys/reboot.h>
70 #include <sys/rwlock.h>
71 #include <sys/sched.h>
72 #include <sys/sleepqueue.h>
73 #include <sys/smp.h>
74 #include <sys/stat.h>
75 #include <sys/sysctl.h>
76 #include <sys/syslog.h>
77 #include <sys/vmmeter.h>
78 #include <sys/vnode.h>
79 #include <sys/watchdog.h>
80 
81 #include <machine/stdarg.h>
82 
83 #include <security/mac/mac_framework.h>
84 
85 #include <vm/vm.h>
86 #include <vm/vm_object.h>
87 #include <vm/vm_extern.h>
88 #include <vm/pmap.h>
89 #include <vm/vm_map.h>
90 #include <vm/vm_page.h>
91 #include <vm/vm_kern.h>
92 #include <vm/uma.h>
93 
94 #ifdef DDB
95 #include <ddb/ddb.h>
96 #endif
97 
98 static void	delmntque(struct vnode *vp);
99 static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
100 		    int slpflag, int slptimeo);
101 static void	syncer_shutdown(void *arg, int howto);
102 static int	vtryrecycle(struct vnode *vp);
103 static void	v_incr_usecount(struct vnode *);
104 static void	v_decr_usecount(struct vnode *);
105 static void	v_decr_useonly(struct vnode *);
106 static void	v_upgrade_usecount(struct vnode *);
107 static void	vnlru_free(int);
108 static void	vgonel(struct vnode *);
109 static void	vfs_knllock(void *arg);
110 static void	vfs_knlunlock(void *arg);
111 static void	vfs_knl_assert_locked(void *arg);
112 static void	vfs_knl_assert_unlocked(void *arg);
113 static void	destroy_vpollinfo(struct vpollinfo *vi);
114 
115 /*
116  * Number of vnodes in existence.  Increased whenever getnewvnode()
117  * allocates a new vnode, decreased in vdropl() for a VI_DOOMED vnode.
118  */
119 static unsigned long	numvnodes;
120 
121 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
122     "Number of vnodes in existence");
123 
124 /*
125  * Conversion tables for conversion from vnode types to inode formats
126  * and back.
127  */
128 enum vtype iftovt_tab[16] = {
129 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
130 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
131 };
132 int vttoif_tab[10] = {
133 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
134 	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
135 };
136 
137 /*
138  * List of vnodes that are ready for recycling.
139  */
140 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
141 
142 /*
143  * Free vnode target.  Free vnodes may simply be files which have been stat'd
144  * but not read.  This is somewhat common, and a small cache of such files
145  * should be kept to avoid recreation costs.
146  */
147 static u_long wantfreevnodes;
148 SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
149 /* Number of vnodes in the free list. */
150 static u_long freevnodes;
151 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0,
152     "Number of vnodes in the free list");
153 
154 static int vlru_allow_cache_src;
155 SYSCTL_INT(_vfs, OID_AUTO, vlru_allow_cache_src, CTLFLAG_RW,
156     &vlru_allow_cache_src, 0, "Allow vlru to reclaim source vnode");
157 
158 /*
159  * Various variables used for debugging the new implementation of
160  * reassignbuf().
161  * XXX these are probably of (very) limited utility now.
162  */
163 static int reassignbufcalls;
164 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
165     "Number of calls to reassignbuf");
166 
167 /*
168  * Cache for the mount type id assigned to NFS.  This is used for
169  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
170  */
171 int	nfs_mount_type = -1;
172 
173 /* To keep more than one thread at a time from running vfs_getnewfsid */
174 static struct mtx mntid_mtx;
175 
176 /*
177  * Lock for any access to the following:
178  *	vnode_free_list
179  *	numvnodes
180  *	freevnodes
181  */
182 static struct mtx vnode_free_list_mtx;
183 
184 /* Publicly exported FS */
185 struct nfs_public nfs_pub;
186 
187 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
188 static uma_zone_t vnode_zone;
189 static uma_zone_t vnodepoll_zone;
190 
191 /*
192  * The workitem queue.
193  *
194  * It is useful to delay writes of file data and filesystem metadata
195  * for tens of seconds so that quickly created and deleted files need
196  * not waste disk bandwidth being created and removed. To realize this,
197  * we append vnodes to a "workitem" queue. When running with a soft
198  * updates implementation, most pending metadata dependencies should
199  * not wait for more than a few seconds. Thus, vnodes mounted on block
200  * devices are delayed only about half the time that file data is
201  * delayed.  Similarly, directory updates are more critical, so they
202  * are delayed only about a third the time that file data is delayed.
203  * Thus, there are SYNCER_MAXDELAY queues, processed round-robin at a rate of
204  * one each second (driven off the filesystem syncer process). The
205  * syncer_delayno variable indicates the next queue that is to be processed.
206  * Items that need to be processed soon are placed in this queue:
207  *
208  *	syncer_workitem_pending[syncer_delayno]
209  *
210  * A delay of fifteen seconds is done by placing the request fifteen
211  * entries later in the queue:
212  *
213  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
214  *
215  */
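/*
 * Illustrative example (editor's sketch, not original source): with the
 * default SYNCER_MAXDELAY of 32, hashinit() yields syncer_mask = 31 and
 * syncer_maxdelay = 32.  If syncer_delayno is currently 20, a request
 * delayed by 15 seconds lands in slot (20 + 15) & 31 == 3, i.e. the ring
 * wraps around:
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
 */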
216 static int syncer_delayno;
217 static long syncer_mask;
218 LIST_HEAD(synclist, bufobj);
219 static struct synclist *syncer_workitem_pending;
220 /*
221  * The sync_mtx protects:
222  *	bo->bo_synclist
223  *	sync_vnode_count
224  *	syncer_delayno
225  *	syncer_state
226  *	syncer_workitem_pending
227  *	syncer_worklist_len
228  *	rushjob
229  */
230 static struct mtx sync_mtx;
231 static struct cv sync_wakeup;
232 
233 #define SYNCER_MAXDELAY		32
234 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
235 static int syncdelay = 30;		/* max time to delay syncing data */
236 static int filedelay = 30;		/* time to delay syncing files */
237 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
238     "Time to delay syncing files (in seconds)");
239 static int dirdelay = 29;		/* time to delay syncing directories */
240 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
241     "Time to delay syncing directories (in seconds)");
242 static int metadelay = 28;		/* time to delay syncing metadata */
243 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
244     "Time to delay syncing metadata (in seconds)");
245 static int rushjob;		/* number of slots to run ASAP */
246 static int stat_rush_requests;	/* number of times I/O speeded up */
247 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
248     "Number of times I/O speeded up (rush requests)");
249 
250 /*
251  * When shutting down the syncer, run it at four times normal speed.
252  */
253 #define SYNCER_SHUTDOWN_SPEEDUP		4
254 static int sync_vnode_count;
255 static int syncer_worklist_len;
256 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
257     syncer_state;
258 
259 /*
260  * Number of vnodes we want to exist at any one time.  This is mostly used
261  * to size hash tables in vnode-related code.  It is normally not used in
262  * getnewvnode(), as wantfreevnodes is normally nonzero.
263  *
264  * XXX desiredvnodes is historical cruft and should not exist.
265  */
266 int desiredvnodes;
267 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
268     &desiredvnodes, 0, "Maximum number of vnodes");
269 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
270     &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
271 static int vnlru_nowhere;
272 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
273     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
274 
275 /*
276  * Macros to control when a vnode is freed and recycled.  All require
277  * the vnode interlock.
278  */
279 #define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
280 #define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
281 #define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)
282 
283 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
284 static int vnsz2log;
285 
286 /*
287  * Initialize the vnode management data structures.
288  *
289  * Reevaluate the following cap on the number of vnodes after the physical
290  * memory size exceeds 512GB.  In the limit, as the physical memory size
291  * grows, the ratio of physical pages to vnodes approaches sixteen to one.
292  */
293 #ifndef	MAXVNODES_MAX
294 #define	MAXVNODES_MAX	(512 * (1024 * 1024 * 1024 / (int)PAGE_SIZE / 16))
295 #endif
296 static void
297 vntblinit(void *dummy __unused)
298 {
299 	u_int i;
300 	int physvnodes, virtvnodes;
301 
302 	/*
303 	 * Desiredvnodes is a function of the physical memory size and the
304 	 * kernel's heap size.  Generally speaking, it scales with the
305 	 * physical memory size.  The ratio of desiredvnodes to physical pages
306 	 * is one to four until desiredvnodes exceeds 98,304.  Thereafter, the
307 	 * marginal ratio of desiredvnodes to physical pages is one to
308 	 * sixteen.  However, desiredvnodes is limited by the kernel's heap
309 	 * size.  The memory required by desiredvnodes vnodes and vm objects
310 	 * may not exceed one seventh of the kernel's heap size.
311 	 */
312 	physvnodes = maxproc + cnt.v_page_count / 16 + 3 * min(98304 * 4,
313 	    cnt.v_page_count) / 16;
314 	virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) +
315 	    sizeof(struct vnode)));
316 	desiredvnodes = min(physvnodes, virtvnodes);
317 	if (desiredvnodes > MAXVNODES_MAX) {
318 		if (bootverbose)
319 			printf("Reducing kern.maxvnodes %d -> %d\n",
320 			    desiredvnodes, MAXVNODES_MAX);
321 		desiredvnodes = MAXVNODES_MAX;
322 	}
323 	wantfreevnodes = desiredvnodes / 4;
324 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
325 	TAILQ_INIT(&vnode_free_list);
326 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
327 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
328 	    NULL, NULL, UMA_ALIGN_PTR, 0);
329 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
330 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
331 	/*
332 	 * Initialize the filesystem syncer.
333 	 */
334 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
335 	    &syncer_mask);
336 	syncer_maxdelay = syncer_mask + 1;
337 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
338 	cv_init(&sync_wakeup, "syncer");
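	/*
	 * Editor's note: the loop below computes
	 * vnsz2log = floor(log2(sizeof(struct vnode))), i.e. the shift
	 * applied to a vnode's address when deriving v_hash in
	 * getnewvnode().
	 */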
339 	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
340 		vnsz2log++;
341 	vnsz2log--;
342 }
343 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
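/*
 * Worked example for the sizing above (editor's sketch; figures assume
 * 4KB pages and ignore the maxproc term): on a machine with 4GB of RAM,
 * cnt.v_page_count is about 1048576, so
 *
 *	physvnodes ~= 1048576 / 16 + 3 * min(98304 * 4, 1048576) / 16
 *	           ~= 65536 + 73728 = 139264
 *
 * and desiredvnodes is this value unless the kernel heap term
 * (virtvnodes) is smaller.
 */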
344 
345 
346 /*
347  * Mark a mount point as busy. Used to synchronize access and to delay
348  * unmounting. Note that mountlist_mtx is not released on failure.
349  *
350  * vfs_busy() is a custom lock, it can block the caller.
351  * vfs_busy() only sleeps if an unmount is active on the mount point.
352  * For a mountpoint mp, the vfs_busy()-enforced lock orders before the
353  * lock of any vnode belonging to mp.
354  *
355  * Lookup uses vfs_busy() to traverse mount points.
356  * root fs			var fs
357  * / vnode lock		A	/ vnode lock (/var)		D
358  * /var vnode lock	B	/log vnode lock(/var/log)	E
359  * vfs_busy lock	C	vfs_busy lock			F
360  *
361  * Within each file system, the lock order is C->A->B and F->D->E.
362  *
363  * When traversing across mounts, the system follows that lock order:
364  *
365  *        C->A->B
366  *              |
367  *              +->F->D->E
368  *
369  * The lookup() sequence for namei("/var") illustrates this order:
370  *  VOP_LOOKUP() obtains B while A is held
371  *  vfs_busy() obtains a shared lock on F while A and B are held
372  *  vput() releases lock on B
373  *  vput() releases lock on A
374  *  VFS_ROOT() obtains lock on D while shared lock on F is held
375  *  vfs_unbusy() releases shared lock on F
376  *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
377  *    Attempting to lock A (instead of vp_crossmp) while D is held would
378  *    violate the global order, causing deadlocks.
379  *
380  * dounmount() locks B while F is drained.
381  */
382 int
383 vfs_busy(struct mount *mp, int flags)
384 {
385 
386 	MPASS((flags & ~MBF_MASK) == 0);
387 	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
388 
389 	MNT_ILOCK(mp);
390 	MNT_REF(mp);
391 	/*
392 	 * If the mount point is currently being unmounted, sleep until the
393 	 * mount point's fate is decided.  If the thread doing the unmount fails,
394 	 * it will clear the MNTK_UNMOUNT flag before waking us up, indicating
395 	 * that this mount point has survived the unmount attempt and vfs_busy
396 	 * should retry.  Otherwise the unmounting thread will set the MNTK_REFEXPIRE
397 	 * flag in addition to MNTK_UNMOUNT, indicating that the mount point is
398 	 * about to be really destroyed.  vfs_busy needs to release its
399 	 * reference on the mount point in this case and return with ENOENT,
400 	 * telling the caller that the mount it tried to busy is no longer
401 	 * valid.
402 	 */
403 	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
404 		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
405 			MNT_REL(mp);
406 			MNT_IUNLOCK(mp);
407 			CTR1(KTR_VFS, "%s: failed busying before sleeping",
408 			    __func__);
409 			return (ENOENT);
410 		}
411 		if (flags & MBF_MNTLSTLOCK)
412 			mtx_unlock(&mountlist_mtx);
413 		mp->mnt_kern_flag |= MNTK_MWAIT;
414 		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
415 		if (flags & MBF_MNTLSTLOCK)
416 			mtx_lock(&mountlist_mtx);
417 		MNT_ILOCK(mp);
418 	}
419 	if (flags & MBF_MNTLSTLOCK)
420 		mtx_unlock(&mountlist_mtx);
421 	mp->mnt_lockref++;
422 	MNT_IUNLOCK(mp);
423 	return (0);
424 }
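/*
 * A minimal usage sketch (editor's addition): the canonical pattern for
 * traversing the mount list without blocking, as used by vnlru_proc()
 * later in this file, busies each mount with MBF_NOWAIT | MBF_MNTLSTLOCK
 * and skips mounts that are being unmounted:
 *
 *	mtx_lock(&mountlist_mtx);
 *	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 *		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
 *			nmp = TAILQ_NEXT(mp, mnt_list);
 *			continue;
 *		}
 *		... work on mp; vfs_busy() dropped mountlist_mtx ...
 *		mtx_lock(&mountlist_mtx);
 *		nmp = TAILQ_NEXT(mp, mnt_list);
 *		vfs_unbusy(mp);
 *	}
 *	mtx_unlock(&mountlist_mtx);
 */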
425 
426 /*
427  * Free a busy filesystem.
428  */
429 void
430 vfs_unbusy(struct mount *mp)
431 {
432 
433 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
434 	MNT_ILOCK(mp);
435 	MNT_REL(mp);
436 	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
437 	mp->mnt_lockref--;
438 	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
439 		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
440 		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
441 		mp->mnt_kern_flag &= ~MNTK_DRAINING;
442 		wakeup(&mp->mnt_lockref);
443 	}
444 	MNT_IUNLOCK(mp);
445 }
446 
447 /*
448  * Lookup a mount point by filesystem identifier.
449  */
450 struct mount *
451 vfs_getvfs(fsid_t *fsid)
452 {
453 	struct mount *mp;
454 
455 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
456 	mtx_lock(&mountlist_mtx);
457 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
458 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
459 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
460 			vfs_ref(mp);
461 			mtx_unlock(&mountlist_mtx);
462 			return (mp);
463 		}
464 	}
465 	mtx_unlock(&mountlist_mtx);
466 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
467 	return ((struct mount *) 0);
468 }
469 
470 /*
471  * Lookup a mount point by filesystem identifier, busying it before
472  * returning.
473  */
474 struct mount *
475 vfs_busyfs(fsid_t *fsid)
476 {
477 	struct mount *mp;
478 	int error;
479 
480 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
481 	mtx_lock(&mountlist_mtx);
482 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
483 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
484 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
485 			error = vfs_busy(mp, MBF_MNTLSTLOCK);
486 			if (error) {
487 				mtx_unlock(&mountlist_mtx);
488 				return (NULL);
489 			}
490 			return (mp);
491 		}
492 	}
493 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
494 	mtx_unlock(&mountlist_mtx);
495 	return ((struct mount *) 0);
496 }
497 
498 /*
499  * Check if a user can access privileged mount options.
500  */
501 int
502 vfs_suser(struct mount *mp, struct thread *td)
503 {
504 	int error;
505 
506 	/*
507 	 * If the thread is jailed, but this is not a jail-friendly file
508 	 * system, deny immediately.
509 	 */
510 	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
511 		return (EPERM);
512 
513 	/*
514 	 * If the file system was mounted outside the jail of the calling
515 	 * thread, deny immediately.
516 	 */
517 	if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
518 		return (EPERM);
519 
520 	/*
521 	 * If file system supports delegated administration, we don't check
522 	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
523 	 * by the file system itself.
524 	 * If this is not the user that did original mount, we check for
525 	 * the PRIV_VFS_MOUNT_OWNER privilege.
526 	 */
527 	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
528 	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
529 		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
530 			return (error);
531 	}
532 	return (0);
533 }
534 
535 /*
536  * Get a new unique fsid.  Try to make its val[0] unique, since this value
537  * will be used to create fake device numbers for stat().  Also try (but
538  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
539  * support 16-bit device numbers.  We end up with unique val[0]'s for the
540  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
541  *
542  * Keep in mind that several mounts may be running in parallel.  Starting
543  * the search one past where the previous search terminated is both a
544  * micro-optimization and a defense against returning the same fsid to
545  * different mounts.
546  */
547 void
548 vfs_getnewfsid(struct mount *mp)
549 {
550 	static uint16_t mntid_base;
551 	struct mount *nmp;
552 	fsid_t tfsid;
553 	int mtype;
554 
555 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
556 	mtx_lock(&mntid_mtx);
557 	mtype = mp->mnt_vfc->vfc_typenum;
558 	tfsid.val[1] = mtype;
559 	mtype = (mtype & 0xFF) << 24;
560 	for (;;) {
561 		tfsid.val[0] = makedev(255,
562 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
563 		mntid_base++;
564 		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
565 			break;
566 		vfs_rel(nmp);
567 	}
568 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
569 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
570 	mtx_unlock(&mntid_mtx);
571 }
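/*
 * Editor's sketch of the resulting fsid layout: the minor half passed to
 * makedev() above packs the filesystem type and the 16-bit mntid_base as
 *
 *	bits 31..24: vfc_typenum & 0xFF
 *	bits 23..16: high byte of mntid_base
 *	bits  7..0:  low byte of mntid_base
 *
 * so only the low byte varies within any 16-bit window of val[0], which
 * yields the "unique mod 2^16 for the first 2^8 calls" property
 * described above.
 */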
572 
573 /*
574  * Knob to control the precision of file timestamps:
575  *
576  *   0 = seconds only; nanoseconds zeroed.
577  *   1 = seconds and nanoseconds, accurate within 1/HZ.
578  *   2 = seconds and nanoseconds, truncated to microseconds.
579  * >=3 = seconds and nanoseconds, maximum precision.
580  */
581 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
582 
583 static int timestamp_precision = TSP_SEC;
584 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
585     &timestamp_precision, 0, "File timestamp precision (0: seconds, "
586     "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to ms, "
587     "3+: sec + ns (max. precision))");
588 
589 /*
590  * Get a current timestamp.
591  */
592 void
593 vfs_timestamp(struct timespec *tsp)
594 {
595 	struct timeval tv;
596 
597 	switch (timestamp_precision) {
598 	case TSP_SEC:
599 		tsp->tv_sec = time_second;
600 		tsp->tv_nsec = 0;
601 		break;
602 	case TSP_HZ:
603 		getnanotime(tsp);
604 		break;
605 	case TSP_USEC:
606 		microtime(&tv);
607 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
608 		break;
609 	case TSP_NSEC:
610 	default:
611 		nanotime(tsp);
612 		break;
613 	}
614 }
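/*
 * Typical use (editor's sketch; the inode field is hypothetical):
 * filesystems stamp inode times through this routine so that the stored
 * precision honors the sysctl above:
 *
 *	struct timespec ts;
 *
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts;
 */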
615 
616 /*
617  * Set vnode attributes to VNOVAL
618  */
619 void
620 vattr_null(struct vattr *vap)
621 {
622 
623 	vap->va_type = VNON;
624 	vap->va_size = VNOVAL;
625 	vap->va_bytes = VNOVAL;
626 	vap->va_mode = VNOVAL;
627 	vap->va_nlink = VNOVAL;
628 	vap->va_uid = VNOVAL;
629 	vap->va_gid = VNOVAL;
630 	vap->va_fsid = VNOVAL;
631 	vap->va_fileid = VNOVAL;
632 	vap->va_blocksize = VNOVAL;
633 	vap->va_rdev = VNOVAL;
634 	vap->va_atime.tv_sec = VNOVAL;
635 	vap->va_atime.tv_nsec = VNOVAL;
636 	vap->va_mtime.tv_sec = VNOVAL;
637 	vap->va_mtime.tv_nsec = VNOVAL;
638 	vap->va_ctime.tv_sec = VNOVAL;
639 	vap->va_ctime.tv_nsec = VNOVAL;
640 	vap->va_birthtime.tv_sec = VNOVAL;
641 	vap->va_birthtime.tv_nsec = VNOVAL;
642 	vap->va_flags = VNOVAL;
643 	vap->va_gen = VNOVAL;
644 	vap->va_vaflags = 0;
645 }
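/*
 * A minimal sketch of the usual pattern (editor's addition): callers
 * null out a vattr, set only the fields they mean to change, and hand
 * it to VOP_SETATTR(); fields left at VNOVAL are ignored:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = 0;
 *	error = VOP_SETATTR(vp, &va, td->td_ucred);
 */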
646 
647 /*
648  * This routine is called when we have too many vnodes.  It attempts
649  * to recycle about a tenth of the vnodes on the given mount point and
650  * will potentially free vnodes that still have VM backing store (VM
651  * backing store is typically the cause of a vnode blowout so we want
652  * to do this).  Therefore, this operation is not considered cheap.
653  *
654  * A number of conditions may prevent a vnode from being reclaimed:
655  * the buffer cache may have references on the vnode, a directory
656  * vnode may still have references due to the namei cache representing
657  * underlying files, or the vnode may be in active use.   It is not
658  * desirable to reuse such vnodes.  These conditions may cause the
659  * number of vnodes to reach some minimum value regardless of what
660  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
661  */
662 static int
663 vlrureclaim(struct mount *mp)
664 {
665 	struct vnode *vp;
666 	int done;
667 	int trigger;
668 	int usevnodes;
669 	int count;
670 
671 	/*
672 	 * Calculate the trigger point, don't allow user
673 	 * screwups to blow us up.   This prevents us from
674 	 * recycling vnodes with lots of resident pages.  We
675 	 * aren't trying to free memory, we are trying to
676 	 * free vnodes.
677 	 */
678 	usevnodes = desiredvnodes;
679 	if (usevnodes <= 0)
680 		usevnodes = 1;
681 	trigger = cnt.v_page_count * 2 / usevnodes;
682 	done = 0;
683 	vn_start_write(NULL, &mp, V_WAIT);
684 	MNT_ILOCK(mp);
685 	count = mp->mnt_nvnodelistsize / 10 + 1;
686 	while (count != 0) {
687 		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
688 		while (vp != NULL && vp->v_type == VMARKER)
689 			vp = TAILQ_NEXT(vp, v_nmntvnodes);
690 		if (vp == NULL)
691 			break;
692 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
693 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
694 		--count;
695 		if (!VI_TRYLOCK(vp))
696 			goto next_iter;
697 		/*
698 		 * If it has been deconstructed already, is still
699 		 * referenced, or exceeds the trigger, skip it.
700 		 */
701 		if (vp->v_usecount ||
702 		    (!vlru_allow_cache_src &&
703 			!LIST_EMPTY(&(vp)->v_cache_src)) ||
704 		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
705 		    vp->v_object->resident_page_count > trigger)) {
706 			VI_UNLOCK(vp);
707 			goto next_iter;
708 		}
709 		MNT_IUNLOCK(mp);
710 		vholdl(vp);
711 		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
712 			vdrop(vp);
713 			goto next_iter_mntunlocked;
714 		}
715 		VI_LOCK(vp);
716 		/*
717 		 * v_usecount may have been bumped after VOP_LOCK() dropped
718 		 * the vnode interlock and before it was locked again.
719 		 *
720 		 * It is not necessary to recheck VI_DOOMED because it can
721 		 * only be set by another thread that holds both the vnode
722 		 * lock and vnode interlock.  If another thread has the
723 		 * vnode lock before we get to VOP_LOCK() and obtains the
724 		 * vnode interlock after VOP_LOCK() drops the vnode
725 		 * interlock, the other thread will be unable to drop the
726 		 * vnode lock before our VOP_LOCK() call fails.
727 		 */
728 		if (vp->v_usecount ||
729 		    (!vlru_allow_cache_src &&
730 			!LIST_EMPTY(&(vp)->v_cache_src)) ||
731 		    (vp->v_object != NULL &&
732 		    vp->v_object->resident_page_count > trigger)) {
733 			VOP_UNLOCK(vp, LK_INTERLOCK);
734 			vdrop(vp);
735 			goto next_iter_mntunlocked;
736 		}
737 		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
738 		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
739 		vgonel(vp);
740 		VOP_UNLOCK(vp, 0);
741 		vdropl(vp);
742 		done++;
743 next_iter_mntunlocked:
744 		if (!should_yield())
745 			goto relock_mnt;
746 		goto yield;
747 next_iter:
748 		if (!should_yield())
749 			continue;
750 		MNT_IUNLOCK(mp);
751 yield:
752 		kern_yield(PRI_USER);
753 relock_mnt:
754 		MNT_ILOCK(mp);
755 	}
756 	MNT_IUNLOCK(mp);
757 	vn_finished_write(mp);
758 	return done;
759 }
760 
761 /*
762  * Attempt to keep the free list at wantfreevnodes length.
763  */
764 static void
765 vnlru_free(int count)
766 {
767 	struct vnode *vp;
768 
769 	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
770 	for (; count > 0; count--) {
771 		vp = TAILQ_FIRST(&vnode_free_list);
772 		/*
773 		 * The list can be modified while vnode_free_list_mtx
774 		 * has been dropped and vp could be NULL here.
775 		 */
776 		if (!vp)
777 			break;
778 		VNASSERT(vp->v_op != NULL, vp,
779 		    ("vnlru_free: vnode already reclaimed."));
780 		KASSERT((vp->v_iflag & VI_FREE) != 0,
781 		    ("Removing vnode not on freelist"));
782 		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
783 		    ("Mangling active vnode"));
784 		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
785 		/*
786 		 * Don't recycle if we can't get the interlock.
787 		 */
788 		if (!VI_TRYLOCK(vp)) {
789 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
790 			continue;
791 		}
792 		VNASSERT(VCANRECYCLE(vp), vp,
793 		    ("vp inconsistent on freelist"));
794 		freevnodes--;
795 		vp->v_iflag &= ~VI_FREE;
796 		vholdl(vp);
797 		mtx_unlock(&vnode_free_list_mtx);
798 		VI_UNLOCK(vp);
799 		vtryrecycle(vp);
800 		/*
801 		 * If the recycle succeeded, this vdrop will actually free
802 		 * the vnode.  If not, it will simply place it back on
803 		 * the free list.
804 		 */
805 		vdrop(vp);
806 		mtx_lock(&vnode_free_list_mtx);
807 	}
808 }
809 /*
810  * Attempt to recycle vnodes in a context that is always safe to block.
811  * Calling vlrureclaim() from the bowels of filesystem code has some
812  * interesting deadlock problems.
813  */
814 static struct proc *vnlruproc;
815 static int vnlruproc_sig;
816 
817 static void
818 vnlru_proc(void)
819 {
820 	struct mount *mp, *nmp;
821 	int done;
822 	struct proc *p = vnlruproc;
823 
824 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
825 	    SHUTDOWN_PRI_FIRST);
826 
827 	for (;;) {
828 		kproc_suspend_check(p);
829 		mtx_lock(&vnode_free_list_mtx);
830 		if (freevnodes > wantfreevnodes)
831 			vnlru_free(freevnodes - wantfreevnodes);
832 		if (numvnodes <= desiredvnodes * 9 / 10) {
833 			vnlruproc_sig = 0;
834 			wakeup(&vnlruproc_sig);
835 			msleep(vnlruproc, &vnode_free_list_mtx,
836 			    PVFS|PDROP, "vlruwt", hz);
837 			continue;
838 		}
839 		mtx_unlock(&vnode_free_list_mtx);
840 		done = 0;
841 		mtx_lock(&mountlist_mtx);
842 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
843 			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
844 				nmp = TAILQ_NEXT(mp, mnt_list);
845 				continue;
846 			}
847 			done += vlrureclaim(mp);
848 			mtx_lock(&mountlist_mtx);
849 			nmp = TAILQ_NEXT(mp, mnt_list);
850 			vfs_unbusy(mp);
851 		}
852 		mtx_unlock(&mountlist_mtx);
853 		if (done == 0) {
854 #if 0
855 			/* These messages are temporary debugging aids */
856 			if (vnlru_nowhere < 5)
857 				printf("vnlru process getting nowhere..\n");
858 			else if (vnlru_nowhere == 5)
859 				printf("vnlru process messages stopped.\n");
860 #endif
861 			vnlru_nowhere++;
862 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
863 		} else
864 			kern_yield(PRI_USER);
865 	}
866 }
867 
868 static struct kproc_desc vnlru_kp = {
869 	"vnlru",
870 	vnlru_proc,
871 	&vnlruproc
872 };
873 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
874     &vnlru_kp);
875 
876 /*
877  * Routines having to do with the management of the vnode table.
878  */
879 
880 /*
881  * Try to recycle a freed vnode.  We abort if anyone picks up a reference
882  * before we actually vgone().  This function must be called with the vnode
883  * held to prevent the vnode from being returned to the free list midway
884  * through vgone().
885  */
886 static int
887 vtryrecycle(struct vnode *vp)
888 {
889 	struct mount *vnmp;
890 
891 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
892 	VNASSERT(vp->v_holdcnt, vp,
893 	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
894 	/*
895 	 * This vnode may be found and locked via some other list; if so we
896 	 * can't recycle it yet.
897 	 */
898 	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
899 		CTR2(KTR_VFS,
900 		    "%s: impossible to recycle, vp %p lock is already held",
901 		    __func__, vp);
902 		return (EWOULDBLOCK);
903 	}
904 	/*
905 	 * Don't recycle if its filesystem is being suspended.
906 	 */
907 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
908 		VOP_UNLOCK(vp, 0);
909 		CTR2(KTR_VFS,
910 		    "%s: impossible to recycle, cannot start the write for %p",
911 		    __func__, vp);
912 		return (EBUSY);
913 	}
914 	/*
915 	 * If we got this far, we need to acquire the interlock and see if
916 	 * anyone picked up this vnode from another list.  If not, we will
917 	 * mark it with DOOMED via vgonel() so that anyone who does find it
918 	 * will skip over it.
919 	 */
920 	VI_LOCK(vp);
921 	if (vp->v_usecount) {
922 		VOP_UNLOCK(vp, LK_INTERLOCK);
923 		vn_finished_write(vnmp);
924 		CTR2(KTR_VFS,
925 		    "%s: impossible to recycle, %p is already referenced",
926 		    __func__, vp);
927 		return (EBUSY);
928 	}
929 	if ((vp->v_iflag & VI_DOOMED) == 0)
930 		vgonel(vp);
931 	VOP_UNLOCK(vp, LK_INTERLOCK);
932 	vn_finished_write(vnmp);
933 	return (0);
934 }
935 
936 /*
937  * Wait for available vnodes.
938  */
939 static int
940 getnewvnode_wait(int suspended)
941 {
942 
943 	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
944 	if (numvnodes > desiredvnodes) {
945 		if (suspended) {
946 			/*
947 			 * The file system is being suspended; we cannot risk a
948 			 * deadlock here, so allocate a new vnode anyway.
949 			 */
950 			if (freevnodes > wantfreevnodes)
951 				vnlru_free(freevnodes - wantfreevnodes);
952 			return (0);
953 		}
954 		if (vnlruproc_sig == 0) {
955 			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
956 			wakeup(vnlruproc);
957 		}
958 		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
959 		    "vlruwk", hz);
960 	}
961 	return (numvnodes > desiredvnodes ? ENFILE : 0);
962 }
963 
964 void
965 getnewvnode_reserve(u_int count)
966 {
967 	struct thread *td;
968 
969 	td = curthread;
970 	mtx_lock(&vnode_free_list_mtx);
971 	while (count > 0) {
972 		if (getnewvnode_wait(0) == 0) {
973 			count--;
974 			td->td_vp_reserv++;
975 			numvnodes++;
976 		}
977 	}
978 	mtx_unlock(&vnode_free_list_mtx);
979 }
980 
981 void
982 getnewvnode_drop_reserve(void)
983 {
984 	struct thread *td;
985 
986 	td = curthread;
987 	mtx_lock(&vnode_free_list_mtx);
988 	KASSERT(numvnodes >= td->td_vp_reserv, ("reserve too large"));
989 	numvnodes -= td->td_vp_reserv;
990 	mtx_unlock(&vnode_free_list_mtx);
991 	td->td_vp_reserv = 0;
992 }
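/*
 * Editor's sketch of the reserve protocol ("myfs" names are
 * hypothetical): pre-reserve vnodes before acquiring locks under which
 * sleeping in getnewvnode_wait() could deadlock, allocate from the
 * reserve via getnewvnode(), then return whatever is unused:
 *
 *	getnewvnode_reserve(1);
 *	... acquire filesystem-internal locks ...
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	...
 *	getnewvnode_drop_reserve();
 */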
993 
994 /*
995  * Return the next vnode from the free list.
996  */
997 int
998 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
999     struct vnode **vpp)
1000 {
1001 	struct vnode *vp;
1002 	struct bufobj *bo;
1003 	struct thread *td;
1004 	int error;
1005 
1006 	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
1007 	vp = NULL;
1008 	td = curthread;
1009 	if (td->td_vp_reserv > 0) {
1010 		td->td_vp_reserv -= 1;
1011 		goto alloc;
1012 	}
1013 	mtx_lock(&vnode_free_list_mtx);
1014 	/*
1015 	 * Lend our context to reclaim vnodes if they've exceeded the max.
1016 	 */
1017 	if (freevnodes > wantfreevnodes)
1018 		vnlru_free(1);
1019 	error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
1020 	    MNTK_SUSPEND));
1021 #if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
1022 	if (error != 0) {
1023 		mtx_unlock(&vnode_free_list_mtx);
1024 		return (error);
1025 	}
1026 #endif
1027 	numvnodes++;
1028 	mtx_unlock(&vnode_free_list_mtx);
1029 alloc:
1030 	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
1031 	/*
1032 	 * Setup locks.
1033 	 */
1034 	vp->v_vnlock = &vp->v_lock;
1035 	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
1036 	/*
1037 	 * By default, don't allow shared locks unless filesystems
1038 	 * opt-in.
1039 	 */
1040 	lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
1041 	/*
1042 	 * Initialize bufobj.
1043 	 */
1044 	bo = &vp->v_bufobj;
1045 	bo->__bo_vnode = vp;
1046 	mtx_init(BO_MTX(bo), "bufobj interlock", NULL, MTX_DEF);
1047 	bo->bo_ops = &buf_ops_bio;
1048 	bo->bo_private = vp;
1049 	TAILQ_INIT(&bo->bo_clean.bv_hd);
1050 	TAILQ_INIT(&bo->bo_dirty.bv_hd);
1051 	/*
1052 	 * Initialize namecache.
1053 	 */
1054 	LIST_INIT(&vp->v_cache_src);
1055 	TAILQ_INIT(&vp->v_cache_dst);
1056 	/*
1057 	 * Finalize various vnode identity bits.
1058 	 */
1059 	vp->v_type = VNON;
1060 	vp->v_tag = tag;
1061 	vp->v_op = vops;
1062 	v_incr_usecount(vp);
1063 	vp->v_data = NULL;
1064 #ifdef MAC
1065 	mac_vnode_init(vp);
1066 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1067 		mac_vnode_associate_singlelabel(mp, vp);
1068 	else if (mp == NULL && vops != &dead_vnodeops)
1069 		printf("NULL mp in getnewvnode()\n");
1070 #endif
1071 	if (mp != NULL) {
1072 		bo->bo_bsize = mp->mnt_stat.f_iosize;
1073 		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
1074 			vp->v_vflag |= VV_NOKNOTE;
1075 	}
1076 	rangelock_init(&vp->v_rl);
1077 
1078 	/*
1079 	 * For the filesystems which do not use vfs_hash_insert(),
1080 	 * still initialize v_hash to have vfs_hash_index() useful.
1081 	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
1082 	 * its own hashing.
1083 	 */
1084 	vp->v_hash = (uintptr_t)vp >> vnsz2log;
1085 
1086 	*vpp = vp;
1087 	return (0);
1088 }
1089 
1090 /*
1091  * Delete from old mount point vnode list, if on one.
1092  */
1093 static void
1094 delmntque(struct vnode *vp)
1095 {
1096 	struct mount *mp;
1097 	int active;
1098 
1099 	mp = vp->v_mount;
1100 	if (mp == NULL)
1101 		return;
1102 	MNT_ILOCK(mp);
1103 	VI_LOCK(vp);
1104 	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
1105 	    ("Active vnode list size %d > Vnode list size %d",
1106 	     mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
1107 	active = vp->v_iflag & VI_ACTIVE;
1108 	vp->v_iflag &= ~VI_ACTIVE;
1109 	if (active) {
1110 		mtx_lock(&vnode_free_list_mtx);
1111 		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
1112 		mp->mnt_activevnodelistsize--;
1113 		mtx_unlock(&vnode_free_list_mtx);
1114 	}
1115 	vp->v_mount = NULL;
1116 	VI_UNLOCK(vp);
1117 	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
1118 		("bad mount point vnode list size"));
1119 	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1120 	mp->mnt_nvnodelistsize--;
1121 	MNT_REL(mp);
1122 	MNT_IUNLOCK(mp);
1123 }
1124 
1125 static void
1126 insmntque_stddtr(struct vnode *vp, void *dtr_arg)
1127 {
1128 
1129 	vp->v_data = NULL;
1130 	vp->v_op = &dead_vnodeops;
1131 	vgone(vp);
1132 	vput(vp);
1133 }
1134 
1135 /*
1136  * Insert into list of vnodes for the new mount point, if available.
1137  */
1138 int
1139 insmntque1(struct vnode *vp, struct mount *mp,
1140 	void (*dtr)(struct vnode *, void *), void *dtr_arg)
1141 {
1142 
1143 	KASSERT(vp->v_mount == NULL,
1144 		("insmntque: vnode already on per mount vnode list"));
1145 	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
1146 	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
1147 
1148 	/*
1149 	 * We acquire the vnode interlock early to ensure that the
1150 	 * vnode cannot be recycled by another process releasing a
1151 	 * holdcnt on it before we get it on both the vnode list
1152 	 * and the active vnode list. The mount mutex protects only
1153 	 * manipulation of the vnode list and the vnode freelist
1154 	 * mutex protects only manipulation of the active vnode list.
1155 	 * Hence the need to hold the vnode interlock throughout.
1156 	 */
1157 	MNT_ILOCK(mp);
1158 	VI_LOCK(vp);
1159 	if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
1160 	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
1161 	    mp->mnt_nvnodelistsize == 0)) &&
1162 	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
1163 		VI_UNLOCK(vp);
1164 		MNT_IUNLOCK(mp);
1165 		if (dtr != NULL)
1166 			dtr(vp, dtr_arg);
1167 		return (EBUSY);
1168 	}
1169 	vp->v_mount = mp;
1170 	MNT_REF(mp);
1171 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1172 	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
1173 		("neg mount point vnode list size"));
1174 	mp->mnt_nvnodelistsize++;
1175 	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
1176 	    ("Activating already active vnode"));
1177 	vp->v_iflag |= VI_ACTIVE;
1178 	mtx_lock(&vnode_free_list_mtx);
1179 	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
1180 	mp->mnt_activevnodelistsize++;
1181 	mtx_unlock(&vnode_free_list_mtx);
1182 	VI_UNLOCK(vp);
1183 	MNT_IUNLOCK(mp);
1184 	return (0);
1185 }
1186 
1187 int
1188 insmntque(struct vnode *vp, struct mount *mp)
1189 {
1190 
1191 	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
1192 }
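/*
 * Editor's sketch of the usual allocation sequence in a filesystem's
 * vget-style routine ("myfs" names are hypothetical).  Note that
 * insmntque() expects vp exclusively locked and that, on failure, the
 * default destructor insmntque_stddtr() already vgone()s and vput()s
 * the vnode:
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 */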
1193 
1194 /*
1195  * Flush out and invalidate all buffers associated with a bufobj.
1196  * Called with the underlying object locked.
1197  */
1198 int
1199 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
1200 {
1201 	int error;
1202 
1203 	BO_LOCK(bo);
1204 	if (flags & V_SAVE) {
1205 		error = bufobj_wwait(bo, slpflag, slptimeo);
1206 		if (error) {
1207 			BO_UNLOCK(bo);
1208 			return (error);
1209 		}
1210 		if (bo->bo_dirty.bv_cnt > 0) {
1211 			BO_UNLOCK(bo);
1212 			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
1213 				return (error);
1214 			/*
1215 			 * XXX We could save a lock/unlock if this was only
1216 			 * enabled under INVARIANTS
1217 			 */
1218 			BO_LOCK(bo);
1219 			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
1220 				panic("vinvalbuf: dirty bufs");
1221 		}
1222 	}
1223 	/*
1224 	 * If you alter this loop please notice that interlock is dropped and
1225 	 * reacquired in flushbuflist.  Special care is needed to ensure that
1226 	 * no race conditions occur from this.
1227 	 */
1228 	do {
1229 		error = flushbuflist(&bo->bo_clean,
1230 		    flags, bo, slpflag, slptimeo);
1231 		if (error == 0 && !(flags & V_CLEANONLY))
1232 			error = flushbuflist(&bo->bo_dirty,
1233 			    flags, bo, slpflag, slptimeo);
1234 		if (error != 0 && error != EAGAIN) {
1235 			BO_UNLOCK(bo);
1236 			return (error);
1237 		}
1238 	} while (error != 0);
1239 
1240 	/*
1241 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
1242 	 * have write I/O in-progress but if there is a VM object then the
1243 	 * VM object can also have read-I/O in-progress.
1244 	 */
1245 	do {
1246 		bufobj_wwait(bo, 0, 0);
1247 		BO_UNLOCK(bo);
1248 		if (bo->bo_object != NULL) {
1249 			VM_OBJECT_WLOCK(bo->bo_object);
1250 			vm_object_pip_wait(bo->bo_object, "bovlbx");
1251 			VM_OBJECT_WUNLOCK(bo->bo_object);
1252 		}
1253 		BO_LOCK(bo);
1254 	} while (bo->bo_numoutput > 0);
1255 	BO_UNLOCK(bo);
1256 
1257 	/*
1258 	 * Destroy the copy in the VM cache, too.
1259 	 */
1260 	if (bo->bo_object != NULL &&
1261 	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
1262 		VM_OBJECT_WLOCK(bo->bo_object);
1263 		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
1264 		    OBJPR_CLEANONLY : 0);
1265 		VM_OBJECT_WUNLOCK(bo->bo_object);
1266 	}
1267 
1268 #ifdef INVARIANTS
1269 	BO_LOCK(bo);
1270 	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 &&
1271 	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
1272 		panic("vinvalbuf: flush failed");
1273 	BO_UNLOCK(bo);
1274 #endif
1275 	return (0);
1276 }
1277 
1278 /*
1279  * Flush out and invalidate all buffers associated with a vnode.
1280  * Called with the underlying object locked.
1281  */
1282 int
1283 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
1284 {
1285 
1286 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
1287 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1288 	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
1289 }
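/*
 * Typical call (editor's sketch): reclaim paths flush everything while
 * first writing out dirty data by passing V_SAVE, sleeping without a
 * timeout:
 *
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 */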
1290 
1291 /*
1292  * Flush out buffers on the specified list.
1293  *
1294  */
1295 static int
1296 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
1297     int slptimeo)
1298 {
1299 	struct buf *bp, *nbp;
1300 	int retval, error;
1301 	daddr_t lblkno;
1302 	b_xflags_t xflags;
1303 
1304 	ASSERT_BO_LOCKED(bo);
1305 
1306 	retval = 0;
1307 	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1308 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1309 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1310 			continue;
1311 		}
1312 		lblkno = 0;
1313 		xflags = 0;
1314 		if (nbp != NULL) {
1315 			lblkno = nbp->b_lblkno;
1316 			xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
1317 		}
1318 		retval = EAGAIN;
1319 		error = BUF_TIMELOCK(bp,
1320 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
1321 		    "flushbuf", slpflag, slptimeo);
1322 		if (error) {
1323 			BO_LOCK(bo);
1324 			return (error != ENOLCK ? error : EAGAIN);
1325 		}
1326 		KASSERT(bp->b_bufobj == bo,
1327 		    ("bp %p wrong b_bufobj %p should be %p",
1328 		    bp, bp->b_bufobj, bo));
1329 		if (bp->b_bufobj != bo) {	/* XXX: necessary ? */
1330 			BUF_UNLOCK(bp);
1331 			BO_LOCK(bo);
1332 			return (EAGAIN);
1333 		}
1334 		/*
1335 		 * XXX Since there are no node locks for NFS, I
1336 		 * believe there is a slight chance that a delayed
1337 		 * write will occur while sleeping just above, so
1338 		 * check for it.
1339 		 */
1340 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1341 		    (flags & V_SAVE)) {
1342 			BO_LOCK(bo);
1343 			bremfree(bp);
1344 			BO_UNLOCK(bo);
1345 			bp->b_flags |= B_ASYNC;
1346 			bwrite(bp);
1347 			BO_LOCK(bo);
1348 			return (EAGAIN);	/* XXX: why not loop ? */
1349 		}
1350 		BO_LOCK(bo);
1351 		bremfree(bp);
1352 		BO_UNLOCK(bo);
1353 		bp->b_flags |= (B_INVAL | B_RELBUF);
1354 		bp->b_flags &= ~B_ASYNC;
1355 		brelse(bp);
1356 		BO_LOCK(bo);
1357 		if (nbp != NULL &&
1358 		    (nbp->b_bufobj != bo ||
1359 		     nbp->b_lblkno != lblkno ||
1360 		     (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) != xflags))
1361 			break;			/* nbp invalid */
1362 	}
1363 	return (retval);
1364 }
1365 
1366 /*
1367  * Truncate a file's buffer and pages to a specified length.  This
1368  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1369  * sync activity.
1370  */
1371 int
1372 vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize)
1373 {
1374 	struct buf *bp, *nbp;
1375 	int anyfreed;
1376 	int trunclbn;
1377 	struct bufobj *bo;
1378 
1379 	CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
1380 	    vp, cred, blksize, (uintmax_t)length);
1381 
1382 	/*
1383 	 * Round up to the *next* lbn.
1384 	 */
1385 	trunclbn = (length + blksize - 1) / blksize;
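	/*
	 * Editor's example: length 5000 with 4KB blocks gives trunclbn =
	 * (5000 + 4095) / 4096 = 2, so buffers at lblkno 0 and 1 survive
	 * and buffers at lblkno >= 2 are invalidated below.
	 */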
1386 
1387 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1388 restart:
1389 	bo = &vp->v_bufobj;
1390 	BO_LOCK(bo);
1391 	anyfreed = 1;
1392 	for (;anyfreed;) {
1393 		anyfreed = 0;
1394 		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1395 			if (bp->b_lblkno < trunclbn)
1396 				continue;
1397 			if (BUF_LOCK(bp,
1398 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1399 			    BO_MTX(bo)) == ENOLCK)
1400 				goto restart;
1401 
1402 			BO_LOCK(bo);
1403 			bremfree(bp);
1404 			BO_UNLOCK(bo);
1405 			bp->b_flags |= (B_INVAL | B_RELBUF);
1406 			bp->b_flags &= ~B_ASYNC;
1407 			brelse(bp);
1408 			anyfreed = 1;
1409 
1410 			BO_LOCK(bo);
1411 			if (nbp != NULL &&
1412 			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1413 			    (nbp->b_vp != vp) ||
1414 			    (nbp->b_flags & B_DELWRI))) {
1415 				BO_UNLOCK(bo);
1416 				goto restart;
1417 			}
1418 		}
1419 
1420 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1421 			if (bp->b_lblkno < trunclbn)
1422 				continue;
1423 			if (BUF_LOCK(bp,
1424 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1425 			    BO_MTX(bo)) == ENOLCK)
1426 				goto restart;
1427 			BO_LOCK(bo);
1428 			bremfree(bp);
1429 			BO_UNLOCK(bo);
1430 			bp->b_flags |= (B_INVAL | B_RELBUF);
1431 			bp->b_flags &= ~B_ASYNC;
1432 			brelse(bp);
1433 			anyfreed = 1;
1434 
1435 			BO_LOCK(bo);
1436 			if (nbp != NULL &&
1437 			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1438 			    (nbp->b_vp != vp) ||
1439 			    (nbp->b_flags & B_DELWRI) == 0)) {
1440 				BO_UNLOCK(bo);
1441 				goto restart;
1442 			}
1443 		}
1444 	}
1445 
1446 	if (length > 0) {
1447 restartsync:
1448 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1449 			if (bp->b_lblkno > 0)
1450 				continue;
1451 			/*
1452 			 * Since we hold the vnode lock this should only
1453 			 * fail if we're racing with the buf daemon.
1454 			 */
1455 			if (BUF_LOCK(bp,
1456 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1457 			    BO_MTX(bo)) == ENOLCK) {
1458 				goto restart;
1459 			}
1460 			VNASSERT((bp->b_flags & B_DELWRI), vp,
1461 			    ("buf(%p) on dirty queue without DELWRI", bp));
1462 
1463 			BO_LOCK(bo);
1464 			bremfree(bp);
1465 			BO_UNLOCK(bo);
1466 			bawrite(bp);
1467 			BO_LOCK(bo);
1468 			goto restartsync;
1469 		}
1470 	}
1471 
1472 	bufobj_wwait(bo, 0, 0);
1473 	BO_UNLOCK(bo);
1474 	vnode_pager_setsize(vp, length);
1475 
1476 	return (0);
1477 }
1478 
1479 /*
1480  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1481  *		 a vnode.
1482  *
1483  *	NOTE: We have to deal with the special case of a background bitmap
1484  *	buffer, a situation where two buffers will have the same logical
1485  *	block offset.  We want (1) only the foreground buffer to be accessed
1486  *	in a lookup and (2) must differentiate between the foreground and
1487  *	background buffer in the splay tree algorithm because the splay
1488  *	tree cannot normally handle multiple entities with the same 'index'.
1489  *	We accomplish this by adding differentiating flags to the splay tree's
1490  *	numerical domain.
1491  */
1492 static
1493 struct buf *
1494 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1495 {
1496 	struct buf dummy;
1497 	struct buf *lefttreemax, *righttreemin, *y;
1498 
1499 	if (root == NULL)
1500 		return (NULL);
1501 	lefttreemax = righttreemin = &dummy;
1502 	for (;;) {
1503 		if (lblkno < root->b_lblkno) {
1504 			if ((y = root->b_left) == NULL)
1505 				break;
1506 			if (lblkno < y->b_lblkno) {
1507 				/* Rotate right. */
1508 				root->b_left = y->b_right;
1509 				y->b_right = root;
1510 				root = y;
1511 				if ((y = root->b_left) == NULL)
1512 					break;
1513 			}
1514 			/* Link into the new root's right tree. */
1515 			righttreemin->b_left = root;
1516 			righttreemin = root;
1517 		} else if (lblkno > root->b_lblkno) {
1518 			if ((y = root->b_right) == NULL)
1519 				break;
1520 			if (lblkno > y->b_lblkno) {
1521 				/* Rotate left. */
1522 				root->b_right = y->b_left;
1523 				y->b_left = root;
1524 				root = y;
1525 				if ((y = root->b_right) == NULL)
1526 					break;
1527 			}
1528 			/* Link into the new root's left tree. */
1529 			lefttreemax->b_right = root;
1530 			lefttreemax = root;
1531 		} else {
1532 			break;
1533 		}
1534 		root = y;
1535 	}
1536 	/* Assemble the new root. */
1537 	lefttreemax->b_right = root->b_left;
1538 	righttreemin->b_left = root->b_right;
1539 	root->b_left = dummy.b_right;
1540 	root->b_right = dummy.b_left;
1541 	return (root);
1542 }
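/*
 * Editor's note: this is a top-down splay in the style of Sleator and
 * Tarjan; after it returns, the buffer with the closest logical block
 * number is at the root, so callers test the root and relink, e.g.:
 *
 *	bv->bv_root = buf_splay(lblkno, 0, bv->bv_root);
 *	if (bv->bv_root != NULL && bv->bv_root->b_lblkno == lblkno)
 *		... found ...
 */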
1543 
1544 static void
1545 buf_vlist_remove(struct buf *bp)
1546 {
1547 	struct buf *root;
1548 	struct bufv *bv;
1549 
1550 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1551 	ASSERT_BO_LOCKED(bp->b_bufobj);
1552 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
1553 	    (BX_VNDIRTY|BX_VNCLEAN),
1554 	    ("buf_vlist_remove: Buf %p is on two lists", bp));
1555 	if (bp->b_xflags & BX_VNDIRTY)
1556 		bv = &bp->b_bufobj->bo_dirty;
1557 	else
1558 		bv = &bp->b_bufobj->bo_clean;
1559 	if (bp != bv->bv_root) {
1560 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1561 		KASSERT(root == bp, ("splay lookup failed in remove"));
1562 	}
1563 	if (bp->b_left == NULL) {
1564 		root = bp->b_right;
1565 	} else {
1566 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1567 		root->b_right = bp->b_right;
1568 	}
1569 	bv->bv_root = root;
1570 	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1571 	bv->bv_cnt--;
1572 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1573 }
1574 
1575 /*
1576  * Add the buffer to the sorted clean or dirty block list using a
1577  * splay tree algorithm.
1578  *
1579  * NOTE: xflags is passed as a constant, optimizing this inline function!
1580  */
1581 static void
1582 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1583 {
1584 	struct buf *root;
1585 	struct bufv *bv;
1586 
1587 	ASSERT_BO_LOCKED(bo);
1588 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1589 	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
1590 	bp->b_xflags |= xflags;
1591 	if (xflags & BX_VNDIRTY)
1592 		bv = &bo->bo_dirty;
1593 	else
1594 		bv = &bo->bo_clean;
1595 
1596 	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1597 	if (root == NULL) {
1598 		bp->b_left = NULL;
1599 		bp->b_right = NULL;
1600 		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1601 	} else if (bp->b_lblkno < root->b_lblkno) {
1602 		bp->b_left = root->b_left;
1603 		bp->b_right = root;
1604 		root->b_left = NULL;
1605 		TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
1606 	} else {
1607 		bp->b_right = root->b_right;
1608 		bp->b_left = root;
1609 		root->b_right = NULL;
1610 		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
1611 	}
1612 	bv->bv_cnt++;
1613 	bv->bv_root = bp;
1614 }
1615 
1616 /*
1617  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1618  * shadow buffers used in background bitmap writes.
1619  *
1620  * This code isn't quite efficient as it could be because we are maintaining
1621  * two sorted lists and do not know which list the block resides in.
1622  *
1623  * During a "make buildworld" the desired buffer is found at one of
1624  * the roots more than 60% of the time.  Thus, checking both roots
1625  * before performing either splay eliminates unnecessary splays on the
1626  * first tree splayed.
1627  */
1628 struct buf *
1629 gbincore(struct bufobj *bo, daddr_t lblkno)
1630 {
1631 	struct buf *bp;
1632 
1633 	ASSERT_BO_LOCKED(bo);
1634 	if ((bp = bo->bo_clean.bv_root) != NULL && bp->b_lblkno == lblkno)
1635 		return (bp);
1636 	if ((bp = bo->bo_dirty.bv_root) != NULL && bp->b_lblkno == lblkno)
1637 		return (bp);
1638 	if ((bp = bo->bo_clean.bv_root) != NULL) {
1639 		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
1640 		if (bp->b_lblkno == lblkno)
1641 			return (bp);
1642 	}
1643 	if ((bp = bo->bo_dirty.bv_root) != NULL) {
1644 		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
1645 		if (bp->b_lblkno == lblkno)
1646 			return (bp);
1647 	}
1648 	return (NULL);
1649 }
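/*
 * Usage sketch (editor's addition): lookups must hold the bufobj lock,
 * as asserted above, and the result is only stable while it is held:
 *
 *	BO_LOCK(bo);
 *	bp = gbincore(bo, lblkno);
 *	if (bp != NULL)
 *		... block lblkno is cached; lock bp before BO_UNLOCK ...
 *	BO_UNLOCK(bo);
 */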
1650 
1651 /*
1652  * Associate a buffer with a vnode.
1653  */
1654 void
1655 bgetvp(struct vnode *vp, struct buf *bp)
1656 {
1657 	struct bufobj *bo;
1658 
1659 	bo = &vp->v_bufobj;
1660 	ASSERT_BO_LOCKED(bo);
1661 	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
1662 
1663 	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
1664 	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
1665 	    ("bgetvp: bp already attached! %p", bp));
1666 
1667 	vhold(vp);
1668 	bp->b_vp = vp;
1669 	bp->b_bufobj = bo;
1670 	/*
1671 	 * Insert onto list for new vnode.
1672 	 */
1673 	buf_vlist_add(bp, bo, BX_VNCLEAN);
1674 }
1675 
1676 /*
1677  * Disassociate a buffer from a vnode.
1678  */
1679 void
1680 brelvp(struct buf *bp)
1681 {
1682 	struct bufobj *bo;
1683 	struct vnode *vp;
1684 
1685 	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1686 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1687 
1688 	/*
1689 	 * Delete from old vnode list, if on one.
1690 	 */
1691 	vp = bp->b_vp;		/* XXX */
1692 	bo = bp->b_bufobj;
1693 	BO_LOCK(bo);
1694 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1695 		buf_vlist_remove(bp);
1696 	else
1697 		panic("brelvp: Buffer %p not on queue.", bp);
1698 	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1699 		bo->bo_flag &= ~BO_ONWORKLST;
1700 		mtx_lock(&sync_mtx);
1701 		LIST_REMOVE(bo, bo_synclist);
1702 		syncer_worklist_len--;
1703 		mtx_unlock(&sync_mtx);
1704 	}
1705 	bp->b_vp = NULL;
1706 	bp->b_bufobj = NULL;
1707 	BO_UNLOCK(bo);
1708 	vdrop(vp);
1709 }
1710 
1711 /*
1712  * Add an item to the syncer work queue.
1713  */
1714 static void
1715 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
1716 {
1717 	int slot;
1718 
1719 	ASSERT_BO_LOCKED(bo);
1720 
1721 	mtx_lock(&sync_mtx);
1722 	if (bo->bo_flag & BO_ONWORKLST)
1723 		LIST_REMOVE(bo, bo_synclist);
1724 	else {
1725 		bo->bo_flag |= BO_ONWORKLST;
1726 		syncer_worklist_len++;
1727 	}
1728 
1729 	if (delay > syncer_maxdelay - 2)
1730 		delay = syncer_maxdelay - 2;
1731 	slot = (syncer_delayno + delay) & syncer_mask;
1732 
1733 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
1734 	mtx_unlock(&sync_mtx);
1735 }
1736 
1737 static int
1738 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1739 {
1740 	int error, len;
1741 
1742 	mtx_lock(&sync_mtx);
1743 	len = syncer_worklist_len - sync_vnode_count;
1744 	mtx_unlock(&sync_mtx);
1745 	error = SYSCTL_OUT(req, &len, sizeof(len));
1746 	return (error);
1747 }
1748 
1749 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1750     sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
1751 
1752 static struct proc *updateproc;
1753 static void sched_sync(void);
1754 static struct kproc_desc up_kp = {
1755 	"syncer",
1756 	sched_sync,
1757 	&updateproc
1758 };
1759 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
1760 
1761 static int
1762 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
1763 {
1764 	struct vnode *vp;
1765 	struct mount *mp;
1766 
1767 	*bo = LIST_FIRST(slp);
1768 	if (*bo == NULL)
1769 		return (0);
1770 	vp = (*bo)->__bo_vnode;	/* XXX */
1771 	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
1772 		return (1);
1773 	/*
1774 	 * We use vhold in case the vnode does not
1775 	 * successfully sync.  vhold prevents the vnode from
1776 	 * going away when we unlock the sync_mtx so that
1777 	 * we can acquire the vnode interlock.
1778 	 */
1779 	vholdl(vp);
1780 	mtx_unlock(&sync_mtx);
1781 	VI_UNLOCK(vp);
1782 	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1783 		vdrop(vp);
1784 		mtx_lock(&sync_mtx);
1785 		return (*bo == LIST_FIRST(slp));
1786 	}
1787 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1788 	(void) VOP_FSYNC(vp, MNT_LAZY, td);
1789 	VOP_UNLOCK(vp, 0);
1790 	vn_finished_write(mp);
1791 	BO_LOCK(*bo);
1792 	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
1793 		/*
1794 		 * Put us back on the worklist.  The worklist
1795 		 * routine will remove us from our current
1796 		 * position and then add us back in at a later
1797 		 * position.
1798 		 */
1799 		vn_syncer_add_to_worklist(*bo, syncdelay);
1800 	}
1801 	BO_UNLOCK(*bo);
1802 	vdrop(vp);
1803 	mtx_lock(&sync_mtx);
1804 	return (0);
1805 }
1806 
1807 /*
1808  * System filesystem synchronizer daemon.
1809  */
1810 static void
1811 sched_sync(void)
1812 {
1813 	struct synclist *next, *slp;
1814 	struct bufobj *bo;
1815 	long starttime;
1816 	struct thread *td = curthread;
1817 	int last_work_seen;
1818 	int net_worklist_len;
1819 	int syncer_final_iter;
1820 	int first_printf;
1821 	int error;
1822 
1823 	last_work_seen = 0;
1824 	syncer_final_iter = 0;
1825 	first_printf = 1;
1826 	syncer_state = SYNCER_RUNNING;
1827 	starttime = time_uptime;
1828 	td->td_pflags |= TDP_NORUNNINGBUF;
1829 
1830 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1831 	    SHUTDOWN_PRI_LAST);
1832 
1833 	mtx_lock(&sync_mtx);
1834 	for (;;) {
1835 		if (syncer_state == SYNCER_FINAL_DELAY &&
1836 		    syncer_final_iter == 0) {
1837 			mtx_unlock(&sync_mtx);
1838 			kproc_suspend_check(td->td_proc);
1839 			mtx_lock(&sync_mtx);
1840 		}
1841 		net_worklist_len = syncer_worklist_len - sync_vnode_count;
1842 		if (syncer_state != SYNCER_RUNNING &&
1843 		    starttime != time_uptime) {
1844 			if (first_printf) {
1845 				printf("\nSyncing disks, vnodes remaining...");
1846 				first_printf = 0;
1847 			}
1848 			printf("%d ", net_worklist_len);
1849 		}
1850 		starttime = time_uptime;
1851 
1852 		/*
1853 		 * Push files whose dirty time has expired.  Be careful
1854 		 * of interrupt race on slp queue.
1855 		 *
1856 		 * Skip over empty worklist slots when shutting down.
1857 		 */
1858 		do {
1859 			slp = &syncer_workitem_pending[syncer_delayno];
1860 			syncer_delayno += 1;
1861 			if (syncer_delayno == syncer_maxdelay)
1862 				syncer_delayno = 0;
1863 			next = &syncer_workitem_pending[syncer_delayno];
1864 			/*
1865 			 * If the worklist has wrapped since it
1866 			 * was emptied of all but syncer vnodes,
1867 			 * switch to the FINAL_DELAY state and run
1868 			 * for one more second.
1869 			 */
1870 			if (syncer_state == SYNCER_SHUTTING_DOWN &&
1871 			    net_worklist_len == 0 &&
1872 			    last_work_seen == syncer_delayno) {
1873 				syncer_state = SYNCER_FINAL_DELAY;
1874 				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1875 			}
1876 		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1877 		    syncer_worklist_len > 0);
1878 
1879 		/*
1880 		 * Keep track of the last time there was anything
1881 		 * on the worklist other than syncer vnodes.
1882 		 * Return to the SHUTTING_DOWN state if any
1883 		 * new work appears.
1884 		 */
1885 		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1886 			last_work_seen = syncer_delayno;
1887 		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1888 			syncer_state = SYNCER_SHUTTING_DOWN;
1889 		while (!LIST_EMPTY(slp)) {
1890 			error = sync_vnode(slp, &bo, td);
1891 			if (error == 1) {
1892 				LIST_REMOVE(bo, bo_synclist);
1893 				LIST_INSERT_HEAD(next, bo, bo_synclist);
1894 				continue;
1895 			}
1896 
1897 			if (first_printf == 0)
1898 				wdog_kern_pat(WD_LASTVAL);
1899 
1900 		}
1901 		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1902 			syncer_final_iter--;
1903 		/*
1904 		 * The variable rushjob allows the kernel to speed up the
1905 		 * processing of the filesystem syncer process. A rushjob
1906 		 * value of N tells the filesystem syncer to process the next
1907 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1908 		 * is used by the soft update code to speed up the filesystem
1909 		 * syncer process when the incore state is getting so far
1910 		 * ahead of the disk that the kernel memory pool is being
1911 		 * threatened with exhaustion.
1912 		 */
1913 		if (rushjob > 0) {
1914 			rushjob -= 1;
1915 			continue;
1916 		}
1917 		/*
1918 		 * Just sleep for a short period of time between
1919 		 * iterations when shutting down to allow some I/O
1920 		 * to happen.
1921 		 *
1922 		 * If it has taken us less than a second to process the
1923 		 * current work, then wait. Otherwise start right over
1924 		 * again. We can still lose time if any single round
1925 		 * takes more than two seconds, but it does not really
1926 		 * matter as we are just trying to generally pace the
1927 		 * filesystem activity.
1928 		 */
1929 		if (syncer_state != SYNCER_RUNNING ||
1930 		    time_uptime == starttime) {
1931 			thread_lock(td);
1932 			sched_prio(td, PPAUSE);
1933 			thread_unlock(td);
1934 		}
1935 		if (syncer_state != SYNCER_RUNNING)
1936 			cv_timedwait(&sync_wakeup, &sync_mtx,
1937 			    hz / SYNCER_SHUTDOWN_SPEEDUP);
1938 		else if (time_uptime == starttime)
1939 			cv_timedwait(&sync_wakeup, &sync_mtx, hz);
1940 	}
1941 }
1942 
1943 /*
1944  * Request the syncer daemon to speed up its work.
1945  * We never push it to speed up more than half of its
1946  * normal turn time; otherwise it could take over the CPU.
1947  */
1948 int
1949 speedup_syncer(void)
1950 {
1951 	int ret = 0;
1952 
1953 	mtx_lock(&sync_mtx);
1954 	if (rushjob < syncdelay / 2) {
1955 		rushjob += 1;
1956 		stat_rush_requests += 1;
1957 		ret = 1;
1958 	}
1959 	mtx_unlock(&sync_mtx);
1960 	cv_broadcast(&sync_wakeup);
1961 	return (ret);
1962 }
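
/*
 * A hedged sketch of a typical caller, in the spirit of the soft updates
 * usage described for rushjob above: when dirty in-core state outruns the
 * syncer, the filesystem nudges the syncer one second ahead per call.  The
 * names example_prod_syncer(), ex_dirty and ex_high_water are hypothetical.
 */
#if 0
static void
example_prod_syncer(int ex_dirty, int ex_high_water)
{

	if (ex_dirty > ex_high_water && speedup_syncer() == 0) {
		/*
		 * rushjob is already at syncdelay / 2; the syncer cannot
		 * safely be pushed any harder, so the caller must wait.
		 */
	}
}
#endif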
1963 
1964 /*
1965  * Tell the syncer to speed up its work and run through its work
1966  * list several times, then tell it to shut down.
1967  */
1968 static void
1969 syncer_shutdown(void *arg, int howto)
1970 {
1971 
1972 	if (howto & RB_NOSYNC)
1973 		return;
1974 	mtx_lock(&sync_mtx);
1975 	syncer_state = SYNCER_SHUTTING_DOWN;
1976 	rushjob = 0;
1977 	mtx_unlock(&sync_mtx);
1978 	cv_broadcast(&sync_wakeup);
1979 	kproc_shutdown(arg, howto);
1980 }
1981 
1982 /*
1983  * Reassign a buffer from one vnode to another.
1984  * Used to assign file specific control information
1985  * (indirect blocks) to the vnode to which they belong.
1986  */
1987 void
1988 reassignbuf(struct buf *bp)
1989 {
1990 	struct vnode *vp;
1991 	struct bufobj *bo;
1992 	int delay;
1993 #ifdef INVARIANTS
1994 	struct bufv *bv;
1995 #endif
1996 
1997 	vp = bp->b_vp;
1998 	bo = bp->b_bufobj;
1999 	++reassignbufcalls;
2000 
2001 	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
2002 	    bp, bp->b_vp, bp->b_flags);
2003 	/*
2004 	 * B_PAGING flagged buffers cannot be reassigned because their vp
2005 	 * is not fully linked in.
2006 	 */
2007 	if (bp->b_flags & B_PAGING)
2008 		panic("cannot reassign paging buffer");
2009 
2010 	/*
2011 	 * Delete from old vnode list, if on one.
2012 	 */
2013 	BO_LOCK(bo);
2014 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2015 		buf_vlist_remove(bp);
2016 	else
2017 		panic("reassignbuf: Buffer %p not on queue.", bp);
2018 	/*
2019 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
2020 	 * of clean buffers.
2021 	 */
2022 	if (bp->b_flags & B_DELWRI) {
2023 		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
2024 			switch (vp->v_type) {
2025 			case VDIR:
2026 				delay = dirdelay;
2027 				break;
2028 			case VCHR:
2029 				delay = metadelay;
2030 				break;
2031 			default:
2032 				delay = filedelay;
2033 			}
2034 			vn_syncer_add_to_worklist(bo, delay);
2035 		}
2036 		buf_vlist_add(bp, bo, BX_VNDIRTY);
2037 	} else {
2038 		buf_vlist_add(bp, bo, BX_VNCLEAN);
2039 
2040 		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2041 			mtx_lock(&sync_mtx);
2042 			LIST_REMOVE(bo, bo_synclist);
2043 			syncer_worklist_len--;
2044 			mtx_unlock(&sync_mtx);
2045 			bo->bo_flag &= ~BO_ONWORKLST;
2046 		}
2047 	}
2048 #ifdef INVARIANTS
2049 	bv = &bo->bo_clean;
2050 	bp = TAILQ_FIRST(&bv->bv_hd);
2051 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2052 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2053 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
2054 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2055 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2056 	bv = &bo->bo_dirty;
2057 	bp = TAILQ_FIRST(&bv->bv_hd);
2058 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2059 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2060 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
2061 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2062 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2063 #endif
2064 	BO_UNLOCK(bo);
2065 }
2066 
2067 /*
2068  * Increment the use and hold counts on the vnode, taking care to reference
2069  * the driver's usecount if this is a chardev.  The vholdl() will remove
2070  * the vnode from the free list if it is presently free.  Requires the
2071  * vnode interlock and returns with it held.
2072  */
2073 static void
2074 v_incr_usecount(struct vnode *vp)
2075 {
2076 
2077 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2078 	vp->v_usecount++;
2079 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2080 		dev_lock();
2081 		vp->v_rdev->si_usecount++;
2082 		dev_unlock();
2083 	}
2084 	vholdl(vp);
2085 }
2086 
2087 /*
2088  * Turn a holdcnt into a use+holdcnt such that only one call to
2089  * v_decr_usecount is needed.
2090  */
2091 static void
2092 v_upgrade_usecount(struct vnode *vp)
2093 {
2094 
2095 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2096 	vp->v_usecount++;
2097 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2098 		dev_lock();
2099 		vp->v_rdev->si_usecount++;
2100 		dev_unlock();
2101 	}
2102 }
2103 
2104 /*
2105  * Decrement the vnode use and hold count along with the driver's usecount
2106  * if this is a chardev.  The vdropl() below releases the vnode interlock
2107  * as it may free the vnode.
2108  */
2109 static void
2110 v_decr_usecount(struct vnode *vp)
2111 {
2112 
2113 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2114 	VNASSERT(vp->v_usecount > 0, vp,
2115 	    ("v_decr_usecount: negative usecount"));
2116 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2117 	vp->v_usecount--;
2118 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2119 		dev_lock();
2120 		vp->v_rdev->si_usecount--;
2121 		dev_unlock();
2122 	}
2123 	vdropl(vp);
2124 }
2125 
2126 /*
2127  * Decrement only the use count and driver use count.  This is intended to
2128  * be paired with a follow-on vdropl() to release the remaining hold count.
2129  * In this way we may vgone() a vnode with a 0 usecount without risk of
2130  * having it end up on a free list because the hold count is kept above 0.
2131  */
2132 static void
2133 v_decr_useonly(struct vnode *vp)
2134 {
2135 
2136 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2137 	VNASSERT(vp->v_usecount > 0, vp,
2138 	    ("v_decr_useonly: negative usecount"));
2139 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2140 	vp->v_usecount--;
2141 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2142 		dev_lock();
2143 		vp->v_rdev->si_usecount--;
2144 		dev_unlock();
2145 	}
2146 }
2147 
2148 /*
2149  * Grab a particular vnode from the free list, increment its
2150  * reference count and lock it.  VI_DOOMED is set if the vnode
2151  * is being destroyed.  Only callers who specify LK_RETRY will
2152  * see doomed vnodes.  If inactive processing was delayed in
2153  * vput try to do it here.
2154  */
2155 int
2156 vget(struct vnode *vp, int flags, struct thread *td)
2157 {
2158 	int error;
2159 
2160 	error = 0;
2161 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2162 	    ("vget: invalid lock operation"));
2163 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2164 
2165 	if ((flags & LK_INTERLOCK) == 0)
2166 		VI_LOCK(vp);
2167 	vholdl(vp);
2168 	if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) {
2169 		vdrop(vp);
2170 		CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2171 		    vp);
2172 		return (error);
2173 	}
2174 	if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2175 		panic("vget: vn_lock failed to return ENOENT\n");
2176 	VI_LOCK(vp);
2177 	/* Upgrade our holdcnt to a usecount. */
2178 	v_upgrade_usecount(vp);
2179 	/*
2180 	 * We don't guarantee that any particular close will
2181 	 * trigger inactive processing so just make a best effort
2182 	 * here at preventing a reference to a removed file.  If
2183 	 * we don't succeed no harm is done.
2184 	 */
2185 	if (vp->v_iflag & VI_OWEINACT) {
2186 		if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
2187 		    (flags & LK_NOWAIT) == 0)
2188 			vinactive(vp, td);
2189 		vp->v_iflag &= ~VI_OWEINACT;
2190 	}
2191 	VI_UNLOCK(vp);
2192 	return (0);
2193 }
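
/*
 * A minimal sketch of the usual vget()/vput() pattern, assuming the caller
 * found vp through some filesystem-private lookup; example_use_vnode() is a
 * hypothetical name.  Without LK_RETRY, vn_lock() fails on a doomed vnode
 * instead of handing it back to the caller.
 */
#if 0
static int
example_use_vnode(struct vnode *vp, struct thread *td)
{
	int error;

	error = vget(vp, LK_EXCLUSIVE, td);	/* reference + lock */
	if (error != 0)
		return (error);

	/* ... operate on the locked, referenced vnode ... */

	vput(vp);				/* unlock + drop reference */
	return (0);
}
#endif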
2194 
2195 /*
2196  * Increase the reference count of a vnode.
2197  */
2198 void
2199 vref(struct vnode *vp)
2200 {
2201 
2202 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2203 	VI_LOCK(vp);
2204 	v_incr_usecount(vp);
2205 	VI_UNLOCK(vp);
2206 }
2207 
2208 /*
2209  * Return reference count of a vnode.
2210  *
2211  * The results of this call are only guaranteed when some mechanism other
2212  * than the VI lock is used to stop other processes from gaining references
2213  * to the vnode.  This may be the case if the caller holds the only reference.
2214  * This is also useful when stale data is acceptable as race conditions may
2215  * be accounted for by some other means.
2216  */
2217 int
2218 vrefcnt(struct vnode *vp)
2219 {
2220 	int usecnt;
2221 
2222 	VI_LOCK(vp);
2223 	usecnt = vp->v_usecount;
2224 	VI_UNLOCK(vp);
2225 
2226 	return (usecnt);
2227 }
2228 
2229 #define	VPUTX_VRELE	1
2230 #define	VPUTX_VPUT	2
2231 #define	VPUTX_VUNREF	3
2232 
2233 static void
2234 vputx(struct vnode *vp, int func)
2235 {
2236 	int error;
2237 
2238 	KASSERT(vp != NULL, ("vputx: null vp"));
2239 	if (func == VPUTX_VUNREF)
2240 		ASSERT_VOP_LOCKED(vp, "vunref");
2241 	else if (func == VPUTX_VPUT)
2242 		ASSERT_VOP_LOCKED(vp, "vput");
2243 	else
2244 		KASSERT(func == VPUTX_VRELE, ("vputx: wrong func"));
2245 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2246 	VI_LOCK(vp);
2247 
2248 	/* Skip this v_writecount check if we're going to panic below. */
2249 	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2250 	    ("vputx: missed vn_close"));
2251 	error = 0;
2252 
2253 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2254 	    vp->v_usecount == 1)) {
2255 		if (func == VPUTX_VPUT)
2256 			VOP_UNLOCK(vp, 0);
2257 		v_decr_usecount(vp);
2258 		return;
2259 	}
2260 
2261 	if (vp->v_usecount != 1) {
2262 		vprint("vputx: negative ref count", vp);
2263 		panic("vputx: negative ref cnt");
2264 	}
2265 	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
2266 	/*
2267 	 * We want to hold the vnode until the inactive finishes to
2268 	 * prevent vgone() races.  We drop the use count here and the
2269 	 * hold count below when we're done.
2270 	 */
2271 	v_decr_useonly(vp);
2272 	/*
2273 	 * We must call VOP_INACTIVE with the node locked. Mark
2274 	 * as VI_DOINGINACT to avoid recursion.
2275 	 */
2276 	vp->v_iflag |= VI_OWEINACT;
2277 	switch (func) {
2278 	case VPUTX_VRELE:
2279 		error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
2280 		VI_LOCK(vp);
2281 		break;
2282 	case VPUTX_VPUT:
2283 		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2284 			error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
2285 			    LK_NOWAIT);
2286 			VI_LOCK(vp);
2287 		}
2288 		break;
2289 	case VPUTX_VUNREF:
2290 		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
2291 			error = EBUSY;
2292 		break;
2293 	}
2294 	if (vp->v_usecount > 0)
2295 		vp->v_iflag &= ~VI_OWEINACT;
2296 	if (error == 0) {
2297 		if (vp->v_iflag & VI_OWEINACT)
2298 			vinactive(vp, curthread);
2299 		if (func != VPUTX_VUNREF)
2300 			VOP_UNLOCK(vp, 0);
2301 	}
2302 	vdropl(vp);
2303 }
2304 
2305 /*
2306  * Vnode put/release.
2307  * If count drops to zero, call inactive routine and return to freelist.
2308  */
2309 void
2310 vrele(struct vnode *vp)
2311 {
2312 
2313 	vputx(vp, VPUTX_VRELE);
2314 }
2315 
2316 /*
2317  * Release an already locked vnode.  This gives the same effect as
2318  * unlock+vrele(), but takes less time and avoids releasing and
2319  * re-acquiring the lock (as vrele() acquires the lock internally).
2320  */
2321 void
2322 vput(struct vnode *vp)
2323 {
2324 
2325 	vputx(vp, VPUTX_VPUT);
2326 }
2327 
2328 /*
2329  * Release an exclusively locked vnode. Do not unlock the vnode lock.
2330  */
2331 void
2332 vunref(struct vnode *vp)
2333 {
2334 
2335 	vputx(vp, VPUTX_VUNREF);
2336 }
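
/*
 * A sketch contrasting the three vputx() wrappers, using the hypothetical
 * helper example_release_paths(); the only difference between them is the
 * vnode lock state expected on entry and left on return.
 */
#if 0
static void
example_release_paths(struct vnode *vp)
{

	vref(vp);
	vrele(vp);			/* unlocked before and after */

	vref(vp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vput(vp);			/* drops both the lock and the ref */

	vref(vp);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	vunref(vp);			/* drops the ref, lock stays held */
	VOP_UNLOCK(vp, 0);
}
#endif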
2337 
2338 /*
2339  * Somebody doesn't want the vnode recycled.
2340  */
2341 void
2342 vhold(struct vnode *vp)
2343 {
2344 
2345 	VI_LOCK(vp);
2346 	vholdl(vp);
2347 	VI_UNLOCK(vp);
2348 }
2349 
2350 /*
2351  * Increase the hold count and activate if this is the first reference.
2352  */
2353 void
2354 vholdl(struct vnode *vp)
2355 {
2356 	struct mount *mp;
2357 
2358 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2359 	vp->v_holdcnt++;
2360 	if (!VSHOULDBUSY(vp))
2361 		return;
2362 	ASSERT_VI_LOCKED(vp, "vholdl");
2363 	VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free"));
2364 	VNASSERT(vp->v_op != NULL, vp, ("vholdl: vnode already reclaimed."));
2365 	/*
2366 	 * Remove a vnode from the free list, mark it as in use,
2367 	 * and put it on the active list.
2368 	 */
2369 	mtx_lock(&vnode_free_list_mtx);
2370 	TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
2371 	freevnodes--;
2372 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
2373 	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
2374 	    ("Activating already active vnode"));
2375 	vp->v_iflag |= VI_ACTIVE;
2376 	mp = vp->v_mount;
2377 	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
2378 	mp->mnt_activevnodelistsize++;
2379 	mtx_unlock(&vnode_free_list_mtx);
2380 }
2381 
2382 /*
2383  * Note that there is one less who cares about this vnode.
2384  * vdrop() is the opposite of vhold().
2385  */
2386 void
2387 vdrop(struct vnode *vp)
2388 {
2389 
2390 	VI_LOCK(vp);
2391 	vdropl(vp);
2392 }
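
/*
 * A minimal sketch of using vhold()/vdrop() to pin a vnode across a blocking
 * section without taking a use reference, much as sync_vnode() does above;
 * example_pin_vnode() is a hypothetical name.
 */
#if 0
static void
example_pin_vnode(struct vnode *vp)
{

	VI_LOCK(vp);
	vholdl(vp);		/* pin vp; removes it from the free list */
	VI_UNLOCK(vp);

	/*
	 * Other locks may now be dropped and the thread may sleep; vp may
	 * still be doomed, but it cannot be recycled while held.
	 */

	vdrop(vp);		/* release the hold */
}
#endif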
2393 
2394 /*
2395  * Drop the hold count of the vnode.  If this is the last reference to
2396  * the vnode, we place it on the free list unless it has been vgone'd
2397  * (marked VI_DOOMED), in which case we will free it.
2398  */
2399 void
2400 vdropl(struct vnode *vp)
2401 {
2402 	struct bufobj *bo;
2403 	struct mount *mp;
2404 	int active;
2405 
2406 	ASSERT_VI_LOCKED(vp, "vdropl");
2407 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2408 	if (vp->v_holdcnt <= 0)
2409 		panic("vdrop: holdcnt %d", vp->v_holdcnt);
2410 	vp->v_holdcnt--;
2411 	if (vp->v_holdcnt > 0) {
2412 		VI_UNLOCK(vp);
2413 		return;
2414 	}
2415 	if ((vp->v_iflag & VI_DOOMED) == 0) {
2416 		/*
2417 		 * Mark a vnode as free: remove it from its active list
2418 		 * and put it up for recycling on the freelist.
2419 		 */
2420 		VNASSERT(vp->v_op != NULL, vp,
2421 		    ("vdropl: vnode already reclaimed."));
2422 		VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2423 		    ("vnode already free"));
2424 		VNASSERT(VSHOULDFREE(vp), vp,
2425 		    ("vdropl: freeing when we shouldn't"));
2426 		active = vp->v_iflag & VI_ACTIVE;
2427 		vp->v_iflag &= ~VI_ACTIVE;
2428 		mp = vp->v_mount;
2429 		mtx_lock(&vnode_free_list_mtx);
2430 		if (active) {
2431 			TAILQ_REMOVE(&mp->mnt_activevnodelist, vp,
2432 			    v_actfreelist);
2433 			mp->mnt_activevnodelistsize--;
2434 		}
2435 		if (vp->v_iflag & VI_AGE) {
2436 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_actfreelist);
2437 		} else {
2438 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
2439 		}
2440 		freevnodes++;
2441 		vp->v_iflag &= ~VI_AGE;
2442 		vp->v_iflag |= VI_FREE;
2443 		mtx_unlock(&vnode_free_list_mtx);
2444 		VI_UNLOCK(vp);
2445 		return;
2446 	}
2447 	/*
2448 	 * The vnode has been marked for destruction, so free it.
2449 	 */
2450 	CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2451 	mtx_lock(&vnode_free_list_mtx);
2452 	numvnodes--;
2453 	mtx_unlock(&vnode_free_list_mtx);
2454 	bo = &vp->v_bufobj;
2455 	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2456 	    ("cleaned vnode still on the free list."));
2457 	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2458 	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
2459 	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
2460 	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
2461 	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2462 	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
2463 	VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
2464 	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
2465 	VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
2466 	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
2467 	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
2468 	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
2469 	VI_UNLOCK(vp);
2470 #ifdef MAC
2471 	mac_vnode_destroy(vp);
2472 #endif
2473 	if (vp->v_pollinfo != NULL)
2474 		destroy_vpollinfo(vp->v_pollinfo);
2475 #ifdef INVARIANTS
2476 	/* XXX Elsewhere we detect an already freed vnode via NULL v_op. */
2477 	vp->v_op = NULL;
2478 #endif
2479 	rangelock_destroy(&vp->v_rl);
2480 	lockdestroy(vp->v_vnlock);
2481 	mtx_destroy(&vp->v_interlock);
2482 	mtx_destroy(BO_MTX(bo));
2483 	uma_zfree(vnode_zone, vp);
2484 }
2485 
2486 /*
2487  * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2488  * flags.  DOINGINACT prevents us from recursing in calls to vinactive.
2489  * OWEINACT tracks whether a vnode missed a call to inactive due to a
2490  * failed lock upgrade.
2491  */
2492 void
2493 vinactive(struct vnode *vp, struct thread *td)
2494 {
2495 	struct vm_object *obj;
2496 
2497 	ASSERT_VOP_ELOCKED(vp, "vinactive");
2498 	ASSERT_VI_LOCKED(vp, "vinactive");
2499 	VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
2500 	    ("vinactive: recursed on VI_DOINGINACT"));
2501 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2502 	vp->v_iflag |= VI_DOINGINACT;
2503 	vp->v_iflag &= ~VI_OWEINACT;
2504 	VI_UNLOCK(vp);
2505 	/*
2506 	 * Before moving off the active list, we must be sure that any
2507 	 * modified pages are on the vnode's dirty list since these will
2508 	 * no longer be checked once the vnode is on the inactive list.
2509 	 * Because the vnode vm object keeps a hold reference on the vnode
2510 	 * if there is at least one resident non-cached page, the vnode
2511 	 * cannot leave the active list without the page cleanup done.
2512 	 */
2513 	obj = vp->v_object;
2514 	if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
2515 		VM_OBJECT_WLOCK(obj);
2516 		vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC);
2517 		VM_OBJECT_WUNLOCK(obj);
2518 	}
2519 	VOP_INACTIVE(vp, td);
2520 	VI_LOCK(vp);
2521 	VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
2522 	    ("vinactive: lost VI_DOINGINACT"));
2523 	vp->v_iflag &= ~VI_DOINGINACT;
2524 }
2525 
2526 /*
2527  * Remove any vnodes in the vnode table belonging to mount point mp.
2528  *
2529  * If FORCECLOSE is not specified, there should not be any active ones,
2530  * return error if any are found (nb: this is a user error, not a
2531  * system error). If FORCECLOSE is specified, detach any active vnodes
2532  * that are found.
2533  *
2534  * If WRITECLOSE is set, only flush out regular file vnodes open for
2535  * writing.
2536  *
2537  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2538  *
2539  * `rootrefs' specifies the base reference count for the root vnode
2540  * of this filesystem. The root vnode is considered busy if its
2541  * v_usecount exceeds this value. On a successful return, vflush()
2542  * will call vrele() on the root vnode exactly rootrefs times.
2543  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2544  * be zero.
2545  */
2546 #ifdef DIAGNOSTIC
2547 static int busyprt = 0;		/* print out busy vnodes */
2548 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
2549 #endif
2550 
2551 int
2552 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
2553 {
2554 	struct vnode *vp, *mvp, *rootvp = NULL;
2555 	struct vattr vattr;
2556 	int busy = 0, error;
2557 
2558 	CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
2559 	    rootrefs, flags);
2560 	if (rootrefs > 0) {
2561 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2562 		    ("vflush: bad args"));
2563 		/*
2564 		 * Get the filesystem root vnode. We can vput() it
2565 		 * immediately, since with rootrefs > 0, it won't go away.
2566 		 */
2567 		if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
2568 			CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
2569 			    __func__, error);
2570 			return (error);
2571 		}
2572 		vput(rootvp);
2573 	}
2574 loop:
2575 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
2576 		vholdl(vp);
2577 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
2578 		if (error) {
2579 			vdrop(vp);
2580 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
2581 			goto loop;
2582 		}
2583 		/*
2584 		 * Skip over any vnodes marked VV_SYSTEM.
2585 		 */
2586 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2587 			VOP_UNLOCK(vp, 0);
2588 			vdrop(vp);
2589 			continue;
2590 		}
2591 		/*
2592 		 * If WRITECLOSE is set, flush out unlinked but still open
2593 		 * files (even if open only for reading) and regular file
2594 		 * vnodes open for writing.
2595 		 */
2596 		if (flags & WRITECLOSE) {
2597 			if (vp->v_object != NULL) {
2598 				VM_OBJECT_WLOCK(vp->v_object);
2599 				vm_object_page_clean(vp->v_object, 0, 0, 0);
2600 				VM_OBJECT_WUNLOCK(vp->v_object);
2601 			}
2602 			error = VOP_FSYNC(vp, MNT_WAIT, td);
2603 			if (error != 0) {
2604 				VOP_UNLOCK(vp, 0);
2605 				vdrop(vp);
2606 				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
2607 				return (error);
2608 			}
2609 			error = VOP_GETATTR(vp, &vattr, td->td_ucred);
2610 			VI_LOCK(vp);
2611 
2612 			if ((vp->v_type == VNON ||
2613 			    (error == 0 && vattr.va_nlink > 0)) &&
2614 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2615 				VOP_UNLOCK(vp, 0);
2616 				vdropl(vp);
2617 				continue;
2618 			}
2619 		} else
2620 			VI_LOCK(vp);
2621 		/*
2622 		 * With v_usecount == 0, all we need to do is clear out the
2623 		 * vnode data structures and we are done.
2624 		 *
2625 		 * If FORCECLOSE is set, forcibly close the vnode.
2626 		 */
2627 		if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
2628 			VNASSERT(vp->v_usecount == 0 ||
2629 			    (vp->v_type != VCHR && vp->v_type != VBLK), vp,
2630 			    ("device VNODE %p is FORCECLOSED", vp));
2631 			vgonel(vp);
2632 		} else {
2633 			busy++;
2634 #ifdef DIAGNOSTIC
2635 			if (busyprt)
2636 				vprint("vflush: busy vnode", vp);
2637 #endif
2638 		}
2639 		VOP_UNLOCK(vp, 0);
2640 		vdropl(vp);
2641 	}
2642 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2643 		/*
2644 		 * If just the root vnode is busy, and if its refcount
2645 		 * is equal to `rootrefs', then go ahead and kill it.
2646 		 */
2647 		VI_LOCK(rootvp);
2648 		KASSERT(busy > 0, ("vflush: not busy"));
2649 		VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
2650 		    ("vflush: usecount %d < rootrefs %d",
2651 		     rootvp->v_usecount, rootrefs));
2652 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2653 			VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
2654 			vgone(rootvp);
2655 			VOP_UNLOCK(rootvp, 0);
2656 			busy = 0;
2657 		} else
2658 			VI_UNLOCK(rootvp);
2659 	}
2660 	if (busy) {
2661 		CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
2662 		    busy);
2663 		return (EBUSY);
2664 	}
2665 	for (; rootrefs > 0; rootrefs--)
2666 		vrele(rootvp);
2667 	return (0);
2668 }
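
/*
 * A hedged sketch of how a filesystem's unmount path might call vflush(),
 * assuming the (hypothetical) filesystem keeps exactly one long-lived
 * reference on its root vnode, hence rootrefs == 1; vflush() will then
 * vrele() the root exactly once on success.  example_unmount_flush() is
 * made up for illustration.
 */
#if 0
static int
example_unmount_flush(struct mount *mp, int mntflags, struct thread *td)
{
	int flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	return (vflush(mp, 1, flags, td));
}
#endif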
2669 
2670 /*
2671  * Recycle an unused vnode to the front of the free list.
2672  */
2673 int
2674 vrecycle(struct vnode *vp)
2675 {
2676 	int recycled;
2677 
2678 	ASSERT_VOP_ELOCKED(vp, "vrecycle");
2679 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2680 	recycled = 0;
2681 	VI_LOCK(vp);
2682 	if (vp->v_usecount == 0) {
2683 		recycled = 1;
2684 		vgonel(vp);
2685 	}
2686 	VI_UNLOCK(vp);
2687 	return (recycled);
2688 }
2689 
2690 /*
2691  * Eliminate all activity associated with a vnode
2692  * in preparation for reuse.
2693  */
2694 void
2695 vgone(struct vnode *vp)
2696 {
2697 	VI_LOCK(vp);
2698 	vgonel(vp);
2699 	VI_UNLOCK(vp);
2700 }
2701 
2702 static void
2703 vgonel_reclaim_lowervp_vfs(struct mount *mp __unused,
2704     struct vnode *lowervp __unused)
2705 {
2706 }
2707 
2708 /*
2709  * Notify the upper mounts about a reclaimed vnode.
2710  */
2711 static void
2712 vgonel_reclaim_lowervp(struct vnode *vp)
2713 {
2714 	static struct vfsops vgonel_vfsops = {
2715 		.vfs_reclaim_lowervp = vgonel_reclaim_lowervp_vfs
2716 	};
2717 	struct mount *mp, *ump, *mmp;
2718 
2719 	mp = vp->v_mount;
2720 	if (mp == NULL)
2721 		return;
2722 
2723 	MNT_ILOCK(mp);
2724 	if (TAILQ_EMPTY(&mp->mnt_uppers))
2725 		goto unlock;
2726 	MNT_IUNLOCK(mp);
2727 	mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO);
2728 	mmp->mnt_op = &vgonel_vfsops;
2729 	mmp->mnt_kern_flag |= MNTK_MARKER;
2730 	MNT_ILOCK(mp);
2731 	mp->mnt_kern_flag |= MNTK_VGONE_UPPER;
2732 	for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) {
2733 		if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) {
2734 			ump = TAILQ_NEXT(ump, mnt_upper_link);
2735 			continue;
2736 		}
2737 		TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link);
2738 		MNT_IUNLOCK(mp);
2739 		VFS_RECLAIM_LOWERVP(ump, vp);
2740 		MNT_ILOCK(mp);
2741 		ump = TAILQ_NEXT(mmp, mnt_upper_link);
2742 		TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link);
2743 	}
2744 	free(mmp, M_TEMP);
2745 	mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER;
2746 	if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) {
2747 		mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER;
2748 		wakeup(&mp->mnt_uppers);
2749 	}
2750 unlock:
2751 	MNT_IUNLOCK(mp);
2752 }
2753 
2754 /*
2755  * vgone, with the vp interlock held.
2756  */
2757 void
2758 vgonel(struct vnode *vp)
2759 {
2760 	struct thread *td;
2761 	int oweinact;
2762 	int active;
2763 	struct mount *mp;
2764 
2765 	ASSERT_VOP_ELOCKED(vp, "vgonel");
2766 	ASSERT_VI_LOCKED(vp, "vgonel");
2767 	VNASSERT(vp->v_holdcnt, vp,
2768 	    ("vgonel: vp %p has no reference.", vp));
2769 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2770 	td = curthread;
2771 
2772 	/*
2773 	 * Don't vgonel if we're already doomed.
2774 	 */
2775 	if (vp->v_iflag & VI_DOOMED)
2776 		return;
2777 	vp->v_iflag |= VI_DOOMED;
2778 
2779 	/*
2780 	 * Check to see if the vnode is in use.  If so, we have to call
2781 	 * VOP_CLOSE() and VOP_INACTIVE().
2782 	 */
2783 	active = vp->v_usecount;
2784 	oweinact = (vp->v_iflag & VI_OWEINACT);
2785 	VI_UNLOCK(vp);
2786 	vgonel_reclaim_lowervp(vp);
2787 
2788 	/*
2789 	 * Clean out any buffers associated with the vnode.
2790 	 * If the flush fails, just toss the buffers.
2791 	 */
2792 	mp = NULL;
2793 	if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
2794 		(void) vn_start_secondary_write(vp, &mp, V_WAIT);
2795 	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
2796 		vinvalbuf(vp, 0, 0, 0);
2797 
2798 	/*
2799 	 * If purging an active vnode, it must be closed and
2800 	 * deactivated before being reclaimed.
2801 	 */
2802 	if (active)
2803 		VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2804 	if (oweinact || active) {
2805 		VI_LOCK(vp);
2806 		if ((vp->v_iflag & VI_DOINGINACT) == 0)
2807 			vinactive(vp, td);
2808 		VI_UNLOCK(vp);
2809 	}
2810 	if (vp->v_type == VSOCK)
2811 		vfs_unp_reclaim(vp);
2812 	/*
2813 	 * Reclaim the vnode.
2814 	 */
2815 	if (VOP_RECLAIM(vp, td))
2816 		panic("vgone: cannot reclaim");
2817 	if (mp != NULL)
2818 		vn_finished_secondary_write(mp);
2819 	VNASSERT(vp->v_object == NULL, vp,
2820 	    ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
2821 	/*
2822 	 * Clear the advisory locks and wake up waiting threads.
2823 	 */
2824 	(void)VOP_ADVLOCKPURGE(vp);
2825 	/*
2826 	 * Delete from old mount point vnode list.
2827 	 */
2828 	delmntque(vp);
2829 	cache_purge(vp);
2830 	/*
2831 	 * Done with purge, reset to the standard lock and invalidate
2832 	 * the vnode.
2833 	 */
2834 	VI_LOCK(vp);
2835 	vp->v_vnlock = &vp->v_lock;
2836 	vp->v_op = &dead_vnodeops;
2837 	vp->v_tag = "none";
2838 	vp->v_type = VBAD;
2839 }
2840 
2841 /*
2842  * Calculate the total number of references to a special device.
2843  */
2844 int
2845 vcount(struct vnode *vp)
2846 {
2847 	int count;
2848 
2849 	dev_lock();
2850 	count = vp->v_rdev->si_usecount;
2851 	dev_unlock();
2852 	return (count);
2853 }
2854 
2855 /*
2856  * Same as above, but using the struct cdev * as the argument.
2857  */
2858 int
2859 count_dev(struct cdev *dev)
2860 {
2861 	int count;
2862 
2863 	dev_lock();
2864 	count = dev->si_usecount;
2865 	dev_unlock();
2866 	return(count);
2867 }
2868 
2869 /*
2870  * Print out a description of a vnode.
2871  */
2872 static char *typename[] =
2873 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
2874  "VMARKER"};
2875 
2876 void
2877 vn_printf(struct vnode *vp, const char *fmt, ...)
2878 {
2879 	va_list ap;
2880 	char buf[256], buf2[16];
2881 	u_long flags;
2882 
2883 	va_start(ap, fmt);
2884 	vprintf(fmt, ap);
2885 	va_end(ap);
2886 	printf("%p: ", (void *)vp);
2887 	printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
2888 	printf("    usecount %d, writecount %d, refcount %d mountedhere %p\n",
2889 	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
2890 	buf[0] = '\0';
2891 	buf[1] = '\0';
2892 	if (vp->v_vflag & VV_ROOT)
2893 		strlcat(buf, "|VV_ROOT", sizeof(buf));
2894 	if (vp->v_vflag & VV_ISTTY)
2895 		strlcat(buf, "|VV_ISTTY", sizeof(buf));
2896 	if (vp->v_vflag & VV_NOSYNC)
2897 		strlcat(buf, "|VV_NOSYNC", sizeof(buf));
2898 	if (vp->v_vflag & VV_ETERNALDEV)
2899 		strlcat(buf, "|VV_ETERNALDEV", sizeof(buf));
2900 	if (vp->v_vflag & VV_CACHEDLABEL)
2901 		strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
2902 	if (vp->v_vflag & VV_TEXT)
2903 		strlcat(buf, "|VV_TEXT", sizeof(buf));
2904 	if (vp->v_vflag & VV_COPYONWRITE)
2905 		strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
2906 	if (vp->v_vflag & VV_SYSTEM)
2907 		strlcat(buf, "|VV_SYSTEM", sizeof(buf));
2908 	if (vp->v_vflag & VV_PROCDEP)
2909 		strlcat(buf, "|VV_PROCDEP", sizeof(buf));
2910 	if (vp->v_vflag & VV_NOKNOTE)
2911 		strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
2912 	if (vp->v_vflag & VV_DELETED)
2913 		strlcat(buf, "|VV_DELETED", sizeof(buf));
2914 	if (vp->v_vflag & VV_MD)
2915 		strlcat(buf, "|VV_MD", sizeof(buf));
2916 	if (vp->v_vflag & VV_FORCEINSMQ)
2917 		strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf));
2918 	flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
2919 	    VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP |
2920 	    VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ);
2921 	if (flags != 0) {
2922 		snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
2923 		strlcat(buf, buf2, sizeof(buf));
2924 	}
2925 	if (vp->v_iflag & VI_MOUNT)
2926 		strlcat(buf, "|VI_MOUNT", sizeof(buf));
2927 	if (vp->v_iflag & VI_AGE)
2928 		strlcat(buf, "|VI_AGE", sizeof(buf));
2929 	if (vp->v_iflag & VI_DOOMED)
2930 		strlcat(buf, "|VI_DOOMED", sizeof(buf));
2931 	if (vp->v_iflag & VI_FREE)
2932 		strlcat(buf, "|VI_FREE", sizeof(buf));
2933 	if (vp->v_iflag & VI_ACTIVE)
2934 		strlcat(buf, "|VI_ACTIVE", sizeof(buf));
2935 	if (vp->v_iflag & VI_DOINGINACT)
2936 		strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
2937 	if (vp->v_iflag & VI_OWEINACT)
2938 		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
2939 	flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE |
2940 	    VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT);
2941 	if (flags != 0) {
2942 		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
2943 		strlcat(buf, buf2, sizeof(buf));
2944 	}
2945 	printf("    flags (%s)\n", buf + 1);
2946 	if (mtx_owned(VI_MTX(vp)))
2947 		printf(" VI_LOCKed");
2948 	if (vp->v_object != NULL)
2949 		printf("    v_object %p ref %d pages %d\n",
2950 		    vp->v_object, vp->v_object->ref_count,
2951 		    vp->v_object->resident_page_count);
2952 	printf("    ");
2953 	lockmgr_printinfo(vp->v_vnlock);
2954 	if (vp->v_data != NULL)
2955 		VOP_PRINT(vp);
2956 }
2957 
2958 #ifdef DDB
2959 /*
2960  * List all of the locked vnodes in the system.
2961  * Called when debugging the kernel.
2962  */
2963 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2964 {
2965 	struct mount *mp, *nmp;
2966 	struct vnode *vp;
2967 
2968 	/*
2969 	 * Note: because this is DDB, we can't obey the locking semantics
2970 	 * for these structures, which means we could catch an inconsistent
2971 	 * state and dereference a nasty pointer.  Not much to be done
2972 	 * about that.
2973 	 */
2974 	db_printf("Locked vnodes\n");
2975 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2976 		nmp = TAILQ_NEXT(mp, mnt_list);
2977 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2978 			if (vp->v_type != VMARKER &&
2979 			    VOP_ISLOCKED(vp))
2980 				vprint("", vp);
2981 		}
2982 		nmp = TAILQ_NEXT(mp, mnt_list);
2983 	}
2984 }
2985 
2986 /*
2987  * Show details about the given vnode.
2988  */
2989 DB_SHOW_COMMAND(vnode, db_show_vnode)
2990 {
2991 	struct vnode *vp;
2992 
2993 	if (!have_addr)
2994 		return;
2995 	vp = (struct vnode *)addr;
2996 	vn_printf(vp, "vnode ");
2997 }
2998 
2999 /*
3000  * Show details about the given mount point.
3001  */
3002 DB_SHOW_COMMAND(mount, db_show_mount)
3003 {
3004 	struct mount *mp;
3005 	struct vfsopt *opt;
3006 	struct statfs *sp;
3007 	struct vnode *vp;
3008 	char buf[512];
3009 	uint64_t mflags;
3010 	u_int flags;
3011 
3012 	if (!have_addr) {
3013 		/* No address given, print short info about all mount points. */
3014 		TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3015 			db_printf("%p %s on %s (%s)\n", mp,
3016 			    mp->mnt_stat.f_mntfromname,
3017 			    mp->mnt_stat.f_mntonname,
3018 			    mp->mnt_stat.f_fstypename);
3019 			if (db_pager_quit)
3020 				break;
3021 		}
3022 		db_printf("\nMore info: show mount <addr>\n");
3023 		return;
3024 	}
3025 
3026 	mp = (struct mount *)addr;
3027 	db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
3028 	    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
3029 
3030 	buf[0] = '\0';
3031 	mflags = mp->mnt_flag;
3032 #define	MNT_FLAG(flag)	do {						\
3033 	if (mflags & (flag)) {						\
3034 		if (buf[0] != '\0')					\
3035 			strlcat(buf, ", ", sizeof(buf));		\
3036 		strlcat(buf, (#flag) + 4, sizeof(buf));			\
3037 		mflags &= ~(flag);					\
3038 	}								\
3039 } while (0)
3040 	MNT_FLAG(MNT_RDONLY);
3041 	MNT_FLAG(MNT_SYNCHRONOUS);
3042 	MNT_FLAG(MNT_NOEXEC);
3043 	MNT_FLAG(MNT_NOSUID);
3044 	MNT_FLAG(MNT_NFS4ACLS);
3045 	MNT_FLAG(MNT_UNION);
3046 	MNT_FLAG(MNT_ASYNC);
3047 	MNT_FLAG(MNT_SUIDDIR);
3048 	MNT_FLAG(MNT_SOFTDEP);
3049 	MNT_FLAG(MNT_NOSYMFOLLOW);
3050 	MNT_FLAG(MNT_GJOURNAL);
3051 	MNT_FLAG(MNT_MULTILABEL);
3052 	MNT_FLAG(MNT_ACLS);
3053 	MNT_FLAG(MNT_NOATIME);
3054 	MNT_FLAG(MNT_NOCLUSTERR);
3055 	MNT_FLAG(MNT_NOCLUSTERW);
3056 	MNT_FLAG(MNT_SUJ);
3057 	MNT_FLAG(MNT_EXRDONLY);
3058 	MNT_FLAG(MNT_EXPORTED);
3059 	MNT_FLAG(MNT_DEFEXPORTED);
3060 	MNT_FLAG(MNT_EXPORTANON);
3061 	MNT_FLAG(MNT_EXKERB);
3062 	MNT_FLAG(MNT_EXPUBLIC);
3063 	MNT_FLAG(MNT_LOCAL);
3064 	MNT_FLAG(MNT_QUOTA);
3065 	MNT_FLAG(MNT_ROOTFS);
3066 	MNT_FLAG(MNT_USER);
3067 	MNT_FLAG(MNT_IGNORE);
3068 	MNT_FLAG(MNT_UPDATE);
3069 	MNT_FLAG(MNT_DELEXPORT);
3070 	MNT_FLAG(MNT_RELOAD);
3071 	MNT_FLAG(MNT_FORCE);
3072 	MNT_FLAG(MNT_SNAPSHOT);
3073 	MNT_FLAG(MNT_BYFSID);
3074 #undef MNT_FLAG
3075 	if (mflags != 0) {
3076 		if (buf[0] != '\0')
3077 			strlcat(buf, ", ", sizeof(buf));
3078 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
3079 		    "0x%016jx", mflags);
3080 	}
3081 	db_printf("    mnt_flag = %s\n", buf);
3082 
3083 	buf[0] = '\0';
3084 	flags = mp->mnt_kern_flag;
3085 #define	MNT_KERN_FLAG(flag)	do {					\
3086 	if (flags & (flag)) {						\
3087 		if (buf[0] != '\0')					\
3088 			strlcat(buf, ", ", sizeof(buf));		\
3089 		strlcat(buf, (#flag) + 5, sizeof(buf));			\
3090 		flags &= ~(flag);					\
3091 	}								\
3092 } while (0)
3093 	MNT_KERN_FLAG(MNTK_UNMOUNTF);
3094 	MNT_KERN_FLAG(MNTK_ASYNC);
3095 	MNT_KERN_FLAG(MNTK_SOFTDEP);
3096 	MNT_KERN_FLAG(MNTK_NOINSMNTQ);
3097 	MNT_KERN_FLAG(MNTK_DRAINING);
3098 	MNT_KERN_FLAG(MNTK_REFEXPIRE);
3099 	MNT_KERN_FLAG(MNTK_EXTENDED_SHARED);
3100 	MNT_KERN_FLAG(MNTK_SHARED_WRITES);
3101 	MNT_KERN_FLAG(MNTK_NO_IOPF);
3102 	MNT_KERN_FLAG(MNTK_VGONE_UPPER);
3103 	MNT_KERN_FLAG(MNTK_VGONE_WAITER);
3104 	MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT);
3105 	MNT_KERN_FLAG(MNTK_MARKER);
3106 	MNT_KERN_FLAG(MNTK_NOASYNC);
3107 	MNT_KERN_FLAG(MNTK_UNMOUNT);
3108 	MNT_KERN_FLAG(MNTK_MWAIT);
3109 	MNT_KERN_FLAG(MNTK_SUSPEND);
3110 	MNT_KERN_FLAG(MNTK_SUSPEND2);
3111 	MNT_KERN_FLAG(MNTK_SUSPENDED);
3112 	MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
3113 	MNT_KERN_FLAG(MNTK_NOKNOTE);
3114 #undef MNT_KERN_FLAG
3115 	if (flags != 0) {
3116 		if (buf[0] != '\0')
3117 			strlcat(buf, ", ", sizeof(buf));
3118 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
3119 		    "0x%08x", flags);
3120 	}
3121 	db_printf("    mnt_kern_flag = %s\n", buf);
3122 
3123 	db_printf("    mnt_opt = ");
3124 	opt = TAILQ_FIRST(mp->mnt_opt);
3125 	if (opt != NULL) {
3126 		db_printf("%s", opt->name);
3127 		opt = TAILQ_NEXT(opt, link);
3128 		while (opt != NULL) {
3129 			db_printf(", %s", opt->name);
3130 			opt = TAILQ_NEXT(opt, link);
3131 		}
3132 	}
3133 	db_printf("\n");
3134 
3135 	sp = &mp->mnt_stat;
3136 	db_printf("    mnt_stat = { version=%u type=%u flags=0x%016jx "
3137 	    "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
3138 	    "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
3139 	    "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
3140 	    (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
3141 	    (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
3142 	    (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
3143 	    (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
3144 	    (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
3145 	    (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
3146 	    (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
3147 	    (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
3148 
3149 	db_printf("    mnt_cred = { uid=%u ruid=%u",
3150 	    (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
3151 	if (jailed(mp->mnt_cred))
3152 		db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
3153 	db_printf(" }\n");
3154 	db_printf("    mnt_ref = %d\n", mp->mnt_ref);
3155 	db_printf("    mnt_gen = %d\n", mp->mnt_gen);
3156 	db_printf("    mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
3157 	db_printf("    mnt_activevnodelistsize = %d\n",
3158 	    mp->mnt_activevnodelistsize);
3159 	db_printf("    mnt_writeopcount = %d\n", mp->mnt_writeopcount);
3160 	db_printf("    mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
3161 	db_printf("    mnt_iosize_max = %d\n", mp->mnt_iosize_max);
3162 	db_printf("    mnt_hashseed = %u\n", mp->mnt_hashseed);
3163 	db_printf("    mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
3164 	db_printf("    mnt_secondary_accwrites = %d\n",
3165 	    mp->mnt_secondary_accwrites);
3166 	db_printf("    mnt_gjprovider = %s\n",
3167 	    mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
3168 
3169 	db_printf("\n\nList of active vnodes\n");
3170 	TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) {
3171 		if (vp->v_type != VMARKER) {
3172 			vn_printf(vp, "vnode ");
3173 			if (db_pager_quit)
3174 				break;
3175 		}
3176 	}
3177 	db_printf("\n\nList of inactive vnodes\n");
3178 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3179 		if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) {
3180 			vn_printf(vp, "vnode ");
3181 			if (db_pager_quit)
3182 				break;
3183 		}
3184 	}
3185 }
3186 #endif	/* DDB */
3187 
3188 /*
3189  * Fill in a struct xvfsconf based on a struct vfsconf.
3190  */
3191 static int
3192 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp)
3193 {
3194 	struct xvfsconf xvfsp;
3195 
3196 	bzero(&xvfsp, sizeof(xvfsp));
3197 	strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3198 	xvfsp.vfc_typenum = vfsp->vfc_typenum;
3199 	xvfsp.vfc_refcount = vfsp->vfc_refcount;
3200 	xvfsp.vfc_flags = vfsp->vfc_flags;
3201 	/*
3202 	 * These are unused in userland; we keep them
3203 	 * to avoid breaking binary compatibility.
3204 	 */
3205 	xvfsp.vfc_vfsops = NULL;
3206 	xvfsp.vfc_next = NULL;
3207 	return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3208 }
3209 
3210 #ifdef COMPAT_FREEBSD32
3211 struct xvfsconf32 {
3212 	uint32_t	vfc_vfsops;
3213 	char		vfc_name[MFSNAMELEN];
3214 	int32_t		vfc_typenum;
3215 	int32_t		vfc_refcount;
3216 	int32_t		vfc_flags;
3217 	uint32_t	vfc_next;
3218 };
3219 
3220 static int
3221 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp)
3222 {
3223 	struct xvfsconf32 xvfsp;
3224 
3225 	strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3226 	xvfsp.vfc_typenum = vfsp->vfc_typenum;
3227 	xvfsp.vfc_refcount = vfsp->vfc_refcount;
3228 	xvfsp.vfc_flags = vfsp->vfc_flags;
3229 	xvfsp.vfc_vfsops = 0;
3230 	xvfsp.vfc_next = 0;
3231 	return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3232 }
3233 #endif
3234 
3235 /*
3236  * Top level filesystem related information gathering.
3237  */
3238 static int
3239 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
3240 {
3241 	struct vfsconf *vfsp;
3242 	int error;
3243 
3244 	error = 0;
3245 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3246 #ifdef COMPAT_FREEBSD32
3247 		if (req->flags & SCTL_MASK32)
3248 			error = vfsconf2x32(req, vfsp);
3249 		else
3250 #endif
3251 			error = vfsconf2x(req, vfsp);
3252 		if (error)
3253 			break;
3254 	}
3255 	return (error);
3256 }
3257 
3258 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD,
3259     NULL, 0, sysctl_vfs_conflist,
3260     "S,xvfsconf", "List of all configured filesystems");
3261 
3262 #ifndef BURN_BRIDGES
3263 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
3264 
3265 static int
3266 vfs_sysctl(SYSCTL_HANDLER_ARGS)
3267 {
3268 	int *name = (int *)arg1 - 1;	/* XXX */
3269 	u_int namelen = arg2 + 1;	/* XXX */
3270 	struct vfsconf *vfsp;
3271 
3272 	log(LOG_WARNING, "userland calling deprecated sysctl, "
3273 	    "please rebuild world\n");
3274 
3275 #if 1 || defined(COMPAT_PRELITE2)
3276 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
3277 	if (namelen == 1)
3278 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
3279 #endif
3280 
3281 	switch (name[1]) {
3282 	case VFS_MAXTYPENUM:
3283 		if (namelen != 2)
3284 			return (ENOTDIR);
3285 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
3286 	case VFS_CONF:
3287 		if (namelen != 3)
3288 			return (ENOTDIR);	/* overloaded */
3289 		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
3290 			if (vfsp->vfc_typenum == name[2])
3291 				break;
3292 		if (vfsp == NULL)
3293 			return (EOPNOTSUPP);
3294 #ifdef COMPAT_FREEBSD32
3295 		if (req->flags & SCTL_MASK32)
3296 			return (vfsconf2x32(req, vfsp));
3297 		else
3298 #endif
3299 			return (vfsconf2x(req, vfsp));
3300 	}
3301 	return (EOPNOTSUPP);
3302 }
3303 
3304 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
3305     vfs_sysctl, "Generic filesystem");
3306 
3307 #if 1 || defined(COMPAT_PRELITE2)
3308 
3309 static int
3310 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3311 {
3312 	int error;
3313 	struct vfsconf *vfsp;
3314 	struct ovfsconf ovfs;
3315 
3316 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3317 		bzero(&ovfs, sizeof(ovfs));
3318 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3319 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3320 		ovfs.vfc_index = vfsp->vfc_typenum;
3321 		ovfs.vfc_refcount = vfsp->vfc_refcount;
3322 		ovfs.vfc_flags = vfsp->vfc_flags;
3323 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3324 		if (error)
3325 			return error;
3326 	}
3327 	return 0;
3328 }
3329 
3330 #endif /* 1 || COMPAT_PRELITE2 */
3331 #endif /* !BURN_BRIDGES */
3332 
3333 #define KINFO_VNODESLOP		10
3334 #ifdef notyet
3335 /*
3336  * Dump vnode list (via sysctl).
3337  */
3338 /* ARGSUSED */
3339 static int
3340 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3341 {
3342 	struct xvnode *xvn;
3343 	struct mount *mp;
3344 	struct vnode *vp;
3345 	int error, len, n;
3346 
3347 	/*
3348 	 * Stale numvnodes access is not fatal here.
3349 	 */
3350 	req->lock = 0;
3351 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3352 	if (!req->oldptr)
3353 		/* Make an estimate */
3354 		return (SYSCTL_OUT(req, 0, len));
3355 
3356 	error = sysctl_wire_old_buffer(req, 0);
3357 	if (error != 0)
3358 		return (error);
3359 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3360 	n = 0;
3361 	mtx_lock(&mountlist_mtx);
3362 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3363 		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
3364 			continue;
3365 		MNT_ILOCK(mp);
3366 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3367 			if (n == len)
3368 				break;
3369 			vref(vp);
3370 			xvn[n].xv_size = sizeof *xvn;
3371 			xvn[n].xv_vnode = vp;
3372 			xvn[n].xv_id = 0;	/* XXX compat */
3373 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3374 			XV_COPY(usecount);
3375 			XV_COPY(writecount);
3376 			XV_COPY(holdcnt);
3377 			XV_COPY(mount);
3378 			XV_COPY(numoutput);
3379 			XV_COPY(type);
3380 #undef XV_COPY
3381 			xvn[n].xv_flag = vp->v_vflag;
3382 
3383 			switch (vp->v_type) {
3384 			case VREG:
3385 			case VDIR:
3386 			case VLNK:
3387 				break;
3388 			case VBLK:
3389 			case VCHR:
3390 				if (vp->v_rdev == NULL) {
3391 					vrele(vp);
3392 					continue;
3393 				}
3394 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3395 				break;
3396 			case VSOCK:
3397 				xvn[n].xv_socket = vp->v_socket;
3398 				break;
3399 			case VFIFO:
3400 				xvn[n].xv_fifo = vp->v_fifoinfo;
3401 				break;
3402 			case VNON:
3403 			case VBAD:
3404 			default:
3405 				/* shouldn't happen? */
3406 				vrele(vp);
3407 				continue;
3408 			}
3409 			vrele(vp);
3410 			++n;
3411 		}
3412 		MNT_IUNLOCK(mp);
3413 		mtx_lock(&mountlist_mtx);
3414 		vfs_unbusy(mp);
3415 		if (n == len)
3416 			break;
3417 	}
3418 	mtx_unlock(&mountlist_mtx);
3419 
3420 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3421 	free(xvn, M_TEMP);
3422 	return (error);
3423 }
3424 
3425 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3426     0, 0, sysctl_vnode, "S,xvnode", "");
3427 #endif
3428 
3429 /*
3430  * Unmount all filesystems. The list is traversed in reverse order
3431  * of mounting to avoid dependencies.
3432  */
3433 void
3434 vfs_unmountall(void)
3435 {
3436 	struct mount *mp;
3437 	struct thread *td;
3438 	int error;
3439 
3440 	CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
3441 	td = curthread;
3442 
3443 	/*
3444 	 * Since this only runs when rebooting, it is not interlocked.
3445 	 */
3446 	while(!TAILQ_EMPTY(&mountlist)) {
3447 		mp = TAILQ_LAST(&mountlist, mntlist);
3448 		error = dounmount(mp, MNT_FORCE, td);
3449 		if (error) {
3450 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3451 			/*
3452 			 * XXX: Due to the way in which we mount the root
3453 			 * file system off of devfs, devfs will generate a
3454 			 * "busy" warning when we try to unmount it before
3455 			 * the root.  Don't print a warning as a result in
3456 			 * order to avoid false positive errors that may
3457 			 * cause needless upset.
3458 			 */
3459 			if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) {
3460 				printf("unmount of %s failed (",
3461 				    mp->mnt_stat.f_mntonname);
3462 				if (error == EBUSY)
3463 					printf("BUSY)\n");
3464 				else
3465 					printf("%d)\n", error);
3466 			}
3467 		} else {
3468 			/* The unmount has removed mp from the mountlist */
3469 		}
3470 	}
3471 }
3472 
3473  * Perform msync on all vnodes under a mount point.
3474  * The mount point must be locked.
3475  * the mount point must be locked.
3476  */
3477 void
3478 vfs_msync(struct mount *mp, int flags)
3479 {
3480 	struct vnode *vp, *mvp;
3481 	struct vm_object *obj;
3482 
3483 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
3484 	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
3485 		obj = vp->v_object;
3486 		if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 &&
3487 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
3488 			if (!vget(vp,
3489 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3490 			    curthread)) {
3491 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3492 					vput(vp);
3493 					continue;
3494 				}
3495 
3496 				obj = vp->v_object;
3497 				if (obj != NULL) {
3498 					VM_OBJECT_WLOCK(obj);
3499 					vm_object_page_clean(obj, 0, 0,
3500 					    flags == MNT_WAIT ?
3501 					    OBJPC_SYNC : OBJPC_NOSYNC);
3502 					VM_OBJECT_WUNLOCK(obj);
3503 				}
3504 				vput(vp);
3505 			}
3506 		} else
3507 			VI_UNLOCK(vp);
3508 	}
3509 }
3510 
3511 static void
3512 destroy_vpollinfo(struct vpollinfo *vi)
3513 {
3514 	seldrain(&vi->vpi_selinfo);
3515 	knlist_destroy(&vi->vpi_selinfo.si_note);
3516 	mtx_destroy(&vi->vpi_lock);
3517 	uma_zfree(vnodepoll_zone, vi);
3518 }
3519 
3520 /*
3521  * Initialize per-vnode helper structure to hold poll-related state.
3522  */
3523 void
3524 v_addpollinfo(struct vnode *vp)
3525 {
3526 	struct vpollinfo *vi;
3527 
3528 	if (vp->v_pollinfo != NULL)
3529 		return;
3530 	vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
3531 	mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3532 	knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
3533 	    vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked);
3534 	VI_LOCK(vp);
3535 	if (vp->v_pollinfo != NULL) {
3536 		VI_UNLOCK(vp);
3537 		destroy_vpollinfo(vi);
3538 		return;
3539 	}
3540 	vp->v_pollinfo = vi;
3541 	VI_UNLOCK(vp);
3542 }
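
/*
 * v_addpollinfo() above follows a common pattern: allocate and initialize
 * the new structure before taking the interlock (uma_zalloc() with M_WAITOK
 * may sleep), then recheck under the lock and discard the copy if another
 * thread raced ahead.  A generic sketch of that shape, with hypothetical
 * names (example_obj, example_attach, example_alloc, example_free):
 */
#if 0
static void
example_attach(struct example_obj *obj)
{
	struct example_part *part;

	part = example_alloc();		/* may sleep; no locks held */
	EXAMPLE_LOCK(obj);
	if (obj->o_part != NULL) {	/* lost the race */
		EXAMPLE_UNLOCK(obj);
		example_free(part);
		return;
	}
	obj->o_part = part;
	EXAMPLE_UNLOCK(obj);
}
#endif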
3543 
3544 /*
3545  * Record a process's interest in events which might happen to
3546  * a vnode.  Because poll uses the historic select-style interface
3547  * internally, this routine serves as both the ``check for any
3548  * pending events'' and the ``record my interest in future events''
3549  * functions.  (These are done together, while the lock is held,
3550  * to avoid race conditions.)
3551  */
3552 int
3553 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
3554 {
3555 
3556 	v_addpollinfo(vp);
3557 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3558 	if (vp->v_pollinfo->vpi_revents & events) {
3559 		/*
3560 		 * This leaves events we are not interested
3561 		 * in available for the other process which
3562 		 * presumably had requested them
3563 		 * (otherwise they would never have been
3564 		 * recorded).
3565 		 */
3566 		events &= vp->v_pollinfo->vpi_revents;
3567 		vp->v_pollinfo->vpi_revents &= ~events;
3568 
3569 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3570 		return (events);
3571 	}
3572 	vp->v_pollinfo->vpi_events |= events;
3573 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3574 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3575 	return (0);
3576 }
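
/*
 * A minimal sketch of a VOP_POLL implementation that has nothing ready and
 * simply records the caller's interest via vn_pollrecord(); a later vnode
 * event on the selinfo wakes the poller.  example_vop_poll() is hypothetical.
 */
#if 0
static int
example_vop_poll(struct vop_poll_args *ap)
{

	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
}
#endif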
3577 
3578 /*
3579  * Routine to create and manage a filesystem syncer vnode.
3580  */
3581 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3582 static int	sync_fsync(struct  vop_fsync_args *);
3583 static int	sync_inactive(struct  vop_inactive_args *);
3584 static int	sync_reclaim(struct  vop_reclaim_args *);
3585 
3586 static struct vop_vector sync_vnodeops = {
3587 	.vop_bypass =	VOP_EOPNOTSUPP,
3588 	.vop_close =	sync_close,		/* close */
3589 	.vop_fsync =	sync_fsync,		/* fsync */
3590 	.vop_inactive =	sync_inactive,	/* inactive */
3591 	.vop_reclaim =	sync_reclaim,	/* reclaim */
3592 	.vop_lock1 =	vop_stdlock,	/* lock */
3593 	.vop_unlock =	vop_stdunlock,	/* unlock */
3594 	.vop_islocked =	vop_stdislocked,	/* islocked */
3595 };
3596 
3597 /*
3598  * Create a new filesystem syncer vnode for the specified mount point.
3599  */
3600 void
3601 vfs_allocate_syncvnode(struct mount *mp)
3602 {
3603 	struct vnode *vp;
3604 	struct bufobj *bo;
3605 	static long start, incr, next;
3606 	int error;
3607 
3608 	/* Allocate a new vnode */
3609 	error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
3610 	if (error != 0)
3611 		panic("vfs_allocate_syncvnode: getnewvnode() failed");
3612 	vp->v_type = VNON;
3613 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3614 	vp->v_vflag |= VV_FORCEINSMQ;
3615 	error = insmntque(vp, mp);
3616 	if (error != 0)
3617 		panic("vfs_allocate_syncvnode: insmntque() failed");
3618 	vp->v_vflag &= ~VV_FORCEINSMQ;
3619 	VOP_UNLOCK(vp, 0);
3620 	/*
3621 	 * Place the vnode onto the syncer worklist. We attempt to
3622 	 * scatter them about on the list so that they will go off
3623 	 * at evenly distributed times even if all the filesystems
3624 	 * are mounted at once.
3625 	 */
3626 	next += incr;
3627 	if (next == 0 || next > syncer_maxdelay) {
3628 		start /= 2;
3629 		incr /= 2;
3630 		if (start == 0) {
3631 			start = syncer_maxdelay / 2;
3632 			incr = syncer_maxdelay;
3633 		}
3634 		next = start;
3635 	}
3636 	bo = &vp->v_bufobj;
3637 	BO_LOCK(bo);
3638 	vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
3639 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3640 	mtx_lock(&sync_mtx);
3641 	sync_vnode_count++;
3642 	if (mp->mnt_syncer == NULL) {
3643 		mp->mnt_syncer = vp;
3644 		vp = NULL;
3645 	}
3646 	mtx_unlock(&sync_mtx);
3647 	BO_UNLOCK(bo);
3648 	if (vp != NULL) {
3649 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3650 		vgone(vp);
3651 		vput(vp);
3652 	}
3653 }
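
/*
 * Worked example of the scatter above: with syncer_maxdelay == 32 and
 * start, incr and next all initially zero, successive calls compute
 * next = 16, 8, 24, 4, 12, 20, 28, 2, 6, ... -- a bit-reversal style
 * sequence that spreads syncer vnodes evenly over the worklist slots
 * instead of clustering them on a few ticks.
 */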
3654 
3655 void
3656 vfs_deallocate_syncvnode(struct mount *mp)
3657 {
3658 	struct vnode *vp;
3659 
3660 	mtx_lock(&sync_mtx);
3661 	vp = mp->mnt_syncer;
3662 	if (vp != NULL)
3663 		mp->mnt_syncer = NULL;
3664 	mtx_unlock(&sync_mtx);
3665 	if (vp != NULL)
3666 		vrele(vp);
3667 }
3668 
3669 /*
3670  * Do a lazy sync of the filesystem.
3671  */
3672 static int
3673 sync_fsync(struct vop_fsync_args *ap)
3674 {
3675 	struct vnode *syncvp = ap->a_vp;
3676 	struct mount *mp = syncvp->v_mount;
3677 	int error, save;
3678 	struct bufobj *bo;
3679 
3680 	/*
3681 	 * We only need to do something if this is a lazy evaluation.
3682 	 */
3683 	if (ap->a_waitfor != MNT_LAZY)
3684 		return (0);
3685 
3686 	/*
3687 	 * Move ourselves to the back of the sync list.
3688 	 */
3689 	bo = &syncvp->v_bufobj;
3690 	BO_LOCK(bo);
3691 	vn_syncer_add_to_worklist(bo, syncdelay);
3692 	BO_UNLOCK(bo);
3693 
3694 	/*
3695 	 * Walk the list of vnodes pushing all that are dirty and
3696 	 * not already on the sync list.
3697 	 */
3698 	mtx_lock(&mountlist_mtx);
3699 	if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) != 0) {
3700 		mtx_unlock(&mountlist_mtx);
3701 		return (0);
3702 	}
3703 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3704 		vfs_unbusy(mp);
3705 		return (0);
3706 	}
3707 	save = curthread_pflags_set(TDP_SYNCIO);
3708 	vfs_msync(mp, MNT_NOWAIT);
3709 	error = VFS_SYNC(mp, MNT_LAZY);
3710 	curthread_pflags_restore(save);
3711 	vn_finished_write(mp);
3712 	vfs_unbusy(mp);
3713 	return (error);
3714 }
3715 
3716 /*
3717  * The syncer vnode is no longer referenced.
3718  */
3719 static int
3720 sync_inactive(struct vop_inactive_args *ap)
3721 {
3722 
3723 	vgone(ap->a_vp);
3724 	return (0);
3725 }
3726 
3727 /*
3728  * The syncer vnode is no longer needed and is being decommissioned.
3729  *
3730  * Modifications to the worklist must be protected by sync_mtx.
3731  */
3732 static int
3733 sync_reclaim(struct vop_reclaim_args *ap)
3734 {
3735 	struct vnode *vp = ap->a_vp;
3736 	struct bufobj *bo;
3737 
3738 	bo = &vp->v_bufobj;
3739 	BO_LOCK(bo);
3740 	mtx_lock(&sync_mtx);
3741 	if (vp->v_mount->mnt_syncer == vp)
3742 		vp->v_mount->mnt_syncer = NULL;
3743 	if (bo->bo_flag & BO_ONWORKLST) {
3744 		LIST_REMOVE(bo, bo_synclist);
3745 		syncer_worklist_len--;
3746 		sync_vnode_count--;
3747 		bo->bo_flag &= ~BO_ONWORKLST;
3748 	}
3749 	mtx_unlock(&sync_mtx);
3750 	BO_UNLOCK(bo);
3751 
3752 	return (0);
3753 }
3754 
3755 /*
3756  * Check whether the vnode represents a disk device.
3757  */
3758 int
3759 vn_isdisk(struct vnode *vp, int *errp)
3760 {
3761 	int error;
3762 
3763 	error = 0;
3764 	dev_lock();
3765 	if (vp->v_type != VCHR)
3766 		error = ENOTBLK;
3767 	else if (vp->v_rdev == NULL)
3768 		error = ENXIO;
3769 	else if (vp->v_rdev->si_devsw == NULL)
3770 		error = ENXIO;
3771 	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
3772 		error = ENOTBLK;
3773 	dev_unlock();
3774 	if (errp != NULL)
3775 		*errp = error;
3776 	return (error == 0);
3777 }
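
/*
 * Usage sketch (hypothetical caller): the boolean result and the
 * errno-style detail are returned separately, so a caller typically
 * writes:
 */
#if 0
	int error;

	if (!vn_isdisk(vp, &error))
		return (error);		/* ENOTBLK or ENXIO from above */
#endif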
3778 
3779 /*
3780  * Common filesystem object access control check routine.  Accepts a
3781  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3782  * and optional call-by-reference privused argument allowing vaccess()
3783  * to indicate to the caller whether privilege was used to satisfy the
3784  * request (now obsolete).  Returns 0 on success, or an errno on failure.
3785  */
3786 int
3787 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
3788     accmode_t accmode, struct ucred *cred, int *privused)
3789 {
3790 	accmode_t dac_granted;
3791 	accmode_t priv_granted;
3792 
3793 	KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0,
3794 	    ("invalid bit in accmode"));
3795 	KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE),
3796 	    ("VAPPEND without VWRITE"));
3797 
3798 	/*
3799 	 * Look for a normal, non-privileged way to access the file/directory
3800 	 * as requested.  If it exists, go with that.
3801 	 */
3802 
3803 	if (privused != NULL)
3804 		*privused = 0;
3805 
3806 	dac_granted = 0;
3807 
3808 	/* Check the owner. */
3809 	if (cred->cr_uid == file_uid) {
3810 		dac_granted |= VADMIN;
3811 		if (file_mode & S_IXUSR)
3812 			dac_granted |= VEXEC;
3813 		if (file_mode & S_IRUSR)
3814 			dac_granted |= VREAD;
3815 		if (file_mode & S_IWUSR)
3816 			dac_granted |= (VWRITE | VAPPEND);
3817 
3818 		if ((accmode & dac_granted) == accmode)
3819 			return (0);
3820 
3821 		goto privcheck;
3822 	}
3823 
3824 	/* Otherwise, check the groups (first match) */
3825 	if (groupmember(file_gid, cred)) {
3826 		if (file_mode & S_IXGRP)
3827 			dac_granted |= VEXEC;
3828 		if (file_mode & S_IRGRP)
3829 			dac_granted |= VREAD;
3830 		if (file_mode & S_IWGRP)
3831 			dac_granted |= (VWRITE | VAPPEND);
3832 
3833 		if ((accmode & dac_granted) == accmode)
3834 			return (0);
3835 
3836 		goto privcheck;
3837 	}
3838 
3839 	/* Otherwise, check everyone else. */
3840 	if (file_mode & S_IXOTH)
3841 		dac_granted |= VEXEC;
3842 	if (file_mode & S_IROTH)
3843 		dac_granted |= VREAD;
3844 	if (file_mode & S_IWOTH)
3845 		dac_granted |= (VWRITE | VAPPEND);
3846 	if ((accmode & dac_granted) == accmode)
3847 		return (0);
3848 
3849 privcheck:
3850 	/*
3851 	 * Build a privilege mask to determine if the set of privileges
3852 	 * satisfies the requirements when combined with the granted mask
3853 	 * from above.  For each privilege, if the privilege is required,
3854 	 * bitwise or the request type onto the priv_granted mask.
3855 	 */
3856 	priv_granted = 0;
3857 
3858 	if (type == VDIR) {
3859 		/*
3860 		 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
3861 		 * requests, instead of PRIV_VFS_EXEC.
3862 		 */
3863 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3864 		    !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0))
3865 			priv_granted |= VEXEC;
3866 	} else {
3867 		/*
3868 		 * Ensure that at least one execute bit is on. Otherwise,
3869 		 * a privileged user will always succeed, and we don't want
3870 		 * this to happen unless the file really is executable.
3871 		 */
3872 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3873 		    (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
3874 		    !priv_check_cred(cred, PRIV_VFS_EXEC, 0))
3875 			priv_granted |= VEXEC;
3876 	}
3877 
3878 	if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
3879 	    !priv_check_cred(cred, PRIV_VFS_READ, 0))
3880 		priv_granted |= VREAD;
3881 
3882 	if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3883 	    !priv_check_cred(cred, PRIV_VFS_WRITE, 0))
3884 		priv_granted |= (VWRITE | VAPPEND);
3885 
3886 	if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3887 	    !priv_check_cred(cred, PRIV_VFS_ADMIN, 0))
3888 		priv_granted |= VADMIN;
3889 
3890 	if ((accmode & (priv_granted | dac_granted)) == accmode) {
3891 		/* XXX audit: privilege used */
3892 		if (privused != NULL)
3893 			*privused = 1;
3894 		return (0);
3895 	}
3896 
3897 	return ((accmode & VADMIN) ? EPERM : EACCES);
3898 }
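
/*
 * Usage sketch: a filesystem's VOP_ACCESS implementation typically
 * forwards its per-node ownership and mode to vaccess().  The
 * example_node type and its en_* fields are hypothetical.
 */
#if 0
static int
example_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct example_node *np = vp->v_data;

	return (vaccess(vp->v_type, np->en_mode, np->en_uid, np->en_gid,
	    ap->a_accmode, ap->a_cred, NULL));
}
#endif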
3899 
3900 /*
3901  * Credential check based on process requesting service, and per-attribute
3902  * permissions.
3903  */
3904 int
3905 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
3906     struct thread *td, accmode_t accmode)
3907 {
3908 
3909 	/*
3910 	 * Kernel-invoked requests always succeed.
3911 	 */
3912 	if (cred == NOCRED)
3913 		return (0);
3914 
3915 	/*
3916 	 * Do not allow privileged processes in jail to directly manipulate
3917 	 * system attributes.
3918 	 */
3919 	switch (attrnamespace) {
3920 	case EXTATTR_NAMESPACE_SYSTEM:
3921 		/* Potentially should be: return (EPERM); */
3922 		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0));
3923 	case EXTATTR_NAMESPACE_USER:
3924 		return (VOP_ACCESS(vp, accmode, cred, td));
3925 	default:
3926 		return (EPERM);
3927 	}
3928 }
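
/*
 * Usage sketch: an extended attribute VOP passes VREAD or VWRITE
 * depending on the direction of the operation, e.g. from a
 * hypothetical VOP_GETEXTATTR implementation:
 */
#if 0
	error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
	    ap->a_cred, ap->a_td, VREAD);
	if (error != 0)
		return (error);
#endif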
3929 
3930 #ifdef DEBUG_VFS_LOCKS
3931 /*
3932  * This only exists to suppress warnings from unlocked specfs accesses.  It is
3933  * no longer ok to have an unlocked VFS.
3934  */
3935 #define	IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL ||		\
3936 	(vp)->v_type == VCHR ||	(vp)->v_type == VBAD)
3937 
3938 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3939 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0,
3940     "Drop into debugger on lock violation");
3941 
3942 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3943 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex,
3944     0, "Check for interlock across VOPs");
3945 
3946 int vfs_badlock_print = 1;	/* Print lock violations. */
3947 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print,
3948     0, "Print lock violations");
3949 
3950 #ifdef KDB
3951 int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
3952 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW,
3953     &vfs_badlock_backtrace, 0, "Print backtrace at lock violations");
3954 #endif
3955 
3956 static void
3957 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3958 {
3959 
3960 #ifdef KDB
3961 	if (vfs_badlock_backtrace)
3962 		kdb_backtrace();
3963 #endif
3964 	if (vfs_badlock_print)
3965 		printf("%s: %p %s\n", str, (void *)vp, msg);
3966 	if (vfs_badlock_ddb)
3967 		kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
3968 }
3969 
3970 void
3971 assert_vi_locked(struct vnode *vp, const char *str)
3972 {
3973 
3974 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3975 		vfs_badlock("interlock is not locked but should be", str, vp);
3976 }
3977 
3978 void
3979 assert_vi_unlocked(struct vnode *vp, const char *str)
3980 {
3981 
3982 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3983 		vfs_badlock("interlock is locked but should not be", str, vp);
3984 }
3985 
3986 void
3987 assert_vop_locked(struct vnode *vp, const char *str)
3988 {
3989 	int locked;
3990 
3991 	if (!IGNORE_LOCK(vp)) {
3992 		locked = VOP_ISLOCKED(vp);
3993 		if (locked == 0 || locked == LK_EXCLOTHER)
3994 			vfs_badlock("is not locked but should be", str, vp);
3995 	}
3996 }
3997 
3998 void
3999 assert_vop_unlocked(struct vnode *vp, const char *str)
4000 {
4001 
4002 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
4003 		vfs_badlock("is locked but should not be", str, vp);
4004 }
4005 
4006 void
4007 assert_vop_elocked(struct vnode *vp, const char *str)
4008 {
4009 
4010 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
4011 		vfs_badlock("is not exclusive locked but should be", str, vp);
4012 }
4013 
4014 #if 0
4015 void
4016 assert_vop_elocked_other(struct vnode *vp, const char *str)
4017 {
4018 
4019 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER)
4020 		vfs_badlock("is not exclusive locked by another thread",
4021 		    str, vp);
4022 }
4023 
4024 void
4025 assert_vop_slocked(struct vnode *vp, const char *str)
4026 {
4027 
4028 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED)
4029 		vfs_badlock("is not locked shared but should be", str, vp);
4030 }
4031 #endif /* 0 */
4032 #endif /* DEBUG_VFS_LOCKS */
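
/*
 * Usage sketch: filesystem code states its locking expectations with
 * the corresponding ASSERT_* macros, which expand to nothing unless
 * DEBUG_VFS_LOCKS is configured; "example_setattr" is a hypothetical
 * caller name.
 */
#if 0
	ASSERT_VOP_ELOCKED(vp, "example_setattr");
	ASSERT_VI_UNLOCKED(vp, "example_setattr");
#endif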
4033 
4034 void
4035 vop_rename_fail(struct vop_rename_args *ap)
4036 {
4037 
4038 	if (ap->a_tvp != NULL)
4039 		vput(ap->a_tvp);
4040 	if (ap->a_tdvp == ap->a_tvp)
4041 		vrele(ap->a_tdvp);
4042 	else
4043 		vput(ap->a_tdvp);
4044 	vrele(ap->a_fdvp);
4045 	vrele(ap->a_fvp);
4046 }
4047 
4048 void
4049 vop_rename_pre(void *ap)
4050 {
4051 	struct vop_rename_args *a = ap;
4052 
4053 #ifdef DEBUG_VFS_LOCKS
4054 	if (a->a_tvp)
4055 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
4056 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
4057 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
4058 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
4059 
4060 	/* Check the source (from). */
4061 	if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock &&
4062 	    (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock))
4063 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
4064 	if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock)
4065 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
4066 
4067 	/* Check the target. */
4068 	if (a->a_tvp)
4069 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
4070 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
4071 #endif
4072 	if (a->a_tdvp != a->a_fdvp)
4073 		vhold(a->a_fdvp);
4074 	if (a->a_tvp != a->a_fvp)
4075 		vhold(a->a_fvp);
4076 	vhold(a->a_tdvp);
4077 	if (a->a_tvp)
4078 		vhold(a->a_tvp);
4079 }
4080 
4081 void
4082 vop_strategy_pre(void *ap)
4083 {
4084 #ifdef DEBUG_VFS_LOCKS
4085 	struct vop_strategy_args *a;
4086 	struct buf *bp;
4087 
4088 	a = ap;
4089 	bp = a->a_bp;
4090 
4091 	/*
4092 	 * Cluster ops lock their component buffers but not the IO container.
4093 	 */
4094 	if ((bp->b_flags & B_CLUSTER) != 0)
4095 		return;
4096 
4097 	if (panicstr == NULL && !BUF_ISLOCKED(bp)) {
4098 		if (vfs_badlock_print)
4099 			printf(
4100 			    "VOP_STRATEGY: bp is not locked but should be\n");
4101 		if (vfs_badlock_ddb)
4102 			kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
4103 	}
4104 #endif
4105 }
4106 
4107 void
4108 vop_lock_pre(void *ap)
4109 {
4110 #ifdef DEBUG_VFS_LOCKS
4111 	struct vop_lock1_args *a = ap;
4112 
4113 	if ((a->a_flags & LK_INTERLOCK) == 0)
4114 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
4115 	else
4116 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
4117 #endif
4118 }
4119 
4120 void
4121 vop_lock_post(void *ap, int rc)
4122 {
4123 #ifdef DEBUG_VFS_LOCKS
4124 	struct vop_lock1_args *a = ap;
4125 
4126 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
4127 	if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0)
4128 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
4129 #endif
4130 }
4131 
4132 void
4133 vop_unlock_pre(void *ap)
4134 {
4135 #ifdef DEBUG_VFS_LOCKS
4136 	struct vop_unlock_args *a = ap;
4137 
4138 	if (a->a_flags & LK_INTERLOCK)
4139 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
4140 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
4141 #endif
4142 }
4143 
4144 void
4145 vop_unlock_post(void *ap, int rc)
4146 {
4147 #ifdef DEBUG_VFS_LOCKS
4148 	struct vop_unlock_args *a = ap;
4149 
4150 	if (a->a_flags & LK_INTERLOCK)
4151 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
4152 #endif
4153 }
4154 
4155 void
4156 vop_create_post(void *ap, int rc)
4157 {
4158 	struct vop_create_args *a = ap;
4159 
4160 	if (!rc)
4161 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4162 }
4163 
4164 void
4165 vop_deleteextattr_post(void *ap, int rc)
4166 {
4167 	struct vop_deleteextattr_args *a = ap;
4168 
4169 	if (!rc)
4170 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4171 }
4172 
4173 void
4174 vop_link_post(void *ap, int rc)
4175 {
4176 	struct vop_link_args *a = ap;
4177 
4178 	if (!rc) {
4179 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
4180 		VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
4181 	}
4182 }
4183 
4184 void
4185 vop_mkdir_post(void *ap, int rc)
4186 {
4187 	struct vop_mkdir_args *a = ap;
4188 
4189 	if (!rc)
4190 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
4191 }
4192 
4193 void
4194 vop_mknod_post(void *ap, int rc)
4195 {
4196 	struct vop_mknod_args *a = ap;
4197 
4198 	if (!rc)
4199 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4200 }
4201 
4202 void
4203 vop_remove_post(void *ap, int rc)
4204 {
4205 	struct vop_remove_args *a = ap;
4206 
4207 	if (!rc) {
4208 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4209 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
4210 	}
4211 }
4212 
4213 void
4214 vop_rename_post(void *ap, int rc)
4215 {
4216 	struct vop_rename_args *a = ap;
4217 
4218 	if (!rc) {
4219 		VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE);
4220 		VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE);
4221 		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
4222 		if (a->a_tvp)
4223 			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
4224 	}
4225 	if (a->a_tdvp != a->a_fdvp)
4226 		vdrop(a->a_fdvp);
4227 	if (a->a_tvp != a->a_fvp)
4228 		vdrop(a->a_fvp);
4229 	vdrop(a->a_tdvp);
4230 	if (a->a_tvp)
4231 		vdrop(a->a_tvp);
4232 }
4233 
4234 void
4235 vop_rmdir_post(void *ap, int rc)
4236 {
4237 	struct vop_rmdir_args *a = ap;
4238 
4239 	if (!rc) {
4240 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
4241 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
4242 	}
4243 }
4244 
4245 void
4246 vop_setattr_post(void *ap, int rc)
4247 {
4248 	struct vop_setattr_args *a = ap;
4249 
4250 	if (!rc)
4251 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4252 }
4253 
4254 void
4255 vop_setextattr_post(void *ap, int rc)
4256 {
4257 	struct vop_setextattr_args *a = ap;
4258 
4259 	if (!rc)
4260 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4261 }
4262 
4263 void
4264 vop_symlink_post(void *ap, int rc)
4265 {
4266 	struct vop_symlink_args *a = ap;
4267 
4268 	if (!rc)
4269 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4270 }
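
/*
 * Userland sketch (hypothetical path, minimal error handling): the
 * VFS_KNOTE_*() calls above are what ultimately satisfy an
 * EVFILT_VNODE kqueue registration such as this one.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	struct kevent ev;
	int fd, kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	if ((fd = open("/tmp/watched", O_RDONLY)) == -1)
		err(1, "open");
	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_WRITE | NOTE_DELETE | NOTE_RENAME, 0, NULL);
	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)	/* register */
		err(1, "kevent");
	if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1)	/* wait */
		printf("fflags %#x\n", (unsigned)ev.fflags);
	return (0);
}
#endif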
4271 
4272 static struct knlist fs_knlist;
4273 
4274 static void
4275 vfs_event_init(void *arg)
4276 {
4277 	knlist_init_mtx(&fs_knlist, NULL);
4278 }
4279 /* XXX - correct order? */
4280 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
4281 
4282 void
4283 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
4284 {
4285 
4286 	KNOTE_UNLOCKED(&fs_knlist, event);
4287 }
4288 
4289 static int	filt_fsattach(struct knote *kn);
4290 static void	filt_fsdetach(struct knote *kn);
4291 static int	filt_fsevent(struct knote *kn, long hint);
4292 
4293 struct filterops fs_filtops = {
4294 	.f_isfd = 0,
4295 	.f_attach = filt_fsattach,
4296 	.f_detach = filt_fsdetach,
4297 	.f_event = filt_fsevent
4298 };
4299 
4300 static int
4301 filt_fsattach(struct knote *kn)
4302 {
4303 
4304 	kn->kn_flags |= EV_CLEAR;
4305 	knlist_add(&fs_knlist, kn, 0);
4306 	return (0);
4307 }
4308 
4309 static void
4310 filt_fsdetach(struct knote *kn)
4311 {
4312 
4313 	knlist_remove(&fs_knlist, kn, 0);
4314 }
4315 
4316 static int
4317 filt_fsevent(struct knote *kn, long hint)
4318 {
4319 
4320 	kn->kn_fflags |= hint;
4321 	return (kn->kn_fflags != 0);
4322 }
4323 
4324 static int
4325 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
4326 {
4327 	struct vfsidctl vc;
4328 	int error;
4329 	struct mount *mp;
4330 
4331 	error = SYSCTL_IN(req, &vc, sizeof(vc));
4332 	if (error)
4333 		return (error);
4334 	if (vc.vc_vers != VFS_CTL_VERS1)
4335 		return (EINVAL);
4336 	mp = vfs_getvfs(&vc.vc_fsid);
4337 	if (mp == NULL)
4338 		return (ENOENT);
4339 	/* Ensure that a specific sysctl goes to the right filesystem. */
4340 	if (strcmp(vc.vc_fstypename, "*") != 0 &&
4341 	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4342 		vfs_rel(mp);
4343 		return (EINVAL);
4344 	}
4345 	VCTLTOREQ(&vc, req);
4346 	error = VFS_SYSCTL(mp, vc.vc_op, req);
4347 	vfs_rel(mp);
4348 	return (error);
4349 }
4350 
4351 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR,
4352     NULL, 0, sysctl_vfs_ctl, "",
4353     "Sysctl by fsid");
4354 
4355 /*
4356  * Function to initialize a va_filerev field sensibly.
4357  * XXX: Wouldn't a random number make a lot more sense?
4358  */
4359 u_quad_t
4360 init_va_filerev(void)
4361 {
4362 	struct bintime bt;
4363 
4364 	getbinuptime(&bt);
4365 	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
4366 }
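
/*
 * Note on the packing above: bits 63..32 carry the boot-relative
 * seconds and bits 31..0 the upper half of the binary fraction (units
 * of about 0.23 ns), so successive calls yield monotonically
 * non-decreasing values.
 */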
4367 
4368 static int	filt_vfsread(struct knote *kn, long hint);
4369 static int	filt_vfswrite(struct knote *kn, long hint);
4370 static int	filt_vfsvnode(struct knote *kn, long hint);
4371 static void	filt_vfsdetach(struct knote *kn);
4372 static struct filterops vfsread_filtops = {
4373 	.f_isfd = 1,
4374 	.f_detach = filt_vfsdetach,
4375 	.f_event = filt_vfsread
4376 };
4377 static struct filterops vfswrite_filtops = {
4378 	.f_isfd = 1,
4379 	.f_detach = filt_vfsdetach,
4380 	.f_event = filt_vfswrite
4381 };
4382 static struct filterops vfsvnode_filtops = {
4383 	.f_isfd = 1,
4384 	.f_detach = filt_vfsdetach,
4385 	.f_event = filt_vfsvnode
4386 };
4387 
4388 static void
4389 vfs_knllock(void *arg)
4390 {
4391 	struct vnode *vp = arg;
4392 
4393 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4394 }
4395 
4396 static void
4397 vfs_knlunlock(void *arg)
4398 {
4399 	struct vnode *vp = arg;
4400 
4401 	VOP_UNLOCK(vp, 0);
4402 }
4403 
4404 static void
4405 vfs_knl_assert_locked(void *arg)
4406 {
4407 #ifdef DEBUG_VFS_LOCKS
4408 	struct vnode *vp = arg;
4409 
4410 	ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
4411 #endif
4412 }
4413 
4414 static void
4415 vfs_knl_assert_unlocked(void *arg)
4416 {
4417 #ifdef DEBUG_VFS_LOCKS
4418 	struct vnode *vp = arg;
4419 
4420 	ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
4421 #endif
4422 }
4423 
4424 int
4425 vfs_kqfilter(struct vop_kqfilter_args *ap)
4426 {
4427 	struct vnode *vp = ap->a_vp;
4428 	struct knote *kn = ap->a_kn;
4429 	struct knlist *knl;
4430 
4431 	switch (kn->kn_filter) {
4432 	case EVFILT_READ:
4433 		kn->kn_fop = &vfsread_filtops;
4434 		break;
4435 	case EVFILT_WRITE:
4436 		kn->kn_fop = &vfswrite_filtops;
4437 		break;
4438 	case EVFILT_VNODE:
4439 		kn->kn_fop = &vfsvnode_filtops;
4440 		break;
4441 	default:
4442 		return (EINVAL);
4443 	}
4444 
4445 	kn->kn_hook = (caddr_t)vp;
4446 
4447 	v_addpollinfo(vp);
4448 	if (vp->v_pollinfo == NULL)
4449 		return (ENOMEM);
4450 	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
4451 	knlist_add(knl, kn, 0);
4452 
4453 	return (0);
4454 }
4455 
4456 /*
4457  * Detach knote from vnode.
4458  */
4459 static void
4460 filt_vfsdetach(struct knote *kn)
4461 {
4462 	struct vnode *vp = (struct vnode *)kn->kn_hook;
4463 
4464 	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
4465 	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
4466 }
4467 
4468 /*ARGSUSED*/
4469 static int
4470 filt_vfsread(struct knote *kn, long hint)
4471 {
4472 	struct vnode *vp = (struct vnode *)kn->kn_hook;
4473 	struct vattr va;
4474 	int res;
4475 
4476 	/*
4477 	 * The filesystem is gone, so set the EOF flag and schedule
4478 	 * the knote for deletion.
4479 	 */
4480 	if (hint == NOTE_REVOKE) {
4481 		VI_LOCK(vp);
4482 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4483 		VI_UNLOCK(vp);
4484 		return (1);
4485 	}
4486 
4487 	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
4488 		return (0);
4489 
4490 	VI_LOCK(vp);
4491 	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
4492 	res = (kn->kn_data != 0);
4493 	VI_UNLOCK(vp);
4494 	return (res);
4495 }
4496 
4497 /*ARGSUSED*/
4498 static int
4499 filt_vfswrite(struct knote *kn, long hint)
4500 {
4501 	struct vnode *vp = (struct vnode *)kn->kn_hook;
4502 
4503 	VI_LOCK(vp);
4504 
4505 	/*
4506 	 * The filesystem is gone, so set the EOF flag and schedule
4507 	 * the knote for deletion.
4508 	 */
4509 	if (hint == NOTE_REVOKE)
4510 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4511 
4512 	kn->kn_data = 0;
4513 	VI_UNLOCK(vp);
4514 	return (1);
4515 }
4516 
4517 static int
4518 filt_vfsvnode(struct knote *kn, long hint)
4519 {
4520 	struct vnode *vp = (struct vnode *)kn->kn_hook;
4521 	int res;
4522 
4523 	VI_LOCK(vp);
4524 	if (kn->kn_sfflags & hint)
4525 		kn->kn_fflags |= hint;
4526 	if (hint == NOTE_REVOKE) {
4527 		kn->kn_flags |= EV_EOF;
4528 		VI_UNLOCK(vp);
4529 		return (1);
4530 	}
4531 	res = (kn->kn_fflags != 0);
4532 	VI_UNLOCK(vp);
4533 	return (res);
4534 }
4535 
4536 int
4537 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
4538 {
4539 	int error;
4540 
4541 	if (dp->d_reclen > ap->a_uio->uio_resid)
4542 		return (ENAMETOOLONG);
4543 	error = uiomove(dp, dp->d_reclen, ap->a_uio);
4544 	if (error) {
4545 		if (ap->a_ncookies != NULL) {
4546 			if (ap->a_cookies != NULL)
4547 				free(*ap->a_cookies, M_TEMP);
4548 			*ap->a_cookies = NULL;
4549 			*ap->a_ncookies = 0;
4550 		}
4551 		return (error);
4552 	}
4553 	if (ap->a_ncookies == NULL)
4554 		return (0);
4555 
4556 	KASSERT(ap->a_cookies,
4557 	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
4558 
4559 	*ap->a_cookies = realloc(*ap->a_cookies,
4560 	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
4561 	(*ap->a_cookies)[(*ap->a_ncookies)++] = off;
4562 	return (0);
4563 }
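
/*
 * Usage sketch: a VOP_READDIR implementation emits one entry at a time
 * through vfs_read_dirent() and treats ENAMETOOLONG as "uio is full"
 * rather than as an error.  example_readdir, example_dir and
 * example_nent are hypothetical.
 */
#if 0
static int
example_readdir(struct vop_readdir_args *ap)
{
	struct dirent dp;
	off_t off;
	int error;

	error = 0;
	for (off = ap->a_uio->uio_offset; off < example_nent; off++) {
		bzero(&dp, sizeof(dp));
		dp.d_fileno = example_dir[off].ino;
		dp.d_type = example_dir[off].type;
		strlcpy(dp.d_name, example_dir[off].name, sizeof(dp.d_name));
		dp.d_namlen = strlen(dp.d_name);
		dp.d_reclen = GENERIC_DIRSIZ(&dp);
		/* Pass the offset of the *next* entry as the cookie. */
		error = vfs_read_dirent(ap, &dp, off + 1);
		if (error != 0) {
			if (error == ENAMETOOLONG)
				error = 0;
			break;
		}
	}
	ap->a_uio->uio_offset = off;
	if (ap->a_eofflag != NULL)
		*ap->a_eofflag = (off == example_nent);
	return (error);
}
#endif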
4564 
4565 /*
4566  * Mark for update the access time of the file if the filesystem
4567  * supports VOP_MARKATIME.  This functionality is used by execve and
4568  * mmap, so we want to avoid the I/O implied by directly setting
4569  * va_atime for the sake of efficiency.
4570  */
4571 void
4572 vfs_mark_atime(struct vnode *vp, struct ucred *cred)
4573 {
4574 	struct mount *mp;
4575 
4576 	mp = vp->v_mount;
4577 	ASSERT_VOP_LOCKED(vp, "vfs_mark_atime");
4578 	if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
4579 		(void)VOP_MARKATIME(vp);
4580 }
4581 
4582 /*
4583  * The purpose of this routine is to remove granularity from accmode_t,
4584  * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
4585  * VADMIN and VAPPEND.
4586  *
4587  * If it returns 0, the caller is supposed to continue with the usual
4588  * access checks using 'accmode' as modified by this routine.  If it
4589  * returns nonzero value, the caller is supposed to return that value
4590  * as errno.
4591  *
4592  * Note that after this routine runs, accmode may be zero.
4593  */
4594 int
4595 vfs_unixify_accmode(accmode_t *accmode)
4596 {
4597 	/*
4598 	 * There is no way to specify explicit "deny" rule using
4599 	 * file mode or POSIX.1e ACLs.
4600 	 */
4601 	if (*accmode & VEXPLICIT_DENY) {
4602 		*accmode = 0;
4603 		return (0);
4604 	}
4605 
4606 	/*
4607 	 * None of these can be translated into usual access bits.
4608 	 * Also, the common case for NFSv4 ACLs is to not contain
4609 	 * either of these bits. Caller should check for VWRITE
4610 	 * on the containing directory instead.
4611 	 */
4612 	if (*accmode & (VDELETE_CHILD | VDELETE))
4613 		return (EPERM);
4614 
4615 	if (*accmode & VADMIN_PERMS) {
4616 		*accmode &= ~VADMIN_PERMS;
4617 		*accmode |= VADMIN;
4618 	}
4619 
4620 	/*
4621 	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
4622 	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
4623 	 */
4624 	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
4625 
4626 	return (0);
4627 }
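
/*
 * Usage sketch: a filesystem that stores only POSIX permissions runs an
 * incoming NFSv4-style access request through vfs_unixify_accmode()
 * before handing the residue to vaccess().  The np->en_* node fields
 * are hypothetical.
 */
#if 0
	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);
	if (accmode == 0)
		return (0);
	return (vaccess(vp->v_type, np->en_mode, np->en_uid, np->en_gid,
	    accmode, ap->a_cred, NULL));
#endif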
4628 
4629 /*
4630  * These are helper functions for filesystems to traverse all
4631  * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
4632  *
4633  * This interface replaces MNT_VNODE_FOREACH.
4634  */
4635 
4636 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
4637 
4638 struct vnode *
4639 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
4640 {
4641 	struct vnode *vp;
4642 
4643 	if (should_yield())
4644 		kern_yield(PRI_USER);
4645 	MNT_ILOCK(mp);
4646 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
4647 	vp = TAILQ_NEXT(*mvp, v_nmntvnodes);
4648 	while (vp != NULL && (vp->v_type == VMARKER ||
4649 	    (vp->v_iflag & VI_DOOMED) != 0))
4650 		vp = TAILQ_NEXT(vp, v_nmntvnodes);
4651 
4652 	/* Check if we are done */
4653 	if (vp == NULL) {
4654 		__mnt_vnode_markerfree_all(mvp, mp);
4655 		/* MNT_IUNLOCK(mp); -- done in above function */
4656 		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
4657 		return (NULL);
4658 	}
4659 	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
4660 	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
4661 	VI_LOCK(vp);
4662 	MNT_IUNLOCK(mp);
4663 	return (vp);
4664 }
4665 
4666 struct vnode *
4667 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
4668 {
4669 	struct vnode *vp;
4670 
4671 	*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
4672 	MNT_ILOCK(mp);
4673 	MNT_REF(mp);
4674 	(*mvp)->v_type = VMARKER;
4675 
4676 	vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
4677 	while (vp != NULL && (vp->v_type == VMARKER ||
4678 	    (vp->v_iflag & VI_DOOMED) != 0))
4679 		vp = TAILQ_NEXT(vp, v_nmntvnodes);
4680 
4681 	/* Check if we are done */
4682 	if (vp == NULL) {
4683 		MNT_REL(mp);
4684 		MNT_IUNLOCK(mp);
4685 		free(*mvp, M_VNODE_MARKER);
4686 		*mvp = NULL;
4687 		return (NULL);
4688 	}
4689 	(*mvp)->v_mount = mp;
4690 	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
4691 	VI_LOCK(vp);
4692 	MNT_IUNLOCK(mp);
4693 	return (vp);
4694 }
4695 
4697 void
4698 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
4699 {
4700 
4701 	if (*mvp == NULL) {
4702 		MNT_IUNLOCK(mp);
4703 		return;
4704 	}
4705 
4706 	mtx_assert(MNT_MTX(mp), MA_OWNED);
4707 
4708 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
4709 	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
4710 	MNT_REL(mp);
4711 	MNT_IUNLOCK(mp);
4712 	free(*mvp, M_VNODE_MARKER);
4713 	*mvp = NULL;
4714 }
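
/*
 * Usage sketch of the canonical traversal from a filesystem's point of
 * view.  Each vnode is returned with its interlock held, and the
 * iterator must be aborted if the loop exits early.
 */
#if 0
	struct vnode *mvp, *vp;
	int error;

	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		if (vp->v_type != VREG) {
			VI_UNLOCK(vp);
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread);
		if (error != 0) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		/* ... operate on the locked vnode ... */
		vput(vp);
	}
#endif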
4715 
4716 /*
4717  * These are helper functions for filesystems to traverse their
4718  * active vnodes.  See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h.
4719  */
4720 static void
4721 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
4722 {
4723 
4724 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
4725 
4726 	MNT_ILOCK(mp);
4727 	MNT_REL(mp);
4728 	MNT_IUNLOCK(mp);
4729 	free(*mvp, M_VNODE_MARKER);
4730 	*mvp = NULL;
4731 }
4732 
4733 static struct vnode *
4734 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
4735 {
4736 	struct vnode *vp, *nvp;
4737 
4738 	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
4739 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
4740 restart:
4741 	vp = TAILQ_NEXT(*mvp, v_actfreelist);
4742 	TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
4743 	while (vp != NULL) {
4744 		if (vp->v_type == VMARKER) {
4745 			vp = TAILQ_NEXT(vp, v_actfreelist);
4746 			continue;
4747 		}
4748 		if (!VI_TRYLOCK(vp)) {
4749 			if (mp_ncpus == 1 || should_yield()) {
4750 				TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist);
4751 				mtx_unlock(&vnode_free_list_mtx);
4752 				kern_yield(PRI_USER);
4753 				mtx_lock(&vnode_free_list_mtx);
4754 				goto restart;
4755 			}
4756 			continue;
4757 		}
4758 		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
4759 		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
4760 		    ("alien vnode on the active list %p %p", vp, mp));
4761 		if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0)
4762 			break;
4763 		nvp = TAILQ_NEXT(vp, v_actfreelist);
4764 		VI_UNLOCK(vp);
4765 		vp = nvp;
4766 	}
4767 
4768 	/* Check if we are done */
4769 	if (vp == NULL) {
4770 		mtx_unlock(&vnode_free_list_mtx);
4771 		mnt_vnode_markerfree_active(mvp, mp);
4772 		return (NULL);
4773 	}
4774 	TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist);
4775 	mtx_unlock(&vnode_free_list_mtx);
4776 	ASSERT_VI_LOCKED(vp, "active iter");
4777 	KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp));
4778 	return (vp);
4779 }
4780 
4781 struct vnode *
4782 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
4783 {
4784 
4785 	if (should_yield())
4786 		kern_yield(PRI_USER);
4787 	mtx_lock(&vnode_free_list_mtx);
4788 	return (mnt_vnode_next_active(mvp, mp));
4789 }
4790 
4791 struct vnode *
4792 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp)
4793 {
4794 	struct vnode *vp;
4795 
4796 	*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
4797 	MNT_ILOCK(mp);
4798 	MNT_REF(mp);
4799 	MNT_IUNLOCK(mp);
4800 	(*mvp)->v_type = VMARKER;
4801 	(*mvp)->v_mount = mp;
4802 
4803 	mtx_lock(&vnode_free_list_mtx);
4804 	vp = TAILQ_FIRST(&mp->mnt_activevnodelist);
4805 	if (vp == NULL) {
4806 		mtx_unlock(&vnode_free_list_mtx);
4807 		mnt_vnode_markerfree_active(mvp, mp);
4808 		return (NULL);
4809 	}
4810 	TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist);
4811 	return (mnt_vnode_next_active(mvp, mp));
4812 }
4813 
4814 void
4815 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
4816 {
4817 
4818 	if (*mvp == NULL)
4819 		return;
4820 
4821 	mtx_lock(&vnode_free_list_mtx);
4822 	TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
4823 	mtx_unlock(&vnode_free_list_mtx);
4824 	mnt_vnode_markerfree_active(mvp, mp);
4825 }
4826
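/*
 * Usage sketch for the active-list variant: vnodes come back with the
 * interlock held, so a loop that only inspects flags looks like:
 */
#if 0
	struct vnode *mvp, *vp;

	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
		if ((vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		/* ... note or process the vnode ... */
		VI_UNLOCK(vp);
	}
#endif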