xref: /freebsd/sys/kern/vfs_subr.c (revision 4cc3366de27dc5bfcf44c3b596ba850001e33933)
1 /*-
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  */
36 
37 /*
38  * External virtual filesystem routines
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_compat.h"
45 #include "opt_ddb.h"
46 #include "opt_watchdog.h"
47 
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/bio.h>
51 #include <sys/buf.h>
52 #include <sys/condvar.h>
53 #include <sys/conf.h>
54 #include <sys/dirent.h>
55 #include <sys/event.h>
56 #include <sys/eventhandler.h>
57 #include <sys/extattr.h>
58 #include <sys/file.h>
59 #include <sys/fcntl.h>
60 #include <sys/jail.h>
61 #include <sys/kdb.h>
62 #include <sys/kernel.h>
63 #include <sys/kthread.h>
64 #include <sys/lockf.h>
65 #include <sys/malloc.h>
66 #include <sys/mount.h>
67 #include <sys/namei.h>
68 #include <sys/pctrie.h>
69 #include <sys/priv.h>
70 #include <sys/reboot.h>
71 #include <sys/refcount.h>
72 #include <sys/rwlock.h>
73 #include <sys/sched.h>
74 #include <sys/sleepqueue.h>
75 #include <sys/smp.h>
76 #include <sys/stat.h>
77 #include <sys/sysctl.h>
78 #include <sys/syslog.h>
79 #include <sys/vmmeter.h>
80 #include <sys/vnode.h>
81 #include <sys/watchdog.h>
82 
83 #include <machine/stdarg.h>
84 
85 #include <security/mac/mac_framework.h>
86 
87 #include <vm/vm.h>
88 #include <vm/vm_object.h>
89 #include <vm/vm_extern.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_kern.h>
94 #include <vm/uma.h>
95 
96 #ifdef DDB
97 #include <ddb/ddb.h>
98 #endif
99 
100 static void	delmntque(struct vnode *vp);
101 static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
102 		    int slpflag, int slptimeo);
103 static void	syncer_shutdown(void *arg, int howto);
104 static int	vtryrecycle(struct vnode *vp);
105 static void	v_init_counters(struct vnode *);
106 static void	v_incr_usecount(struct vnode *);
107 static void	v_incr_usecount_locked(struct vnode *);
108 static void	v_incr_devcount(struct vnode *);
109 static void	v_decr_devcount(struct vnode *);
110 static void	vgonel(struct vnode *);
111 static void	vfs_knllock(void *arg);
112 static void	vfs_knlunlock(void *arg);
113 static void	vfs_knl_assert_locked(void *arg);
114 static void	vfs_knl_assert_unlocked(void *arg);
115 static void	vnlru_return_batches(struct vfsops *mnt_op);
116 static void	destroy_vpollinfo(struct vpollinfo *vi);
117 
118 /*
119  * Number of vnodes in existence.  Increased whenever getnewvnode()
120  * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode.
121  */
122 static unsigned long	numvnodes;
123 
124 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
125     "Number of vnodes in existence");
126 
127 static u_long vnodes_created;
128 SYSCTL_ULONG(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
129     0, "Number of vnodes created by getnewvnode");
130 
131 static u_long mnt_free_list_batch = 128;
132 SYSCTL_ULONG(_vfs, OID_AUTO, mnt_free_list_batch, CTLFLAG_RW,
133     &mnt_free_list_batch, 0, "Limit of vnodes held on mnt's free list");
134 
135 /*
136  * Conversion tables for conversion from vnode types to inode formats
137  * and back.
138  */
139 enum vtype iftovt_tab[16] = {
140 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
141 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
142 };
143 int vttoif_tab[10] = {
144 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
145 	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
146 };
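
/*
 * Illustrative sketch (IFTOVT() and VTTOIF() are the sys/vnode.h macros
 * that wrap these tables; "ip" below stands for a hypothetical
 * filesystem-private inode):
 *
 *	enum vtype vt = IFTOVT(ip->i_mode);	(e.g. S_IFDIR gives VDIR)
 *	mode_t fmt = VTTOIF(vp->v_type);	(e.g. VREG gives S_IFREG)
 *
 * Filesystems normally go through those macros rather than indexing the
 * tables directly.
 */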
147 
148 /*
149  * List of vnodes that are ready for recycling.
150  */
151 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
152 
153 /*
154  * "Free" vnode target.  Free vnodes are rarely completely free, but are
155  * just ones that are cheap to recycle.  Usually they are for files which
156  * have been stat'd but not read; these usually have inode and namecache
157  * data attached to them.  This target is the preferred minimum size of a
158  * sub-cache consisting mostly of such files. The system balances the size
159  * of this sub-cache with its complement to try to prevent either from
160  * thrashing while the other is relatively inactive.  The targets express
161  * a preference for the best balance.
162  *
163  * "Above" this target there are 2 further targets (watermarks) related
164  * to recycling of free vnodes.  In the best-operating case, the cache is
165  * exactly full, the free list has size between vlowat and vhiwat above the
166  * free target, and recycling from it and normal use maintains this state.
167  * Sometimes the free list is below vlowat or even empty, but this state
168  * is even better for immediate use provided the cache is not full.
169  * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
170  * ones) to reach one of these states.  The watermarks are currently hard-
171  * coded as 4% and 9% of the available space higher.  These and the default
172  * of 25% for wantfreevnodes are too large if the memory size is large.
173  * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
174  * whenever vnlru_proc() becomes active.
175  */
176 static u_long wantfreevnodes;
177 SYSCTL_ULONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
178     &wantfreevnodes, 0, "Target for minimum number of \"free\" vnodes");
179 static u_long freevnodes;
180 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
181     &freevnodes, 0, "Number of \"free\" vnodes");
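
/*
 * Rough worked example of the watermarks described above (a sketch; the
 * actual numbers depend on run-time tunables): with desiredvnodes =
 * 100000 and the default wantfreevnodes = desiredvnodes / 4 = 25000,
 * vspace() below computes gapvnodes = 75000, vhiwat = gapvnodes / 11 =
 * 6818 (about 9%) and vlowat = vhiwat / 2 = 3409.  vnlru_proc() is woken
 * once the available space falls below vlowat and keeps running until it
 * is back above vhiwat.
 */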
182 
183 static u_long recycles_count;
184 SYSCTL_ULONG(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count, 0,
185     "Number of vnodes recycled to meet vnode cache targets");
186 
187 /*
188  * Various variables used for debugging the new implementation of
189  * reassignbuf().
190  * XXX these are probably of (very) limited utility now.
191  */
192 static int reassignbufcalls;
193 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0,
194     "Number of calls to reassignbuf");
195 
196 static u_long free_owe_inact;
197 SYSCTL_ULONG(_vfs, OID_AUTO, free_owe_inact, CTLFLAG_RD, &free_owe_inact, 0,
198     "Number of times free vnodes kept on active list due to VFS "
199     "owing inactivation");
200 
201 /* To keep more than one thread at a time from running vfs_getnewfsid */
202 static struct mtx mntid_mtx;
203 
204 /*
205  * Lock for any access to the following:
206  *	vnode_free_list
207  *	numvnodes
208  *	freevnodes
209  */
210 static struct mtx vnode_free_list_mtx;
211 
212 /* Publicly exported FS */
213 struct nfs_public nfs_pub;
214 
215 static uma_zone_t buf_trie_zone;
216 
217 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
218 static uma_zone_t vnode_zone;
219 static uma_zone_t vnodepoll_zone;
220 
221 /*
222  * The workitem queue.
223  *
224  * It is useful to delay writes of file data and filesystem metadata
225  * for tens of seconds so that quickly created and deleted files need
226  * not waste disk bandwidth being created and removed. To realize this,
227  * we append vnodes to a "workitem" queue. When running with a soft
228  * updates implementation, most pending metadata dependencies should
229  * not wait for more than a few seconds. Thus, mounted block devices
230  * are delayed only about half the time that file data is delayed.
231  * Similarly, directory updates are more critical, so are only delayed
232  * about a third of the time that file data is delayed. Thus, there are
233  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
234  * one each second (driven off the filesystem syncer process). The
235  * syncer_delayno variable indicates the next queue that is to be processed.
236  * Items that need to be processed soon are placed in this queue:
237  *
238  *	syncer_workitem_pending[syncer_delayno]
239  *
240  * A delay of fifteen seconds is done by placing the request fifteen
241  * entries later in the queue:
242  *
243  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
244  *
245  */
246 static int syncer_delayno;
247 static long syncer_mask;
248 LIST_HEAD(synclist, bufobj);
249 static struct synclist *syncer_workitem_pending;
250 /*
251  * The sync_mtx protects:
252  *	bo->bo_synclist
253  *	sync_vnode_count
254  *	syncer_delayno
255  *	syncer_state
256  *	syncer_workitem_pending
257  *	syncer_worklist_len
258  *	rushjob
259  */
260 static struct mtx sync_mtx;
261 static struct cv sync_wakeup;
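
/*
 * A sketch of how a request enters the queue (roughly what
 * vn_syncer_add_to_worklist() does later in this file, shown here next to
 * the variables it uses; "delay" is the requested delay in seconds):
 *
 *	slot = (syncer_delayno + delay) & syncer_mask;
 *	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
 *
 * performed under sync_mtx.
 */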
262 
263 #define SYNCER_MAXDELAY		32
264 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
265 static int syncdelay = 30;		/* max time to delay syncing data */
266 static int filedelay = 30;		/* time to delay syncing files */
267 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
268     "Time to delay syncing files (in seconds)");
269 static int dirdelay = 29;		/* time to delay syncing directories */
270 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
271     "Time to delay syncing directories (in seconds)");
272 static int metadelay = 28;		/* time to delay syncing metadata */
273 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
274     "Time to delay syncing metadata (in seconds)");
275 static int rushjob;		/* number of slots to run ASAP */
276 static int stat_rush_requests;	/* number of times I/O speeded up */
277 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
278     "Number of times I/O speeded up (rush requests)");
279 
280 /*
281  * When shutting down the syncer, run it at four times normal speed.
282  */
283 #define SYNCER_SHUTDOWN_SPEEDUP		4
284 static int sync_vnode_count;
285 static int syncer_worklist_len;
286 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
287     syncer_state;
288 
289 /* Target for maximum number of vnodes. */
290 int desiredvnodes;
291 static int gapvnodes;		/* gap between wanted and desired */
292 static int vhiwat;		/* enough extras after expansion */
293 static int vlowat;		/* minimal extras before expansion */
294 static int vstir;		/* nonzero to stir non-free vnodes */
295 static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */
296 
297 static int
298 sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS)
299 {
300 	int error, old_desiredvnodes;
301 
302 	old_desiredvnodes = desiredvnodes;
303 	if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0)
304 		return (error);
305 	if (old_desiredvnodes != desiredvnodes) {
306 		wantfreevnodes = desiredvnodes / 4;
307 		/* XXX locking seems to be incomplete. */
308 		vfs_hash_changesize(desiredvnodes);
309 		cache_changesize(desiredvnodes);
310 	}
311 	return (0);
312 }
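
/*
 * Administrative sketch (shell usage, not code in this file): raising the
 * vnode target at run time goes through the handler above, which also
 * resizes the VFS hash table and the namecache:
 *
 *	# sysctl kern.maxvnodes=500000
 *
 * wantfreevnodes is recomputed as desiredvnodes / 4 at the same time.
 */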
313 
314 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
315     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0,
316     sysctl_update_desiredvnodes, "I", "Target for maximum number of vnodes");
317 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
318     &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
319 static int vnlru_nowhere;
320 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
321     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
322 
323 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
324 static int vnsz2log;
325 
326 /*
327  * Support for the bufobj clean & dirty pctrie.
328  */
329 static void *
330 buf_trie_alloc(struct pctrie *ptree)
331 {
332 
333 	return uma_zalloc(buf_trie_zone, M_NOWAIT);
334 }
335 
336 static void
337 buf_trie_free(struct pctrie *ptree, void *node)
338 {
339 
340 	uma_zfree(buf_trie_zone, node);
341 }
342 PCTRIE_DEFINE(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free);
343 
344 /*
345  * Initialize the vnode management data structures.
346  *
347  * Reevaluate the following cap on the number of vnodes after the physical
348  * memory size exceeds 512GB.  In the limit, as the physical memory size
349  * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
350  */
351 #ifndef	MAXVNODES_MAX
352 #define	MAXVNODES_MAX	(512 * 1024 * 1024 / 64)	/* 8M */
353 #endif
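
/*
 * Worked out: 512GB of physical memory is 512 * 1024 * 1024 KB, and at
 * the limiting ratio of 64 KB of memory per vnode this caps the table at
 * 536870912 / 64 = 8388608 vnodes, the 8M noted above.
 */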
354 
355 /*
356  * Initialize a vnode as it first enters the zone.
357  */
358 static int
359 vnode_init(void *mem, int size, int flags)
360 {
361 	struct vnode *vp;
362 	struct bufobj *bo;
363 
364 	vp = mem;
365 	bzero(vp, size);
366 	/*
367 	 * Setup locks.
368 	 */
369 	vp->v_vnlock = &vp->v_lock;
370 	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
371 	/*
372 	 * By default, don't allow shared locks unless filesystems opt-in.
373 	 */
374 	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
375 	    LK_NOSHARE | LK_IS_VNODE);
376 	/*
377 	 * Initialize bufobj.
378 	 */
379 	bo = &vp->v_bufobj;
380 	rw_init(BO_LOCKPTR(bo), "bufobj interlock");
381 	bo->bo_private = vp;
382 	TAILQ_INIT(&bo->bo_clean.bv_hd);
383 	TAILQ_INIT(&bo->bo_dirty.bv_hd);
384 	/*
385 	 * Initialize namecache.
386 	 */
387 	LIST_INIT(&vp->v_cache_src);
388 	TAILQ_INIT(&vp->v_cache_dst);
389 	/*
390 	 * Initialize rangelocks.
391 	 */
392 	rangelock_init(&vp->v_rl);
393 	return (0);
394 }
395 
396 /*
397  * Free a vnode when it is cleared from the zone.
398  */
399 static void
400 vnode_fini(void *mem, int size)
401 {
402 	struct vnode *vp;
403 	struct bufobj *bo;
404 
405 	vp = mem;
406 	rangelock_destroy(&vp->v_rl);
407 	lockdestroy(vp->v_vnlock);
408 	mtx_destroy(&vp->v_interlock);
409 	bo = &vp->v_bufobj;
410 	rw_destroy(BO_LOCKPTR(bo));
411 }
412 
413 /*
414  * Provide the size of NFS nclnode and NFS fh for calculation of the
415  * vnode memory consumption.  The size is specified directly to
416  * eliminate dependency on NFS-private header.
417  *
418  * Other filesystems (like UFS and ZFS) may use bigger or smaller
419  * private inode data, but the NFS-based estimation is ample enough.
420  * Still, we care about differences in the size between 64- and 32-bit
421  * platforms.
422  *
423  * Namecache structure size is heuristically
424  * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
425  */
426 #ifdef _LP64
427 #define	NFS_NCLNODE_SZ	(528 + 64)
428 #define	NC_SZ		148
429 #else
430 #define	NFS_NCLNODE_SZ	(360 + 32)
431 #define	NC_SZ		92
432 #endif
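
/*
 * A symbolic sketch of how these constants feed the sizing in vntblinit()
 * below (the real struct sizes vary with the kernel configuration):
 *
 *	per_vnode = sizeof(struct vm_object) + sizeof(struct vnode) +
 *	    NC_SZ * ncsizefactor + NFS_NCLNODE_SZ;
 *	virtvnodes = vm_kmem_size / (10 * per_vnode);
 *
 * i.e., the vnodes and their supporting structures are allowed to consume
 * at most 1/10th of the kernel heap.
 */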
433 
434 static void
435 vntblinit(void *dummy __unused)
436 {
437 	u_int i;
438 	int physvnodes, virtvnodes;
439 
440 	/*
441 	 * Desiredvnodes is a function of the physical memory size and the
442 	 * kernel's heap size.  Generally speaking, it scales with the
443 	 * physical memory size.  The ratio of desiredvnodes to the physical
444 	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
445 	 * Thereafter, the
446 	 * marginal ratio of desiredvnodes to the physical memory size is
447 	 * 1:64.  However, desiredvnodes is limited by the kernel's heap
448 	 * size.  The memory required by desiredvnodes vnodes and vm objects
449 	 * must not exceed 1/10th of the kernel's heap size.
450 	 */
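	/*
	 * A rough worked example (numbers are illustrative only): on a
	 * machine with about 4GB of RAM, pgtok(vm_cnt.v_page_count) is
	 * roughly 4194304 KB, so ignoring maxproc the formula below gives
	 * 4194304 / 64 + 3 * min(1572864, 4194304) / 64 =
	 * 65536 + 73728 = 139264 vnodes, i.e. well past the 98304 knee
	 * where the marginal ratio drops from 1:16 to 1:64.
	 */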
451 	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
452 	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
453 	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
454 	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
455 	desiredvnodes = min(physvnodes, virtvnodes);
456 	if (desiredvnodes > MAXVNODES_MAX) {
457 		if (bootverbose)
458 			printf("Reducing kern.maxvnodes %d -> %d\n",
459 			    desiredvnodes, MAXVNODES_MAX);
460 		desiredvnodes = MAXVNODES_MAX;
461 	}
462 	wantfreevnodes = desiredvnodes / 4;
463 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
464 	TAILQ_INIT(&vnode_free_list);
465 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
466 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
467 	    vnode_init, vnode_fini, UMA_ALIGN_PTR, 0);
468 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
469 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
470 	/*
471 	 * Preallocate enough nodes to support one per buf so that
472 	 * we cannot fail an insert.  reassignbuf() callers cannot
473 	 * tolerate insertion failure.
474 	 */
475 	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
476 	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
477 	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
478 	uma_prealloc(buf_trie_zone, nbuf);
479 	/*
480 	 * Initialize the filesystem syncer.
481 	 */
482 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
483 	    &syncer_mask);
484 	syncer_maxdelay = syncer_mask + 1;
485 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
486 	cv_init(&sync_wakeup, "syncer");
487 	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
488 		vnsz2log++;
489 	vnsz2log--;
490 }
491 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
492 
493 
494 /*
495  * Mark a mount point as busy. Used to synchronize access and to delay
496  * unmounting. Note that mountlist_mtx is not released on failure.
497  *
498  * vfs_busy() is a custom lock, it can block the caller.
499  * vfs_busy() only sleeps if the unmount is active on the mount point.
500  * For a mountpoint mp, the vfs_busy-enforced lock is ordered before the
501  * lock of any vnode belonging to mp.
502  *
503  * Lookup uses vfs_busy() to traverse mount points.
504  * root fs			var fs
505  * / vnode lock		A	/ vnode lock (/var)		D
506  * /var vnode lock	B	/log vnode lock(/var/log)	E
507  * vfs_busy lock	C	vfs_busy lock			F
508  *
509  * Within each file system, the lock order is C->A->B and F->D->E.
510  *
511  * When traversing across mounts, the system follows that lock order:
512  *
513  *        C->A->B
514  *              |
515  *              +->F->D->E
516  *
517  * The lookup() process for namei("/var") illustrates the process:
518  *  VOP_LOOKUP() obtains B while A is held
519  *  vfs_busy() obtains a shared lock on F while A and B are held
520  *  vput() releases lock on B
521  *  vput() releases lock on A
522  *  VFS_ROOT() obtains lock on D while shared lock on F is held
523  *  vfs_unbusy() releases shared lock on F
524  *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
525  *    Attempt to lock A (instead of vp_crossmp) while D is held would
526  *    violate the global order, causing deadlocks.
527  *
528  * dounmount() locks B while F is drained.
529  */
530 int
531 vfs_busy(struct mount *mp, int flags)
532 {
533 
534 	MPASS((flags & ~MBF_MASK) == 0);
535 	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
536 
537 	MNT_ILOCK(mp);
538 	MNT_REF(mp);
539 	/*
540 	 * If mount point is currently being unmounted, sleep until the
541  * mount point fate is decided.  If the thread doing the unmounting fails,
542 	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
543 	 * that this mount point has survived the unmount attempt and vfs_busy
544 	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
545 	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
546 	 * about to be really destroyed.  vfs_busy needs to release its
547 	 * reference on the mount point in this case and return with ENOENT,
548  * telling the caller that the mount it tried to busy is no longer
549 	 * valid.
550 	 */
551 	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
552 		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
553 			MNT_REL(mp);
554 			MNT_IUNLOCK(mp);
555 			CTR1(KTR_VFS, "%s: failed busying before sleeping",
556 			    __func__);
557 			return (ENOENT);
558 		}
559 		if (flags & MBF_MNTLSTLOCK)
560 			mtx_unlock(&mountlist_mtx);
561 		mp->mnt_kern_flag |= MNTK_MWAIT;
562 		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
563 		if (flags & MBF_MNTLSTLOCK)
564 			mtx_lock(&mountlist_mtx);
565 		MNT_ILOCK(mp);
566 	}
567 	if (flags & MBF_MNTLSTLOCK)
568 		mtx_unlock(&mountlist_mtx);
569 	mp->mnt_lockref++;
570 	MNT_IUNLOCK(mp);
571 	return (0);
572 }
573 
574 /*
575  * Free a busy filesystem.
576  */
577 void
578 vfs_unbusy(struct mount *mp)
579 {
580 
581 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
582 	MNT_ILOCK(mp);
583 	MNT_REL(mp);
584 	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
585 	mp->mnt_lockref--;
586 	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
587 		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
588 		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
589 		mp->mnt_kern_flag &= ~MNTK_DRAINING;
590 		wakeup(&mp->mnt_lockref);
591 	}
592 	MNT_IUNLOCK(mp);
593 }
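
/*
 * Typical usage sketch (assumed caller-side code, not from this file):
 * code that wants to work on a mount point without racing an unmount
 * brackets the work with the pair above:
 *
 *	if (vfs_busy(mp, MBF_NOWAIT) != 0)
 *		return (ENOENT);
 *	... work with mp, e.g. walk mp->mnt_nvnodelist ...
 *	vfs_unbusy(mp);
 */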
594 
595 /*
596  * Lookup a mount point by filesystem identifier.
597  */
598 struct mount *
599 vfs_getvfs(fsid_t *fsid)
600 {
601 	struct mount *mp;
602 
603 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
604 	mtx_lock(&mountlist_mtx);
605 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
606 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
607 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
608 			vfs_ref(mp);
609 			mtx_unlock(&mountlist_mtx);
610 			return (mp);
611 		}
612 	}
613 	mtx_unlock(&mountlist_mtx);
614 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
615 	return ((struct mount *) 0);
616 }
617 
618 /*
619  * Lookup a mount point by filesystem identifier, busying it before
620  * returning.
621  *
622  * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
623  * cache for popular filesystem identifiers.  The cache is lockless, using
624  * the fact that a struct mount is never freed.  In the worst case we may
625  * get a pointer to an unmounted or even a different filesystem, so we
626  * have to check what we got, and take the slow path if so.
627  */
628 struct mount *
629 vfs_busyfs(fsid_t *fsid)
630 {
631 #define	FSID_CACHE_SIZE	256
632 	typedef struct mount * volatile vmp_t;
633 	static vmp_t cache[FSID_CACHE_SIZE];
634 	struct mount *mp;
635 	int error;
636 	uint32_t hash;
637 
638 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
639 	hash = fsid->val[0] ^ fsid->val[1];
640 	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
641 	mp = cache[hash];
642 	if (mp == NULL ||
643 	    mp->mnt_stat.f_fsid.val[0] != fsid->val[0] ||
644 	    mp->mnt_stat.f_fsid.val[1] != fsid->val[1])
645 		goto slow;
646 	if (vfs_busy(mp, 0) != 0) {
647 		cache[hash] = NULL;
648 		goto slow;
649 	}
650 	if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
651 	    mp->mnt_stat.f_fsid.val[1] == fsid->val[1])
652 		return (mp);
653 	else
654 	    vfs_unbusy(mp);
655 
656 slow:
657 	mtx_lock(&mountlist_mtx);
658 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
659 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
660 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
661 			error = vfs_busy(mp, MBF_MNTLSTLOCK);
662 			if (error) {
663 				cache[hash] = NULL;
664 				mtx_unlock(&mountlist_mtx);
665 				return (NULL);
666 			}
667 			cache[hash] = mp;
668 			return (mp);
669 		}
670 	}
671 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
672 	mtx_unlock(&mountlist_mtx);
673 	return ((struct mount *) 0);
674 }
675 
676 /*
677  * Check if a user can access privileged mount options.
678  */
679 int
680 vfs_suser(struct mount *mp, struct thread *td)
681 {
682 	int error;
683 
684 	/*
685 	 * If the thread is jailed, but this is not a jail-friendly file
686 	 * system, deny immediately.
687 	 */
688 	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
689 		return (EPERM);
690 
691 	/*
692 	 * If the file system was mounted outside the jail of the calling
693 	 * thread, deny immediately.
694 	 */
695 	if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
696 		return (EPERM);
697 
698 	/*
699 	 * If file system supports delegated administration, we don't check
700 	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
701 	 * by the file system itself.
702 	 * If this is not the user that did original mount, we check for
703 	 * the PRIV_VFS_MOUNT_OWNER privilege.
704 	 */
705 	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
706 	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
707 		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
708 			return (error);
709 	}
710 	return (0);
711 }
712 
713 /*
714  * Get a new unique fsid.  Try to make its val[0] unique, since this value
715  * will be used to create fake device numbers for stat().  Also try (but
716  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
717  * support 16-bit device numbers.  We end up with unique val[0]'s for the
718  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
719  *
720  * Keep in mind that several mounts may be running in parallel.  Starting
721  * the search one past where the previous search terminated is both a
722  * micro-optimization and a defense against returning the same fsid to
723  * different mounts.
724  */
725 void
726 vfs_getnewfsid(struct mount *mp)
727 {
728 	static uint16_t mntid_base;
729 	struct mount *nmp;
730 	fsid_t tfsid;
731 	int mtype;
732 
733 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
734 	mtx_lock(&mntid_mtx);
735 	mtype = mp->mnt_vfc->vfc_typenum;
736 	tfsid.val[1] = mtype;
737 	mtype = (mtype & 0xFF) << 24;
738 	for (;;) {
739 		tfsid.val[0] = makedev(255,
740 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
741 		mntid_base++;
742 		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
743 			break;
744 		vfs_rel(nmp);
745 	}
746 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
747 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
748 	mtx_unlock(&mntid_mtx);
749 }
750 
751 /*
752  * Knob to control the precision of file timestamps:
753  *
754  *   0 = seconds only; nanoseconds zeroed.
755  *   1 = seconds and nanoseconds, accurate within 1/HZ.
756  *   2 = seconds and nanoseconds, truncated to microseconds.
757  * >=3 = seconds and nanoseconds, maximum precision.
758  */
759 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
760 
761 static int timestamp_precision = TSP_USEC;
762 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
763     &timestamp_precision, 0, "File timestamp precision (0: seconds, "
764     "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
765     "3+: sec + ns (max. precision))");
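
/*
 * Administrative sketch: switching a running system to full-precision
 * timestamps is a matter of
 *
 *	# sysctl vfs.timestamp_precision=3
 *
 * The default, TSP_USEC, pays for a microtime() call per timestamp.
 */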
766 
767 /*
768  * Get a current timestamp.
769  */
770 void
771 vfs_timestamp(struct timespec *tsp)
772 {
773 	struct timeval tv;
774 
775 	switch (timestamp_precision) {
776 	case TSP_SEC:
777 		tsp->tv_sec = time_second;
778 		tsp->tv_nsec = 0;
779 		break;
780 	case TSP_HZ:
781 		getnanotime(tsp);
782 		break;
783 	case TSP_USEC:
784 		microtime(&tv);
785 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
786 		break;
787 	case TSP_NSEC:
788 	default:
789 		nanotime(tsp);
790 		break;
791 	}
792 }
793 
794 /*
795  * Set vnode attributes to VNOVAL
796  */
797 void
798 vattr_null(struct vattr *vap)
799 {
800 
801 	vap->va_type = VNON;
802 	vap->va_size = VNOVAL;
803 	vap->va_bytes = VNOVAL;
804 	vap->va_mode = VNOVAL;
805 	vap->va_nlink = VNOVAL;
806 	vap->va_uid = VNOVAL;
807 	vap->va_gid = VNOVAL;
808 	vap->va_fsid = VNOVAL;
809 	vap->va_fileid = VNOVAL;
810 	vap->va_blocksize = VNOVAL;
811 	vap->va_rdev = VNOVAL;
812 	vap->va_atime.tv_sec = VNOVAL;
813 	vap->va_atime.tv_nsec = VNOVAL;
814 	vap->va_mtime.tv_sec = VNOVAL;
815 	vap->va_mtime.tv_nsec = VNOVAL;
816 	vap->va_ctime.tv_sec = VNOVAL;
817 	vap->va_ctime.tv_nsec = VNOVAL;
818 	vap->va_birthtime.tv_sec = VNOVAL;
819 	vap->va_birthtime.tv_nsec = VNOVAL;
820 	vap->va_flags = VNOVAL;
821 	vap->va_gen = VNOVAL;
822 	vap->va_vaflags = 0;
823 }
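
/*
 * Usage sketch (assumed caller-side code): a caller changing a single
 * attribute initializes the whole structure to VNOVAL first so that the
 * filesystem only acts on the fields that were explicitly set:
 *
 *	struct vattr va;
 *
 *	vattr_null(&va);
 *	va.va_size = length;
 *	error = VOP_SETATTR(vp, &va, cred);
 */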
824 
825 /*
826  * This routine is called when we have too many vnodes.  It attempts
827  * to free <count> vnodes and will potentially free vnodes that still
828  * have VM backing store (VM backing store is typically the cause
829  * of a vnode blowout so we want to do this).  Therefore, this operation
830  * is not considered cheap.
831  *
832  * A number of conditions may prevent a vnode from being reclaimed.
833  * the buffer cache may have references on the vnode, a directory
834  * vnode may still have references due to the namei cache representing
835  * underlying files, or the vnode may be in active use.   It is not
836  * desirable to reuse such vnodes.  These conditions may cause the
837  * number of vnodes to reach some minimum value regardless of what
838  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
839  */
840 static int
841 vlrureclaim(struct mount *mp, int reclaim_nc_src, int trigger)
842 {
843 	struct vnode *vp;
844 	int count, done, target;
845 
846 	done = 0;
847 	vn_start_write(NULL, &mp, V_WAIT);
848 	MNT_ILOCK(mp);
849 	count = mp->mnt_nvnodelistsize;
850 	target = count * (int64_t)gapvnodes / imax(desiredvnodes, 1);
851 	target = target / 10 + 1;
852 	while (count != 0 && done < target) {
853 		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
854 		while (vp != NULL && vp->v_type == VMARKER)
855 			vp = TAILQ_NEXT(vp, v_nmntvnodes);
856 		if (vp == NULL)
857 			break;
858 		/*
859 		 * XXX LRU is completely broken for non-free vnodes.  First
860 		 * by calling here in mountpoint order, then by moving
861 		 * unselected vnodes to the end here, and most grossly by
862 		 * removing the vlruvp() function that was supposed to
863 		 * maintain the order.  (This function was born broken
864 		 * since syncer problems prevented it doing anything.)  The
865 		 * order is closer to LRC (C = Created).
866 		 *
867 		 * LRU reclaiming of vnodes seems to have last worked in
868 		 * FreeBSD-3 where LRU wasn't mentioned under any spelling.
869 		 * Then there was no hold count, and inactive vnodes were
870 		 * simply put on the free list in LRU order.  The separate
871 		 * lists also break LRU.  We prefer to reclaim from the
872 		 * free list for technical reasons.  This tends to thrash
873 		 * the free list to keep very unrecently used held vnodes.
874 		 * The problem is mitigated by keeping the free list large.
875 		 */
876 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
877 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
878 		--count;
879 		if (!VI_TRYLOCK(vp))
880 			goto next_iter;
881 		/*
882 		 * If it's been deconstructed already, it's still
883 		 * referenced, or it exceeds the trigger, skip it.
884 		 * Also skip free vnodes.  We are trying to make space
885 		 * to expand the free list, not reduce it.
886 		 */
887 		if (vp->v_usecount ||
888 		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
889 		    ((vp->v_iflag & VI_FREE) != 0) ||
890 		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
891 		    vp->v_object->resident_page_count > trigger)) {
892 			VI_UNLOCK(vp);
893 			goto next_iter;
894 		}
895 		MNT_IUNLOCK(mp);
896 		vholdl(vp);
897 		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
898 			vdrop(vp);
899 			goto next_iter_mntunlocked;
900 		}
901 		VI_LOCK(vp);
902 		/*
903 		 * v_usecount may have been bumped after VOP_LOCK() dropped
904 		 * the vnode interlock and before it was locked again.
905 		 *
906 		 * It is not necessary to recheck VI_DOOMED because it can
907 		 * only be set by another thread that holds both the vnode
908 		 * lock and vnode interlock.  If another thread has the
909 		 * vnode lock before we get to VOP_LOCK() and obtains the
910 		 * vnode interlock after VOP_LOCK() drops the vnode
911 		 * interlock, the other thread will be unable to drop the
912 		 * vnode lock before our VOP_LOCK() call fails.
913 		 */
914 		if (vp->v_usecount ||
915 		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
916 		    (vp->v_iflag & VI_FREE) != 0 ||
917 		    (vp->v_object != NULL &&
918 		    vp->v_object->resident_page_count > trigger)) {
919 			VOP_UNLOCK(vp, LK_INTERLOCK);
920 			vdrop(vp);
921 			goto next_iter_mntunlocked;
922 		}
923 		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
924 		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
925 		atomic_add_long(&recycles_count, 1);
926 		vgonel(vp);
927 		VOP_UNLOCK(vp, 0);
928 		vdropl(vp);
929 		done++;
930 next_iter_mntunlocked:
931 		if (!should_yield())
932 			goto relock_mnt;
933 		goto yield;
934 next_iter:
935 		if (!should_yield())
936 			continue;
937 		MNT_IUNLOCK(mp);
938 yield:
939 		kern_yield(PRI_USER);
940 relock_mnt:
941 		MNT_ILOCK(mp);
942 	}
943 	MNT_IUNLOCK(mp);
944 	vn_finished_write(mp);
945 	return done;
946 }
947 
948 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
949 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW, &max_vnlru_free,
950     0,
951     "limit on vnode free requests per call to the vnlru_free routine");
952 
953 /*
954  * Attempt to reduce the free list by the requested amount.
955  */
956 static void
957 vnlru_free_locked(int count, struct vfsops *mnt_op)
958 {
959 	struct vnode *vp;
960 	struct mount *mp;
961 	bool tried_batches;
962 
963 	tried_batches = false;
964 	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
965 	if (count > max_vnlru_free)
966 		count = max_vnlru_free;
967 	for (; count > 0; count--) {
968 		vp = TAILQ_FIRST(&vnode_free_list);
969 		/*
970 		 * The list can be modified while the free_list_mtx
971 		 * has been dropped and vp could be NULL here.
972 		 */
973 		if (vp == NULL) {
974 			if (tried_batches)
975 				break;
976 			mtx_unlock(&vnode_free_list_mtx);
977 			vnlru_return_batches(mnt_op);
978 			tried_batches = true;
979 			mtx_lock(&vnode_free_list_mtx);
980 			continue;
981 		}
982 
983 		VNASSERT(vp->v_op != NULL, vp,
984 		    ("vnlru_free: vnode already reclaimed."));
985 		KASSERT((vp->v_iflag & VI_FREE) != 0,
986 		    ("Removing vnode not on freelist"));
987 		KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
988 		    ("Mangling active vnode"));
989 		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
990 
991 		/*
992 		 * Don't recycle if our vnode is from a different type
993 		 * of mount point.  Note that mp is type-safe, the
994 		 * check does not reach an unmapped address even if the
995 		 * vnode is reclaimed.
996 		 * Don't recycle if we can't get the interlock without
997 		 * blocking.
998 		 */
999 		if ((mnt_op != NULL && (mp = vp->v_mount) != NULL &&
1000 		    mp->mnt_op != mnt_op) || !VI_TRYLOCK(vp)) {
1001 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_actfreelist);
1002 			continue;
1003 		}
1004 		VNASSERT((vp->v_iflag & VI_FREE) != 0 && vp->v_holdcnt == 0,
1005 		    vp, ("vp inconsistent on freelist"));
1006 
1007 		/*
1008 		 * The clear of VI_FREE prevents activation of the
1009 		 * vnode.  There is no sense in putting the vnode on
1010 		 * the mount point active list, only to remove it
1011 		 * later during recycling.  Inline the relevant part
1012 		 * of vholdl(), to avoid triggering assertions or
1013 		 * activating.
1014 		 */
1015 		freevnodes--;
1016 		vp->v_iflag &= ~VI_FREE;
1017 		refcount_acquire(&vp->v_holdcnt);
1018 
1019 		mtx_unlock(&vnode_free_list_mtx);
1020 		VI_UNLOCK(vp);
1021 		vtryrecycle(vp);
1022 		/*
1023 		 * If the recycle succeeded, this vdrop will actually free
1024 		 * the vnode.  If not, it will simply place it back on
1025 		 * the free list.
1026 		 */
1027 		vdrop(vp);
1028 		mtx_lock(&vnode_free_list_mtx);
1029 	}
1030 }
1031 
1032 void
1033 vnlru_free(int count, struct vfsops *mnt_op)
1034 {
1035 
1036 	mtx_lock(&vnode_free_list_mtx);
1037 	vnlru_free_locked(count, mnt_op);
1038 	mtx_unlock(&vnode_free_list_mtx);
1039 }
1040 
1041 
1042 /* XXX some names and initialization are bad for limits and watermarks. */
1043 static int
1044 vspace(void)
1045 {
1046 	int space;
1047 
1048 	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
1049 	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
1050 	vlowat = vhiwat / 2;
1051 	if (numvnodes > desiredvnodes)
1052 		return (0);
1053 	space = desiredvnodes - numvnodes;
1054 	if (freevnodes > wantfreevnodes)
1055 		space += freevnodes - wantfreevnodes;
1056 	return (space);
1057 }
1058 
1059 static void
1060 vnlru_return_batch_locked(struct mount *mp)
1061 {
1062 	struct vnode *vp;
1063 
1064 	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
1065 
1066 	if (mp->mnt_tmpfreevnodelistsize == 0)
1067 		return;
1068 
1069 	TAILQ_FOREACH(vp, &mp->mnt_tmpfreevnodelist, v_actfreelist) {
1070 		VNASSERT((vp->v_mflag & VMP_TMPMNTFREELIST) != 0, vp,
1071 		    ("vnode without VMP_TMPMNTFREELIST on mnt_tmpfreevnodelist"));
1072 		vp->v_mflag &= ~VMP_TMPMNTFREELIST;
1073 	}
1074 	mtx_lock(&vnode_free_list_mtx);
1075 	TAILQ_CONCAT(&vnode_free_list, &mp->mnt_tmpfreevnodelist, v_actfreelist);
1076 	freevnodes += mp->mnt_tmpfreevnodelistsize;
1077 	mtx_unlock(&vnode_free_list_mtx);
1078 	mp->mnt_tmpfreevnodelistsize = 0;
1079 }
1080 
1081 static void
1082 vnlru_return_batch(struct mount *mp)
1083 {
1084 
1085 	mtx_lock(&mp->mnt_listmtx);
1086 	vnlru_return_batch_locked(mp);
1087 	mtx_unlock(&mp->mnt_listmtx);
1088 }
1089 
1090 static void
1091 vnlru_return_batches(struct vfsops *mnt_op)
1092 {
1093 	struct mount *mp, *nmp;
1094 	bool need_unbusy;
1095 
1096 	mtx_lock(&mountlist_mtx);
1097 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
1098 		need_unbusy = false;
1099 		if (mnt_op != NULL && mp->mnt_op != mnt_op)
1100 			goto next;
1101 		if (mp->mnt_tmpfreevnodelistsize == 0)
1102 			goto next;
1103 		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) == 0) {
1104 			vnlru_return_batch(mp);
1105 			need_unbusy = true;
1106 			mtx_lock(&mountlist_mtx);
1107 		}
1108 next:
1109 		nmp = TAILQ_NEXT(mp, mnt_list);
1110 		if (need_unbusy)
1111 			vfs_unbusy(mp);
1112 	}
1113 	mtx_unlock(&mountlist_mtx);
1114 }
1115 
1116 /*
1117  * Attempt to recycle vnodes in a context that is always safe to block.
1118  * Calling vlrurecycle() from the bowels of filesystem code has some
1119  * interesting deadlock problems.
1120  */
1121 static struct proc *vnlruproc;
1122 static int vnlruproc_sig;
1123 
1124 static void
1125 vnlru_proc(void)
1126 {
1127 	struct mount *mp, *nmp;
1128 	unsigned long ofreevnodes, onumvnodes;
1129 	int done, force, reclaim_nc_src, trigger, usevnodes;
1130 
1131 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
1132 	    SHUTDOWN_PRI_FIRST);
1133 
1134 	force = 0;
1135 	for (;;) {
1136 		kproc_suspend_check(vnlruproc);
1137 		mtx_lock(&vnode_free_list_mtx);
1138 		/*
1139 		 * If numvnodes is too large (due to desiredvnodes being
1140 		 * adjusted using its sysctl, or emergency growth), first
1141 		 * try to reduce it by discarding from the free list.
1142 		 */
1143 		if (numvnodes > desiredvnodes)
1144 			vnlru_free_locked(numvnodes - desiredvnodes, NULL);
1145 		/*
1146 		 * Sleep if the vnode cache is in a good state.  This is
1147 		 * when it is not over-full and has space for about a 4%
1148 		 * or 9% expansion (by growing its size or inexcessively
1149 		 * reducing its free list).  Otherwise, try to reclaim
1150 		 * space for a 10% expansion.
1151 		 */
1152 		if (vstir && force == 0) {
1153 			force = 1;
1154 			vstir = 0;
1155 		}
1156 		if (vspace() >= vlowat && force == 0) {
1157 			vnlruproc_sig = 0;
1158 			wakeup(&vnlruproc_sig);
1159 			msleep(vnlruproc, &vnode_free_list_mtx,
1160 			    PVFS|PDROP, "vlruwt", hz);
1161 			continue;
1162 		}
1163 		mtx_unlock(&vnode_free_list_mtx);
1164 		done = 0;
1165 		ofreevnodes = freevnodes;
1166 		onumvnodes = numvnodes;
1167 		/*
1168 		 * Calculate parameters for recycling.  These are the same
1169 		 * throughout the loop to give some semblance of fairness.
1170 		 * The trigger point is to avoid recycling vnodes with lots
1171 		 * of resident pages.  We aren't trying to free memory; we
1172 		 * are trying to recycle or at least free vnodes.
1173 		 */
1174 		if (numvnodes <= desiredvnodes)
1175 			usevnodes = numvnodes - freevnodes;
1176 		else
1177 			usevnodes = numvnodes;
1178 		if (usevnodes <= 0)
1179 			usevnodes = 1;
1180 		/*
1181 		 * The trigger value is chosen to give a conservatively
1182 		 * large value to ensure that it alone doesn't prevent
1183 		 * making progress.  The value can easily be so large that
1184 		 * it is effectively infinite in some congested and
1185 		 * misconfigured cases, and this is necessary.  Normally
1186 		 * it is about 8 to 100 (pages), which is quite large.
1187 		 */
1188 		trigger = vm_cnt.v_page_count * 2 / usevnodes;
1189 		if (force < 2)
1190 			trigger = vsmalltrigger;
1191 		reclaim_nc_src = force >= 3;
1192 		mtx_lock(&mountlist_mtx);
1193 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
1194 			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
1195 				nmp = TAILQ_NEXT(mp, mnt_list);
1196 				continue;
1197 			}
1198 			done += vlrureclaim(mp, reclaim_nc_src, trigger);
1199 			mtx_lock(&mountlist_mtx);
1200 			nmp = TAILQ_NEXT(mp, mnt_list);
1201 			vfs_unbusy(mp);
1202 		}
1203 		mtx_unlock(&mountlist_mtx);
1204 		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
1205 			uma_reclaim();
1206 		if (done == 0) {
1207 			if (force == 0 || force == 1) {
1208 				force = 2;
1209 				continue;
1210 			}
1211 			if (force == 2) {
1212 				force = 3;
1213 				continue;
1214 			}
1215 			force = 0;
1216 			vnlru_nowhere++;
1217 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
1218 		} else
1219 			kern_yield(PRI_USER);
1220 		/*
1221 		 * After becoming active to expand above low water, keep
1222 		 * active until above high water.
1223 		 */
1224 		force = vspace() < vhiwat;
1225 	}
1226 }
1227 
1228 static struct kproc_desc vnlru_kp = {
1229 	"vnlru",
1230 	vnlru_proc,
1231 	&vnlruproc
1232 };
1233 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
1234     &vnlru_kp);
1235 
1236 /*
1237  * Routines having to do with the management of the vnode table.
1238  */
1239 
1240 /*
1241  * Try to recycle a freed vnode.  We abort if anyone picks up a reference
1242  * before we actually vgone().  This function must be called with the vnode
1243  * held to prevent the vnode from being returned to the free list midway
1244  * through vgone().
1245  */
1246 static int
1247 vtryrecycle(struct vnode *vp)
1248 {
1249 	struct mount *vnmp;
1250 
1251 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1252 	VNASSERT(vp->v_holdcnt, vp,
1253 	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
1254 	/*
1255 	 * This vnode may be found and locked via some other list; if so we
1256 	 * can't recycle it yet.
1257 	 */
1258 	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1259 		CTR2(KTR_VFS,
1260 		    "%s: impossible to recycle, vp %p lock is already held",
1261 		    __func__, vp);
1262 		return (EWOULDBLOCK);
1263 	}
1264 	/*
1265 	 * Don't recycle if its filesystem is being suspended.
1266 	 */
1267 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
1268 		VOP_UNLOCK(vp, 0);
1269 		CTR2(KTR_VFS,
1270 		    "%s: impossible to recycle, cannot start the write for %p",
1271 		    __func__, vp);
1272 		return (EBUSY);
1273 	}
1274 	/*
1275 	 * If we got this far, we need to acquire the interlock and see if
1276 	 * anyone picked up this vnode from another list.  If not, we will
1277 	 * mark it with DOOMED via vgonel() so that anyone who does find it
1278 	 * will skip over it.
1279 	 */
1280 	VI_LOCK(vp);
1281 	if (vp->v_usecount) {
1282 		VOP_UNLOCK(vp, LK_INTERLOCK);
1283 		vn_finished_write(vnmp);
1284 		CTR2(KTR_VFS,
1285 		    "%s: impossible to recycle, %p is already referenced",
1286 		    __func__, vp);
1287 		return (EBUSY);
1288 	}
1289 	if ((vp->v_iflag & VI_DOOMED) == 0) {
1290 		atomic_add_long(&recycles_count, 1);
1291 		vgonel(vp);
1292 	}
1293 	VOP_UNLOCK(vp, LK_INTERLOCK);
1294 	vn_finished_write(vnmp);
1295 	return (0);
1296 }
1297 
1298 static void
1299 vcheckspace(void)
1300 {
1301 
1302 	if (vspace() < vlowat && vnlruproc_sig == 0) {
1303 		vnlruproc_sig = 1;
1304 		wakeup(vnlruproc);
1305 	}
1306 }
1307 
1308 /*
1309  * Wait if necessary for space for a new vnode.
1310  */
1311 static int
1312 getnewvnode_wait(int suspended)
1313 {
1314 
1315 	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
1316 	if (numvnodes >= desiredvnodes) {
1317 		if (suspended) {
1318 			/*
1319 			 * The file system is being suspended.  We cannot
1320 			 * risk a deadlock here, so allow allocation of
1321 			 * another vnode even if this would give too many.
1322 			 */
1323 			return (0);
1324 		}
1325 		if (vnlruproc_sig == 0) {
1326 			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
1327 			wakeup(vnlruproc);
1328 		}
1329 		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
1330 		    "vlruwk", hz);
1331 	}
1332 	/* Post-adjust like the pre-adjust in getnewvnode(). */
1333 	if (numvnodes + 1 > desiredvnodes && freevnodes > 1)
1334 		vnlru_free_locked(1, NULL);
1335 	return (numvnodes >= desiredvnodes ? ENFILE : 0);
1336 }
1337 
1338 /*
1339  * This hack is fragile, and probably not needed any more now that the
1340  * watermark handling works.
1341  */
1342 void
1343 getnewvnode_reserve(u_int count)
1344 {
1345 	struct thread *td;
1346 
1347 	/* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */
1348 	/* XXX no longer so quick, but this part is not racy. */
1349 	mtx_lock(&vnode_free_list_mtx);
1350 	if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes)
1351 		vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes,
1352 		    freevnodes - wantfreevnodes), NULL);
1353 	mtx_unlock(&vnode_free_list_mtx);
1354 
1355 	td = curthread;
1356 	/* First try to be quick and racy. */
1357 	if (atomic_fetchadd_long(&numvnodes, count) + count <= desiredvnodes) {
1358 		td->td_vp_reserv += count;
1359 		vcheckspace();	/* XXX no longer so quick, but more racy */
1360 		return;
1361 	} else
1362 		atomic_subtract_long(&numvnodes, count);
1363 
1364 	mtx_lock(&vnode_free_list_mtx);
1365 	while (count > 0) {
1366 		if (getnewvnode_wait(0) == 0) {
1367 			count--;
1368 			td->td_vp_reserv++;
1369 			atomic_add_long(&numvnodes, 1);
1370 		}
1371 	}
1372 	vcheckspace();
1373 	mtx_unlock(&vnode_free_list_mtx);
1374 }
1375 
1376 /*
1377  * This hack is fragile, especially if desiredvnodes or wantfreevnodes are
1378  * misconfigured or changed significantly.  Reducing desiredvnodes below
1379  * the reserved amount should cause bizarre behaviour like reducing it
1380  * below the number of active vnodes -- the system will try to reduce
1381  * numvnodes to match, but should fail, so the subtraction below should
1382  * not overflow.
1383  */
1384 void
1385 getnewvnode_drop_reserve(void)
1386 {
1387 	struct thread *td;
1388 
1389 	td = curthread;
1390 	atomic_subtract_long(&numvnodes, td->td_vp_reserv);
1391 	td->td_vp_reserv = 0;
1392 }
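
/*
 * Usage sketch (assumed caller-side code): a filesystem that needs to
 * allocate a few vnodes at a point where waiting inside getnewvnode()
 * would be inconvenient can prepay for them:
 *
 *	getnewvnode_reserve(2);
 *	... allocate up to two vnodes with getnewvnode() ...
 *	getnewvnode_drop_reserve();
 */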
1393 
1394 /*
1395  * Return the next vnode from the free list.
1396  */
1397 int
1398 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
1399     struct vnode **vpp)
1400 {
1401 	struct vnode *vp;
1402 	struct thread *td;
1403 	struct lock_object *lo;
1404 	static int cyclecount;
1405 	int error;
1406 
1407 	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
1408 	vp = NULL;
1409 	td = curthread;
1410 	if (td->td_vp_reserv > 0) {
1411 		td->td_vp_reserv -= 1;
1412 		goto alloc;
1413 	}
1414 	mtx_lock(&vnode_free_list_mtx);
1415 	if (numvnodes < desiredvnodes)
1416 		cyclecount = 0;
1417 	else if (cyclecount++ >= freevnodes) {
1418 		cyclecount = 0;
1419 		vstir = 1;
1420 	}
1421 	/*
1422 	 * Grow the vnode cache if it will not be above its target max
1423 	 * after growing.  Otherwise, if the free list is nonempty, try
1424 	 * to reclaim 1 item from it before growing the cache (possibly
1425 	 * above its target max if the reclamation failed or is delayed).
1426 	 * Otherwise, wait for some space.  In all cases, schedule
1427 	 * vnlru_proc() if we are getting short of space.  The watermarks
1428 	 * should be chosen so that we never wait or even reclaim from
1429 	 * the free list to below its target minimum.
1430 	 */
1431 	if (numvnodes + 1 <= desiredvnodes)
1432 		;
1433 	else if (freevnodes > 0)
1434 		vnlru_free_locked(1, NULL);
1435 	else {
1436 		error = getnewvnode_wait(mp != NULL && (mp->mnt_kern_flag &
1437 		    MNTK_SUSPEND));
1438 #if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
1439 		if (error != 0) {
1440 			mtx_unlock(&vnode_free_list_mtx);
1441 			return (error);
1442 		}
1443 #endif
1444 	}
1445 	vcheckspace();
1446 	atomic_add_long(&numvnodes, 1);
1447 	mtx_unlock(&vnode_free_list_mtx);
1448 alloc:
1449 	atomic_add_long(&vnodes_created, 1);
1450 	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK);
1451 	/*
1452 	 * Locks are given the generic name "vnode" when created.
1453 	 * Follow the historic practice of using the filesystem
1454 	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
1455 	 *
1456 	 * Locks live in a witness group keyed on their name. Thus,
1457 	 * when a lock is renamed, it must also move from the witness
1458 	 * group of its old name to the witness group of its new name.
1459 	 *
1460 	 * The change only needs to be made when the vnode moves
1461 	 * from one filesystem type to another. We ensure that each
1462 	 * filesystem uses a single static name pointer for its tag so
1463 	 * that we can compare pointers rather than doing a strcmp().
1464 	 */
1465 	lo = &vp->v_vnlock->lock_object;
1466 	if (lo->lo_name != tag) {
1467 		lo->lo_name = tag;
1468 		WITNESS_DESTROY(lo);
1469 		WITNESS_INIT(lo, tag);
1470 	}
1471 	/*
1472 	 * By default, don't allow shared locks unless filesystems opt-in.
1473 	 */
1474 	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
1475 	/*
1476 	 * Finalize various vnode identity bits.
1477 	 */
1478 	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
1479 	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
1480 	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
1481 	vp->v_type = VNON;
1482 	vp->v_tag = tag;
1483 	vp->v_op = vops;
1484 	v_init_counters(vp);
1485 	vp->v_bufobj.bo_ops = &buf_ops_bio;
1486 #ifdef MAC
1487 	mac_vnode_init(vp);
1488 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1489 		mac_vnode_associate_singlelabel(mp, vp);
1490 	else if (mp == NULL && vops != &dead_vnodeops)
1491 		printf("NULL mp in getnewvnode()\n");
1492 #endif
1493 	if (mp != NULL) {
1494 		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
1495 		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
1496 			vp->v_vflag |= VV_NOKNOTE;
1497 	}
1498 
1499 	/*
1500 	 * For the filesystems which do not use vfs_hash_insert(),
1501 	 * still initialize v_hash to have vfs_hash_index() useful.
1502 	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
1503 	 * its own hashing.
1504 	 */
1505 	vp->v_hash = (uintptr_t)vp >> vnsz2log;
1506 
1507 	*vpp = vp;
1508 	return (0);
1509 }
1510 
1511 /*
1512  * Delete from old mount point vnode list, if on one.
1513  */
1514 static void
1515 delmntque(struct vnode *vp)
1516 {
1517 	struct mount *mp;
1518 	int active;
1519 
1520 	mp = vp->v_mount;
1521 	if (mp == NULL)
1522 		return;
1523 	MNT_ILOCK(mp);
1524 	VI_LOCK(vp);
1525 	KASSERT(mp->mnt_activevnodelistsize <= mp->mnt_nvnodelistsize,
1526 	    ("Active vnode list size %d > Vnode list size %d",
1527 	     mp->mnt_activevnodelistsize, mp->mnt_nvnodelistsize));
1528 	active = vp->v_iflag & VI_ACTIVE;
1529 	vp->v_iflag &= ~VI_ACTIVE;
1530 	if (active) {
1531 		mtx_lock(&mp->mnt_listmtx);
1532 		TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist);
1533 		mp->mnt_activevnodelistsize--;
1534 		mtx_unlock(&mp->mnt_listmtx);
1535 	}
1536 	vp->v_mount = NULL;
1537 	VI_UNLOCK(vp);
1538 	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
1539 		("bad mount point vnode list size"));
1540 	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1541 	mp->mnt_nvnodelistsize--;
1542 	MNT_REL(mp);
1543 	MNT_IUNLOCK(mp);
1544 }
1545 
1546 static void
1547 insmntque_stddtr(struct vnode *vp, void *dtr_arg)
1548 {
1549 
1550 	vp->v_data = NULL;
1551 	vp->v_op = &dead_vnodeops;
1552 	vgone(vp);
1553 	vput(vp);
1554 }
1555 
1556 /*
1557  * Insert into list of vnodes for the new mount point, if available.
1558  */
1559 int
1560 insmntque1(struct vnode *vp, struct mount *mp,
1561 	void (*dtr)(struct vnode *, void *), void *dtr_arg)
1562 {
1563 
1564 	KASSERT(vp->v_mount == NULL,
1565 		("insmntque: vnode already on per mount vnode list"));
1566 	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
1567 	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
1568 
1569 	/*
1570 	 * We acquire the vnode interlock early to ensure that the
1571 	 * vnode cannot be recycled by another process releasing a
1572 	 * holdcnt on it before we get it on both the vnode list
1573 	 * and the active vnode list. The mount mutex protects only
1574 	 * manipulation of the vnode list and the vnode freelist
1575 	 * mutex protects only manipulation of the active vnode list.
1576 	 * Hence the need to hold the vnode interlock throughout.
1577 	 */
1578 	MNT_ILOCK(mp);
1579 	VI_LOCK(vp);
1580 	if (((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
1581 	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
1582 	    mp->mnt_nvnodelistsize == 0)) &&
1583 	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
1584 		VI_UNLOCK(vp);
1585 		MNT_IUNLOCK(mp);
1586 		if (dtr != NULL)
1587 			dtr(vp, dtr_arg);
1588 		return (EBUSY);
1589 	}
1590 	vp->v_mount = mp;
1591 	MNT_REF(mp);
1592 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1593 	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
1594 		("neg mount point vnode list size"));
1595 	mp->mnt_nvnodelistsize++;
1596 	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
1597 	    ("Activating already active vnode"));
1598 	vp->v_iflag |= VI_ACTIVE;
1599 	mtx_lock(&mp->mnt_listmtx);
1600 	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
1601 	mp->mnt_activevnodelistsize++;
1602 	mtx_unlock(&mp->mnt_listmtx);
1603 	VI_UNLOCK(vp);
1604 	MNT_IUNLOCK(mp);
1605 	return (0);
1606 }
1607 
1608 int
1609 insmntque(struct vnode *vp, struct mount *mp)
1610 {
1611 
1612 	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
1613 }
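
/*
 * Allocation sketch (assumed filesystem-side code; the "myfs" names are
 * hypothetical): a typical VFS_VGET() implementation combines
 * getnewvnode() and insmntque():
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vp->v_data = ip;
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);	(insmntque_stddtr() already cleaned up vp)
 */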
1614 
1615 /*
1616  * Flush out and invalidate all buffers associated with a bufobj
1617  * Called with the underlying object locked.
1618  */
1619 int
1620 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
1621 {
1622 	int error;
1623 
1624 	BO_LOCK(bo);
1625 	if (flags & V_SAVE) {
1626 		error = bufobj_wwait(bo, slpflag, slptimeo);
1627 		if (error) {
1628 			BO_UNLOCK(bo);
1629 			return (error);
1630 		}
1631 		if (bo->bo_dirty.bv_cnt > 0) {
1632 			BO_UNLOCK(bo);
1633 			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
1634 				return (error);
1635 			/*
1636 			 * XXX We could save a lock/unlock if this was only
1637 			 * enabled under INVARIANTS
1638 			 */
1639 			BO_LOCK(bo);
1640 			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
1641 				panic("vinvalbuf: dirty bufs");
1642 		}
1643 	}
1644 	/*
1645 	 * If you alter this loop please notice that interlock is dropped and
1646 	 * reacquired in flushbuflist.  Special care is needed to ensure that
1647 	 * no race conditions occur from this.
1648 	 */
1649 	do {
1650 		error = flushbuflist(&bo->bo_clean,
1651 		    flags, bo, slpflag, slptimeo);
1652 		if (error == 0 && !(flags & V_CLEANONLY))
1653 			error = flushbuflist(&bo->bo_dirty,
1654 			    flags, bo, slpflag, slptimeo);
1655 		if (error != 0 && error != EAGAIN) {
1656 			BO_UNLOCK(bo);
1657 			return (error);
1658 		}
1659 	} while (error != 0);
1660 
1661 	/*
1662 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
1663 	 * have write I/O in-progress but if there is a VM object then the
1664 	 * VM object can also have read-I/O in-progress.
1665 	 */
1666 	do {
1667 		bufobj_wwait(bo, 0, 0);
1668 		BO_UNLOCK(bo);
1669 		if (bo->bo_object != NULL) {
1670 			VM_OBJECT_WLOCK(bo->bo_object);
1671 			vm_object_pip_wait(bo->bo_object, "bovlbx");
1672 			VM_OBJECT_WUNLOCK(bo->bo_object);
1673 		}
1674 		BO_LOCK(bo);
1675 	} while (bo->bo_numoutput > 0);
1676 	BO_UNLOCK(bo);
1677 
1678 	/*
1679 	 * Destroy the copy in the VM cache, too.
1680 	 */
1681 	if (bo->bo_object != NULL &&
1682 	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0) {
1683 		VM_OBJECT_WLOCK(bo->bo_object);
1684 		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
1685 		    OBJPR_CLEANONLY : 0);
1686 		VM_OBJECT_WUNLOCK(bo->bo_object);
1687 	}
1688 
1689 #ifdef INVARIANTS
1690 	BO_LOCK(bo);
1691 	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY)) == 0 &&
1692 	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
1693 		panic("vinvalbuf: flush failed");
1694 	BO_UNLOCK(bo);
1695 #endif
1696 	return (0);
1697 }
1698 
1699 /*
1700  * Flush out and invalidate all buffers associated with a vnode.
1701  * Called with the underlying object locked.
1702  */
1703 int
1704 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
1705 {
1706 
1707 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
1708 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1709 	if (vp->v_object != NULL && vp->v_object->handle != vp)
1710 		return (0);
1711 	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
1712 }
1713 
1714 /*
1715  * Flush out buffers on the specified list.
1716  *
1717  */
1718 static int
1719 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
1720     int slptimeo)
1721 {
1722 	struct buf *bp, *nbp;
1723 	int retval, error;
1724 	daddr_t lblkno;
1725 	b_xflags_t xflags;
1726 
1727 	ASSERT_BO_WLOCKED(bo);
1728 
1729 	retval = 0;
1730 	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1731 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1732 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1733 			continue;
1734 		}
1735 		lblkno = 0;
1736 		xflags = 0;
1737 		if (nbp != NULL) {
1738 			lblkno = nbp->b_lblkno;
1739 			xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
1740 		}
1741 		retval = EAGAIN;
1742 		error = BUF_TIMELOCK(bp,
1743 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
1744 		    "flushbuf", slpflag, slptimeo);
1745 		if (error) {
1746 			BO_LOCK(bo);
1747 			return (error != ENOLCK ? error : EAGAIN);
1748 		}
1749 		KASSERT(bp->b_bufobj == bo,
1750 		    ("bp %p wrong b_bufobj %p should be %p",
1751 		    bp, bp->b_bufobj, bo));
1752 		/*
1753 		 * XXX Since there are no node locks for NFS, I
1754 		 * believe there is a slight chance that a delayed
1755 		 * write will occur while sleeping just above, so
1756 		 * check for it.
1757 		 */
1758 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1759 		    (flags & V_SAVE)) {
1760 			bremfree(bp);
1761 			bp->b_flags |= B_ASYNC;
1762 			bwrite(bp);
1763 			BO_LOCK(bo);
1764 			return (EAGAIN);	/* XXX: why not loop ? */
1765 		}
1766 		bremfree(bp);
1767 		bp->b_flags |= (B_INVAL | B_RELBUF);
1768 		bp->b_flags &= ~B_ASYNC;
1769 		brelse(bp);
1770 		BO_LOCK(bo);
1771 		nbp = gbincore(bo, lblkno);
1772 		if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1773 		    != xflags)
1774 			break;			/* nbp invalid */
1775 	}
1776 	return (retval);
1777 }
1778 
1779 int
1780 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn)
1781 {
1782 	struct buf *bp;
1783 	int error;
1784 	daddr_t lblkno;
1785 
1786 	ASSERT_BO_LOCKED(bo);
1787 
1788 	for (lblkno = startn;;) {
1789 again:
1790 		bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno);
1791 		if (bp == NULL || bp->b_lblkno >= endn ||
1792 		    bp->b_lblkno < startn)
1793 			break;
1794 		error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
1795 		    LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0);
1796 		if (error != 0) {
1797 			BO_RLOCK(bo);
1798 			if (error == ENOLCK)
1799 				goto again;
1800 			return (error);
1801 		}
1802 		KASSERT(bp->b_bufobj == bo,
1803 		    ("bp %p wrong b_bufobj %p should be %p",
1804 		    bp, bp->b_bufobj, bo));
1805 		lblkno = bp->b_lblkno + 1;
1806 		if ((bp->b_flags & B_MANAGED) == 0)
1807 			bremfree(bp);
1808 		bp->b_flags |= B_RELBUF;
1809 		/*
1810 		 * In the VMIO case, use the B_NOREUSE flag to hint that the
1811 		 * pages backing each buffer in the range are unlikely to be
1812 		 * reused.  Dirty buffers will have the hint applied once
1813 		 * they've been written.
1814 		 */
1815 		if (bp->b_vp->v_object != NULL)
1816 			bp->b_flags |= B_NOREUSE;
1817 		brelse(bp);
1818 		BO_RLOCK(bo);
1819 	}
1820 	return (0);
1821 }
1822 
1823 /*
1824  * Truncate a file's buffer and pages to a specified length.  This
1825  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1826  * sync activity.
1827  */
1828 int
1829 vtruncbuf(struct vnode *vp, struct ucred *cred, off_t length, int blksize)
1830 {
1831 	struct buf *bp, *nbp;
1832 	int anyfreed;
1833 	int trunclbn;
1834 	struct bufobj *bo;
1835 
1836 	CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
1837 	    vp, cred, blksize, (uintmax_t)length);
1838 
1839 	/*
1840 	 * Round up to the *next* lbn.
1841 	 */
1842 	trunclbn = howmany(length, blksize);
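	/*
	 * An illustrative calculation (not taken from any caller): with
	 * blksize 16384, a length of 16385 yields trunclbn 2 while a length
	 * of exactly 16384 yields trunclbn 1; buffers with
	 * b_lblkno >= trunclbn are invalidated below.
	 */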
1843 
1844 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1845 restart:
1846 	bo = &vp->v_bufobj;
1847 	BO_LOCK(bo);
1848 	anyfreed = 1;
1849 	for (;anyfreed;) {
1850 		anyfreed = 0;
1851 		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1852 			if (bp->b_lblkno < trunclbn)
1853 				continue;
1854 			if (BUF_LOCK(bp,
1855 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1856 			    BO_LOCKPTR(bo)) == ENOLCK)
1857 				goto restart;
1858 
1859 			bremfree(bp);
1860 			bp->b_flags |= (B_INVAL | B_RELBUF);
1861 			bp->b_flags &= ~B_ASYNC;
1862 			brelse(bp);
1863 			anyfreed = 1;
1864 
1865 			BO_LOCK(bo);
1866 			if (nbp != NULL &&
1867 			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1868 			    (nbp->b_vp != vp) ||
1869 			    (nbp->b_flags & B_DELWRI))) {
1870 				BO_UNLOCK(bo);
1871 				goto restart;
1872 			}
1873 		}
1874 
1875 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1876 			if (bp->b_lblkno < trunclbn)
1877 				continue;
1878 			if (BUF_LOCK(bp,
1879 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1880 			    BO_LOCKPTR(bo)) == ENOLCK)
1881 				goto restart;
1882 			bremfree(bp);
1883 			bp->b_flags |= (B_INVAL | B_RELBUF);
1884 			bp->b_flags &= ~B_ASYNC;
1885 			brelse(bp);
1886 			anyfreed = 1;
1887 
1888 			BO_LOCK(bo);
1889 			if (nbp != NULL &&
1890 			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1891 			    (nbp->b_vp != vp) ||
1892 			    (nbp->b_flags & B_DELWRI) == 0)) {
1893 				BO_UNLOCK(bo);
1894 				goto restart;
1895 			}
1896 		}
1897 	}
1898 
1899 	if (length > 0) {
1900 restartsync:
1901 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1902 			if (bp->b_lblkno > 0)
1903 				continue;
1904 			/*
1905 			 * Since we hold the vnode lock this should only
1906 			 * fail if we're racing with the buf daemon.
1907 			 */
1908 			if (BUF_LOCK(bp,
1909 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1910 			    BO_LOCKPTR(bo)) == ENOLCK) {
1911 				goto restart;
1912 			}
1913 			VNASSERT((bp->b_flags & B_DELWRI), vp,
1914 			    ("buf(%p) on dirty queue without DELWRI", bp));
1915 
1916 			bremfree(bp);
1917 			bawrite(bp);
1918 			BO_LOCK(bo);
1919 			goto restartsync;
1920 		}
1921 	}
1922 
1923 	bufobj_wwait(bo, 0, 0);
1924 	BO_UNLOCK(bo);
1925 	vnode_pager_setsize(vp, length);
1926 
1927 	return (0);
1928 }
1929 
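/*
 * Remove the buffer from the clean or dirty block list hanging off its
 * bufobj, as indicated by its BX_VNCLEAN/BX_VNDIRTY flag, and clear that
 * flag.  The bufobj must be write-locked by the caller.
 */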
1930 static void
1931 buf_vlist_remove(struct buf *bp)
1932 {
1933 	struct bufv *bv;
1934 
1935 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1936 	ASSERT_BO_WLOCKED(bp->b_bufobj);
1937 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
1938 	    (BX_VNDIRTY|BX_VNCLEAN),
1939 	    ("buf_vlist_remove: Buf %p is on two lists", bp));
1940 	if (bp->b_xflags & BX_VNDIRTY)
1941 		bv = &bp->b_bufobj->bo_dirty;
1942 	else
1943 		bv = &bp->b_bufobj->bo_clean;
1944 	BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno);
1945 	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1946 	bv->bv_cnt--;
1947 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1948 }
1949 
1950 /*
1951  * Add the buffer to the sorted clean or dirty block list.
1952  *
1953  * NOTE: xflags is passed as a constant, optimizing this inline function!
1954  */
1955 static void
1956 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1957 {
1958 	struct bufv *bv;
1959 	struct buf *n;
1960 	int error;
1961 
1962 	ASSERT_BO_WLOCKED(bo);
1963 	KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0,
1964 	    ("dead bo %p", bo));
1965 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1966 	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
1967 	bp->b_xflags |= xflags;
1968 	if (xflags & BX_VNDIRTY)
1969 		bv = &bo->bo_dirty;
1970 	else
1971 		bv = &bo->bo_clean;
1972 
1973 	/*
1974 	 * Keep the list ordered.  Optimize empty list insertion.  Assume
1975 	 * we tend to grow at the tail so lookup_le should usually be cheaper
1976 	 * than _ge.
1977 	 */
1978 	if (bv->bv_cnt == 0 ||
1979 	    bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno)
1980 		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1981 	else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL)
1982 		TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs);
1983 	else
1984 		TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs);
1985 	error = BUF_PCTRIE_INSERT(&bv->bv_root, bp);
1986 	if (error)
1987 		panic("buf_vlist_add:  Preallocated nodes insufficient.");
1988 	bv->bv_cnt++;
1989 }
1990 
1991 /*
1992  * Look up a buffer using the buffer tries.
1993  */
1994 struct buf *
1995 gbincore(struct bufobj *bo, daddr_t lblkno)
1996 {
1997 	struct buf *bp;
1998 
1999 	ASSERT_BO_LOCKED(bo);
2000 	bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno);
2001 	if (bp != NULL)
2002 		return (bp);
2003 	return BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno);
2004 }
2005 
2006 /*
2007  * Associate a buffer with a vnode.
2008  */
2009 void
2010 bgetvp(struct vnode *vp, struct buf *bp)
2011 {
2012 	struct bufobj *bo;
2013 
2014 	bo = &vp->v_bufobj;
2015 	ASSERT_BO_WLOCKED(bo);
2016 	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
2017 
2018 	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
2019 	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
2020 	    ("bgetvp: bp already attached! %p", bp));
2021 
2022 	vhold(vp);
2023 	bp->b_vp = vp;
2024 	bp->b_bufobj = bo;
2025 	/*
2026 	 * Insert onto list for new vnode.
2027 	 */
2028 	buf_vlist_add(bp, bo, BX_VNCLEAN);
2029 }
2030 
2031 /*
2032  * Disassociate a buffer from a vnode.
2033  */
2034 void
2035 brelvp(struct buf *bp)
2036 {
2037 	struct bufobj *bo;
2038 	struct vnode *vp;
2039 
2040 	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2041 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
2042 
2043 	/*
2044 	 * Delete from old vnode list, if on one.
2045 	 */
2046 	vp = bp->b_vp;		/* XXX */
2047 	bo = bp->b_bufobj;
2048 	BO_LOCK(bo);
2049 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2050 		buf_vlist_remove(bp);
2051 	else
2052 		panic("brelvp: Buffer %p not on queue.", bp);
2053 	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2054 		bo->bo_flag &= ~BO_ONWORKLST;
2055 		mtx_lock(&sync_mtx);
2056 		LIST_REMOVE(bo, bo_synclist);
2057 		syncer_worklist_len--;
2058 		mtx_unlock(&sync_mtx);
2059 	}
2060 	bp->b_vp = NULL;
2061 	bp->b_bufobj = NULL;
2062 	BO_UNLOCK(bo);
2063 	vdrop(vp);
2064 }
2065 
2066 /*
2067  * Add an item to the syncer work queue.
2068  */
2069 static void
2070 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
2071 {
2072 	int slot;
2073 
2074 	ASSERT_BO_WLOCKED(bo);
2075 
2076 	mtx_lock(&sync_mtx);
2077 	if (bo->bo_flag & BO_ONWORKLST)
2078 		LIST_REMOVE(bo, bo_synclist);
2079 	else {
2080 		bo->bo_flag |= BO_ONWORKLST;
2081 		syncer_worklist_len++;
2082 	}
2083 
2084 	if (delay > syncer_maxdelay - 2)
2085 		delay = syncer_maxdelay - 2;
2086 	slot = (syncer_delayno + delay) & syncer_mask;
2087 
2088 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
2089 	mtx_unlock(&sync_mtx);
2090 }
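
/*
 * A worked example of the slot arithmetic above, assuming the default
 * syncer_maxdelay of 32 (and thus a syncer_mask of 31): with
 * syncer_delayno at 30 and a requested delay of 6, the bufobj is hung
 * off slot (30 + 6) & 31 == 4, i.e. the request wraps around the wheel
 * and is serviced roughly 6 seconds later as the syncer advances one
 * slot per second.
 */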
2091 
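/*
 * Report the number of vnodes on the syncer worklist, excluding the
 * per-mount syncer vnodes themselves.
 */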
2092 static int
2093 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
2094 {
2095 	int error, len;
2096 
2097 	mtx_lock(&sync_mtx);
2098 	len = syncer_worklist_len - sync_vnode_count;
2099 	mtx_unlock(&sync_mtx);
2100 	error = SYSCTL_OUT(req, &len, sizeof(len));
2101 	return (error);
2102 }
2103 
2104 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
2105     sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
2106 
2107 static struct proc *updateproc;
2108 static void sched_sync(void);
2109 static struct kproc_desc up_kp = {
2110 	"syncer",
2111 	sched_sync,
2112 	&updateproc
2113 };
2114 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
2115 
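/*
 * Sync the first vnode on the given worklist slot, if it can be locked
 * without sleeping.  Returns non-zero when the caller should move the
 * entry to the next slot (the vnode was busy or is still at the head of
 * the slot), and zero otherwise.  Called with sync_mtx held; the mutex
 * is dropped around the fsync and reacquired before returning.
 */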
2116 static int
2117 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
2118 {
2119 	struct vnode *vp;
2120 	struct mount *mp;
2121 
2122 	*bo = LIST_FIRST(slp);
2123 	if (*bo == NULL)
2124 		return (0);
2125 	vp = bo2vnode(*bo);
2126 	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
2127 		return (1);
2128 	/*
2129 	 * We use vhold in case the vnode does not
2130 	 * successfully sync.  vhold prevents the vnode from
2131 	 * going away when we unlock the sync_mtx so that
2132 	 * we can acquire the vnode interlock.
2133 	 */
2134 	vholdl(vp);
2135 	mtx_unlock(&sync_mtx);
2136 	VI_UNLOCK(vp);
2137 	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2138 		vdrop(vp);
2139 		mtx_lock(&sync_mtx);
2140 		return (*bo == LIST_FIRST(slp));
2141 	}
2142 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2143 	(void) VOP_FSYNC(vp, MNT_LAZY, td);
2144 	VOP_UNLOCK(vp, 0);
2145 	vn_finished_write(mp);
2146 	BO_LOCK(*bo);
2147 	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
2148 		/*
2149 		 * Put us back on the worklist.  The worklist
2150 		 * routine will remove us from our current
2151 		 * position and then add us back in at a later
2152 		 * position.
2153 		 */
2154 		vn_syncer_add_to_worklist(*bo, syncdelay);
2155 	}
2156 	BO_UNLOCK(*bo);
2157 	vdrop(vp);
2158 	mtx_lock(&sync_mtx);
2159 	return (0);
2160 }
2161 
2162 static int first_printf = 1;
2163 
2164 /*
2165  * System filesystem synchronizer daemon.
2166  */
2167 static void
2168 sched_sync(void)
2169 {
2170 	struct synclist *next, *slp;
2171 	struct bufobj *bo;
2172 	long starttime;
2173 	struct thread *td = curthread;
2174 	int last_work_seen;
2175 	int net_worklist_len;
2176 	int syncer_final_iter;
2177 	int error;
2178 
2179 	last_work_seen = 0;
2180 	syncer_final_iter = 0;
2181 	syncer_state = SYNCER_RUNNING;
2182 	starttime = time_uptime;
2183 	td->td_pflags |= TDP_NORUNNINGBUF;
2184 
2185 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
2186 	    SHUTDOWN_PRI_LAST);
2187 
2188 	mtx_lock(&sync_mtx);
2189 	for (;;) {
2190 		if (syncer_state == SYNCER_FINAL_DELAY &&
2191 		    syncer_final_iter == 0) {
2192 			mtx_unlock(&sync_mtx);
2193 			kproc_suspend_check(td->td_proc);
2194 			mtx_lock(&sync_mtx);
2195 		}
2196 		net_worklist_len = syncer_worklist_len - sync_vnode_count;
2197 		if (syncer_state != SYNCER_RUNNING &&
2198 		    starttime != time_uptime) {
2199 			if (first_printf) {
2200 				printf("\nSyncing disks, vnodes remaining... ");
2201 				first_printf = 0;
2202 			}
2203 			printf("%d ", net_worklist_len);
2204 		}
2205 		starttime = time_uptime;
2206 
2207 		/*
2208 		 * Push files whose dirty time has expired.  Be careful
2209 		 * of interrupt race on slp queue.
2210 		 *
2211 		 * Skip over empty worklist slots when shutting down.
2212 		 */
2213 		do {
2214 			slp = &syncer_workitem_pending[syncer_delayno];
2215 			syncer_delayno += 1;
2216 			if (syncer_delayno == syncer_maxdelay)
2217 				syncer_delayno = 0;
2218 			next = &syncer_workitem_pending[syncer_delayno];
2219 			/*
2220 			 * If the worklist has wrapped since it was
2221 			 * emptied of all but syncer vnodes,
2222 			 * switch to the FINAL_DELAY state and run
2223 			 * for one more second.
2224 			 */
2225 			if (syncer_state == SYNCER_SHUTTING_DOWN &&
2226 			    net_worklist_len == 0 &&
2227 			    last_work_seen == syncer_delayno) {
2228 				syncer_state = SYNCER_FINAL_DELAY;
2229 				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
2230 			}
2231 		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
2232 		    syncer_worklist_len > 0);
2233 
2234 		/*
2235 		 * Keep track of the last time there was anything
2236 		 * on the worklist other than syncer vnodes.
2237 		 * Return to the SHUTTING_DOWN state if any
2238 		 * new work appears.
2239 		 */
2240 		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
2241 			last_work_seen = syncer_delayno;
2242 		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
2243 			syncer_state = SYNCER_SHUTTING_DOWN;
2244 		while (!LIST_EMPTY(slp)) {
2245 			error = sync_vnode(slp, &bo, td);
2246 			if (error == 1) {
2247 				LIST_REMOVE(bo, bo_synclist);
2248 				LIST_INSERT_HEAD(next, bo, bo_synclist);
2249 				continue;
2250 			}
2251 
2252 			if (first_printf == 0) {
2253 				/*
2254 				 * Drop the sync mutex, because some watchdog
2255 				 * drivers need to sleep while patting the watchdog.
2256 				 */
2257 				mtx_unlock(&sync_mtx);
2258 				wdog_kern_pat(WD_LASTVAL);
2259 				mtx_lock(&sync_mtx);
2260 			}
2261 
2262 		}
2263 		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
2264 			syncer_final_iter--;
2265 		/*
2266 		 * The variable rushjob allows the kernel to speed up the
2267 		 * processing of the filesystem syncer process. A rushjob
2268 		 * value of N tells the filesystem syncer to process the next
2269 		 * N seconds worth of work on its queue ASAP. Currently rushjob
2270 		 * is used by the soft update code to speed up the filesystem
2271 		 * syncer process when the incore state is getting so far
2272 		 * ahead of the disk that the kernel memory pool is being
2273 		 * threatened with exhaustion.
2274 		 */
2275 		if (rushjob > 0) {
2276 			rushjob -= 1;
2277 			continue;
2278 		}
2279 		/*
2280 		 * Just sleep for a short period of time between
2281 		 * iterations when shutting down to allow some I/O
2282 		 * to happen.
2283 		 *
2284 		 * If it has taken us less than a second to process the
2285 		 * current work, then wait. Otherwise start right over
2286 		 * again. We can still lose time if any single round
2287 		 * takes more than two seconds, but it does not really
2288 		 * matter as we are just trying to generally pace the
2289 		 * filesystem activity.
2290 		 */
2291 		if (syncer_state != SYNCER_RUNNING ||
2292 		    time_uptime == starttime) {
2293 			thread_lock(td);
2294 			sched_prio(td, PPAUSE);
2295 			thread_unlock(td);
2296 		}
2297 		if (syncer_state != SYNCER_RUNNING)
2298 			cv_timedwait(&sync_wakeup, &sync_mtx,
2299 			    hz / SYNCER_SHUTDOWN_SPEEDUP);
2300 		else if (time_uptime == starttime)
2301 			cv_timedwait(&sync_wakeup, &sync_mtx, hz);
2302 	}
2303 }
2304 
2305 /*
2306  * Request the syncer daemon to speed up its work.
2307  * We never push it to speed up more than half of its
2308  * normal turn time, otherwise it could take over the cpu.
2309  */
2310 int
2311 speedup_syncer(void)
2312 {
2313 	int ret = 0;
2314 
2315 	mtx_lock(&sync_mtx);
2316 	if (rushjob < syncdelay / 2) {
2317 		rushjob += 1;
2318 		stat_rush_requests += 1;
2319 		ret = 1;
2320 	}
2321 	mtx_unlock(&sync_mtx);
2322 	cv_broadcast(&sync_wakeup);
2323 	return (ret);
2324 }
2325 
2326 /*
2327  * Tell the syncer to speed up its work and run through its work
2328  * list several times, then tell it to shut down.
2329  */
2330 static void
2331 syncer_shutdown(void *arg, int howto)
2332 {
2333 
2334 	if (howto & RB_NOSYNC)
2335 		return;
2336 	mtx_lock(&sync_mtx);
2337 	syncer_state = SYNCER_SHUTTING_DOWN;
2338 	rushjob = 0;
2339 	mtx_unlock(&sync_mtx);
2340 	cv_broadcast(&sync_wakeup);
2341 	kproc_shutdown(arg, howto);
2342 }
2343 
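/*
 * Stop the syncer by driving it through its shutdown sequence;
 * syncer_resume() below restarts it.
 */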
2344 void
2345 syncer_suspend(void)
2346 {
2347 
2348 	syncer_shutdown(updateproc, 0);
2349 }
2350 
2351 void
2352 syncer_resume(void)
2353 {
2354 
2355 	mtx_lock(&sync_mtx);
2356 	first_printf = 1;
2357 	syncer_state = SYNCER_RUNNING;
2358 	mtx_unlock(&sync_mtx);
2359 	cv_broadcast(&sync_wakeup);
2360 	kproc_resume(updateproc);
2361 }
2362 
2363 /*
2364  * Reassign a buffer from one vnode to another.
2365  * Used to assign file specific control information
2366  * (indirect blocks) to the vnode to which they belong.
2367  */
2368 void
2369 reassignbuf(struct buf *bp)
2370 {
2371 	struct vnode *vp;
2372 	struct bufobj *bo;
2373 	int delay;
2374 #ifdef INVARIANTS
2375 	struct bufv *bv;
2376 #endif
2377 
2378 	vp = bp->b_vp;
2379 	bo = bp->b_bufobj;
2380 	++reassignbufcalls;
2381 
2382 	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
2383 	    bp, bp->b_vp, bp->b_flags);
2384 	/*
2385 	 * B_PAGING flagged buffers cannot be reassigned because their vp
2386 	 * is not fully linked in.
2387 	 */
2388 	if (bp->b_flags & B_PAGING)
2389 		panic("cannot reassign paging buffer");
2390 
2391 	/*
2392 	 * Delete from old vnode list, if on one.
2393 	 */
2394 	BO_LOCK(bo);
2395 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2396 		buf_vlist_remove(bp);
2397 	else
2398 		panic("reassignbuf: Buffer %p not on queue.", bp);
2399 	/*
2400 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
2401 	 * of clean buffers.
2402 	 */
2403 	if (bp->b_flags & B_DELWRI) {
2404 		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
2405 			switch (vp->v_type) {
2406 			case VDIR:
2407 				delay = dirdelay;
2408 				break;
2409 			case VCHR:
2410 				delay = metadelay;
2411 				break;
2412 			default:
2413 				delay = filedelay;
2414 			}
2415 			vn_syncer_add_to_worklist(bo, delay);
2416 		}
2417 		buf_vlist_add(bp, bo, BX_VNDIRTY);
2418 	} else {
2419 		buf_vlist_add(bp, bo, BX_VNCLEAN);
2420 
2421 		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2422 			mtx_lock(&sync_mtx);
2423 			LIST_REMOVE(bo, bo_synclist);
2424 			syncer_worklist_len--;
2425 			mtx_unlock(&sync_mtx);
2426 			bo->bo_flag &= ~BO_ONWORKLST;
2427 		}
2428 	}
2429 #ifdef INVARIANTS
2430 	bv = &bo->bo_clean;
2431 	bp = TAILQ_FIRST(&bv->bv_hd);
2432 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2433 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2434 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
2435 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2436 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2437 	bv = &bo->bo_dirty;
2438 	bp = TAILQ_FIRST(&bv->bv_hd);
2439 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2440 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2441 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
2442 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2443 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2444 #endif
2445 	BO_UNLOCK(bo);
2446 }
2447 
2448 /*
2449  * A temporary hack until refcount_* APIs are sorted out.
2450  */
2451 static __inline int
2452 vfs_refcount_acquire_if_not_zero(volatile u_int *count)
2453 {
2454 	u_int old;
2455 
2456 	for (;;) {
2457 		old = *count;
2458 		if (old == 0)
2459 			return (0);
2460 		if (atomic_cmpset_int(count, old, old + 1))
2461 			return (1);
2462 	}
2463 }
2464 
2465 static __inline int
2466 vfs_refcount_release_if_not_last(volatile u_int *count)
2467 {
2468 	u_int old;
2469 
2470 	for (;;) {
2471 		old = *count;
2472 		if (old == 1)
2473 			return (0);
2474 		if (atomic_cmpset_int(count, old, old - 1))
2475 			return (1);
2476 	}
2477 }
2478 
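/*
 * Initialize the hold and use counts of a vnode to 1.  Only expected to
 * be called on a vnode that is still being set up (VNON, no v_data), as
 * asserted below.
 */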
2479 static void
2480 v_init_counters(struct vnode *vp)
2481 {
2482 
2483 	VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
2484 	    vp, ("%s called for an initialized vnode", __FUNCTION__));
2485 	ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
2486 
2487 	refcount_init(&vp->v_holdcnt, 1);
2488 	refcount_init(&vp->v_usecount, 1);
2489 }
2490 
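/*
 * Bump the use count with the vnode interlock held, clearing any pending
 * VI_OWEINACT request since the new use reference keeps the vnode active.
 */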
2491 static void
2492 v_incr_usecount_locked(struct vnode *vp)
2493 {
2494 
2495 	ASSERT_VI_LOCKED(vp, __func__);
2496 	if ((vp->v_iflag & VI_OWEINACT) != 0) {
2497 		VNASSERT(vp->v_usecount == 0, vp,
2498 		    ("vnode with usecount and VI_OWEINACT set"));
2499 		vp->v_iflag &= ~VI_OWEINACT;
2500 	}
2501 	refcount_acquire(&vp->v_usecount);
2502 	v_incr_devcount(vp);
2503 }
2504 
2505 /*
2506  * Increment the use count on the vnode, taking care to reference
2507  * the driver's usecount if this is a chardev.
2508  */
2509 static void
2510 v_incr_usecount(struct vnode *vp)
2511 {
2512 
2513 	ASSERT_VI_UNLOCKED(vp, __func__);
2514 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2515 
2516 	if (vp->v_type != VCHR &&
2517 	    vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) {
2518 		VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
2519 		    ("vnode with usecount and VI_OWEINACT set"));
2520 	} else {
2521 		VI_LOCK(vp);
2522 		v_incr_usecount_locked(vp);
2523 		VI_UNLOCK(vp);
2524 	}
2525 }
2526 
2527 /*
2528  * Increment si_usecount of the associated device, if any.
2529  */
2530 static void
2531 v_incr_devcount(struct vnode *vp)
2532 {
2533 
2534 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2535 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2536 		dev_lock();
2537 		vp->v_rdev->si_usecount++;
2538 		dev_unlock();
2539 	}
2540 }
2541 
2542 /*
2543  * Decrement si_usecount of the associated device, if any.
2544  */
2545 static void
2546 v_decr_devcount(struct vnode *vp)
2547 {
2548 
2549 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2550 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2551 		dev_lock();
2552 		vp->v_rdev->si_usecount--;
2553 		dev_unlock();
2554 	}
2555 }
2556 
2557 /*
2558  * Grab a particular vnode from the free list, increment its
2559  * reference count and lock it.  VI_DOOMED is set if the vnode
2560  * is being destroyed.  Only callers who specify LK_RETRY will
2561  * see doomed vnodes.  If inactive processing was delayed in
2562  * vput, try to do it here.
2563  *
2564  * Notes on lockless counter manipulation:
2565  * _vhold, vputx and other routines make various decisions based
2566  * on either holdcnt or usecount being 0. As long as either counter
2567  * is not transitioning 0->1 nor 1->0, the manipulation can be done
2568  * with atomic operations. Otherwise the interlock is taken covering
2569  * both the atomic and additional actions.
2570  */
2571 int
2572 vget(struct vnode *vp, int flags, struct thread *td)
2573 {
2574 	int error, oweinact;
2575 
2576 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2577 	    ("vget: invalid lock operation"));
2578 
2579 	if ((flags & LK_INTERLOCK) != 0)
2580 		ASSERT_VI_LOCKED(vp, __func__);
2581 	else
2582 		ASSERT_VI_UNLOCKED(vp, __func__);
2583 	if ((flags & LK_VNHELD) != 0)
2584 		VNASSERT((vp->v_holdcnt > 0), vp,
2585 		    ("vget: LK_VNHELD passed but vnode not held"));
2586 
2587 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2588 
2589 	if ((flags & LK_VNHELD) == 0)
2590 		_vhold(vp, (flags & LK_INTERLOCK) != 0);
2591 
2592 	if ((error = vn_lock(vp, flags)) != 0) {
2593 		vdrop(vp);
2594 		CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2595 		    vp);
2596 		return (error);
2597 	}
2598 	if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2599 		panic("vget: vn_lock failed to return ENOENT\n");
2600 	/*
2601 	 * We don't guarantee that any particular close will
2602 	 * trigger inactive processing so just make a best effort
2603 	 * here at preventing a reference to a removed file.  If
2604 	 * we don't succeed no harm is done.
2605 	 *
2606 	 * Upgrade our holdcnt to a usecount.
2607 	 */
2608 	if (vp->v_type == VCHR ||
2609 	    !vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) {
2610 		VI_LOCK(vp);
2611 		if ((vp->v_iflag & VI_OWEINACT) == 0) {
2612 			oweinact = 0;
2613 		} else {
2614 			oweinact = 1;
2615 			vp->v_iflag &= ~VI_OWEINACT;
2616 		}
2617 		refcount_acquire(&vp->v_usecount);
2618 		v_incr_devcount(vp);
2619 		if (oweinact && VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
2620 		    (flags & LK_NOWAIT) == 0)
2621 			vinactive(vp, td);
2622 		VI_UNLOCK(vp);
2623 	}
2624 	return (0);
2625 }
2626 
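/*
 * A typical calling pattern (a sketch, not taken verbatim from any
 * caller): a thread that already holds a hold count on vp upgrades it
 * with
 *
 *	error = vget(vp, LK_EXCLUSIVE | LK_VNHELD | LK_RETRY, curthread);
 *
 * and, once done with the vnode lock and the use reference, pairs the
 * successful call with vput().
 */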
2627 /*
2628  * Increase the reference (use) and hold count of a vnode.
2629  * This will also remove the vnode from the free list if it is presently free.
2630  */
2631 void
2632 vref(struct vnode *vp)
2633 {
2634 
2635 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2636 	_vhold(vp, false);
2637 	v_incr_usecount(vp);
2638 }
2639 
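/*
 * As vref(), but with the vnode interlock already held by the caller.
 */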
2640 void
2641 vrefl(struct vnode *vp)
2642 {
2643 
2644 	ASSERT_VI_LOCKED(vp, __func__);
2645 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2646 	_vhold(vp, true);
2647 	v_incr_usecount_locked(vp);
2648 }
2649 
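/*
 * Add use and hold references to a vnode that is already known to be
 * referenced, avoiding the interlock in the common (non-VCHR) case.
 */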
2650 void
2651 vrefact(struct vnode *vp)
2652 {
2653 
2654 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2655 	if (__predict_false(vp->v_type == VCHR)) {
2656 		VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp,
2657 		    ("%s: wrong ref counts", __func__));
2658 		vref(vp);
2659 		return;
2660 	}
2661 #ifdef INVARIANTS
2662 	int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
2663 	VNASSERT(old > 0, vp, ("%s: wrong hold count", __func__));
2664 	old = atomic_fetchadd_int(&vp->v_usecount, 1);
2665 	VNASSERT(old > 0, vp, ("%s: wrong use count", __func__));
2666 #else
2667 	refcount_acquire(&vp->v_holdcnt);
2668 	refcount_acquire(&vp->v_usecount);
2669 #endif
2670 }
2671 
2672 /*
2673  * Return reference count of a vnode.
2674  *
2675  * The results of this call are only guaranteed when some mechanism is used to
2676  * stop other processes from gaining references to the vnode.  This may be the
2677  * case if the caller holds the only reference.  This is also useful when stale
2678  * data is acceptable as race conditions may be accounted for by some other
2679  * means.
2680  */
2681 int
2682 vrefcnt(struct vnode *vp)
2683 {
2684 
2685 	return (vp->v_usecount);
2686 }
2687 
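/*
 * Flavours passed to vputx() below, identifying the locking protocol the
 * caller followed: vrele() (vnode unlocked), vput() (locked, to be
 * unlocked) and vunref() (locked, left locked).
 */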
2688 #define	VPUTX_VRELE	1
2689 #define	VPUTX_VPUT	2
2690 #define	VPUTX_VUNREF	3
2691 
2692 /*
2693  * Decrement the use and hold counts for a vnode.
2694  *
2695  * See an explanation near vget() as to why atomic operation is safe.
2696  */
2697 static void
2698 vputx(struct vnode *vp, int func)
2699 {
2700 	int error;
2701 
2702 	KASSERT(vp != NULL, ("vputx: null vp"));
2703 	if (func == VPUTX_VUNREF)
2704 		ASSERT_VOP_LOCKED(vp, "vunref");
2705 	else if (func == VPUTX_VPUT)
2706 		ASSERT_VOP_LOCKED(vp, "vput");
2707 	else
2708 		KASSERT(func == VPUTX_VRELE, ("vputx: wrong func"));
2709 	ASSERT_VI_UNLOCKED(vp, __func__);
2710 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2711 
2712 	if (vp->v_type != VCHR &&
2713 	    vfs_refcount_release_if_not_last(&vp->v_usecount)) {
2714 		if (func == VPUTX_VPUT)
2715 			VOP_UNLOCK(vp, 0);
2716 		vdrop(vp);
2717 		return;
2718 	}
2719 
2720 	VI_LOCK(vp);
2721 
2722 	/*
2723 	 * We want to hold the vnode until inactive processing finishes to
2724 	 * prevent vgone() races.  We drop the use count here and the
2725 	 * hold count below when we're done.
2726 	 */
2727 	if (!refcount_release(&vp->v_usecount) ||
2728 	    (vp->v_iflag & VI_DOINGINACT)) {
2729 		if (func == VPUTX_VPUT)
2730 			VOP_UNLOCK(vp, 0);
2731 		v_decr_devcount(vp);
2732 		vdropl(vp);
2733 		return;
2734 	}
2735 
2736 	v_decr_devcount(vp);
2737 
2738 	error = 0;
2739 
2740 	if (vp->v_usecount != 0) {
2741 		vn_printf(vp, "vputx: usecount not zero for vnode ");
2742 		panic("vputx: usecount not zero");
2743 	}
2744 
2745 	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
2746 
2747 	/*
2748 	 * We must call VOP_INACTIVE with the node locked. Mark
2749 	 * as VI_DOINGINACT to avoid recursion.
2750 	 */
2751 	vp->v_iflag |= VI_OWEINACT;
2752 	switch (func) {
2753 	case VPUTX_VRELE:
2754 		error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
2755 		VI_LOCK(vp);
2756 		break;
2757 	case VPUTX_VPUT:
2758 		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2759 			error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
2760 			    LK_NOWAIT);
2761 			VI_LOCK(vp);
2762 		}
2763 		break;
2764 	case VPUTX_VUNREF:
2765 		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2766 			error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
2767 			VI_LOCK(vp);
2768 		}
2769 		break;
2770 	}
2771 	VNASSERT(vp->v_usecount == 0 || (vp->v_iflag & VI_OWEINACT) == 0, vp,
2772 	    ("vnode with usecount and VI_OWEINACT set"));
2773 	if (error == 0) {
2774 		if (vp->v_iflag & VI_OWEINACT)
2775 			vinactive(vp, curthread);
2776 		if (func != VPUTX_VUNREF)
2777 			VOP_UNLOCK(vp, 0);
2778 	}
2779 	vdropl(vp);
2780 }
2781 
2782 /*
2783  * Vnode put/release.
2784  * If count drops to zero, call inactive routine and return to freelist.
2785  */
2786 void
2787 vrele(struct vnode *vp)
2788 {
2789 
2790 	vputx(vp, VPUTX_VRELE);
2791 }
2792 
2793 /*
2794  * Release an already locked vnode.  This gives the same effect as
2795  * unlock+vrele(), but takes less time and avoids releasing and
2796  * re-acquiring the lock (as vrele() acquires the lock internally).
2797  */
2798 void
2799 vput(struct vnode *vp)
2800 {
2801 
2802 	vputx(vp, VPUTX_VPUT);
2803 }
2804 
2805 /*
2806  * Release an exclusively locked vnode. Do not unlock the vnode lock.
2807  */
2808 void
2809 vunref(struct vnode *vp)
2810 {
2811 
2812 	vputx(vp, VPUTX_VUNREF);
2813 }
2814 
2815 /*
2816  * Increase the hold count and activate if this is the first reference.
2817  */
2818 void
2819 _vhold(struct vnode *vp, bool locked)
2820 {
2821 	struct mount *mp;
2822 
2823 	if (locked)
2824 		ASSERT_VI_LOCKED(vp, __func__);
2825 	else
2826 		ASSERT_VI_UNLOCKED(vp, __func__);
2827 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2828 	if (!locked && vfs_refcount_acquire_if_not_zero(&vp->v_holdcnt)) {
2829 		VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2830 		    ("_vhold: vnode with holdcnt is free"));
2831 		return;
2832 	}
2833 
2834 	if (!locked)
2835 		VI_LOCK(vp);
2836 	if ((vp->v_iflag & VI_FREE) == 0) {
2837 		refcount_acquire(&vp->v_holdcnt);
2838 		if (!locked)
2839 			VI_UNLOCK(vp);
2840 		return;
2841 	}
2842 	VNASSERT(vp->v_holdcnt == 0, vp,
2843 	    ("%s: wrong hold count", __func__));
2844 	VNASSERT(vp->v_op != NULL, vp,
2845 	    ("%s: vnode already reclaimed.", __func__));
2846 	/*
2847 	 * Remove a vnode from the free list, mark it as in use,
2848 	 * and put it on the active list.
2849 	 */
2850 	mp = vp->v_mount;
2851 	mtx_lock(&mp->mnt_listmtx);
2852 	if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) {
2853 		TAILQ_REMOVE(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist);
2854 		mp->mnt_tmpfreevnodelistsize--;
2855 		vp->v_mflag &= ~VMP_TMPMNTFREELIST;
2856 	} else {
2857 		mtx_lock(&vnode_free_list_mtx);
2858 		TAILQ_REMOVE(&vnode_free_list, vp, v_actfreelist);
2859 		freevnodes--;
2860 		mtx_unlock(&vnode_free_list_mtx);
2861 	}
2862 	KASSERT((vp->v_iflag & VI_ACTIVE) == 0,
2863 	    ("Activating already active vnode"));
2864 	vp->v_iflag &= ~VI_FREE;
2865 	vp->v_iflag |= VI_ACTIVE;
2866 	TAILQ_INSERT_HEAD(&mp->mnt_activevnodelist, vp, v_actfreelist);
2867 	mp->mnt_activevnodelistsize++;
2868 	mtx_unlock(&mp->mnt_listmtx);
2869 	refcount_acquire(&vp->v_holdcnt);
2870 	if (!locked)
2871 		VI_UNLOCK(vp);
2872 }
2873 
2874 /*
2875  * Drop the hold count of the vnode.  If this is the last reference to
2876  * the vnode, we place it on the free list unless it has been vgone'd
2877  * (marked VI_DOOMED), in which case we will free it.
2878  *
2879  * Because the vnode vm object keeps a hold reference on the vnode if
2880  * there is at least one resident non-cached page, the vnode cannot
2881  * leave the active list without the page cleanup done.
2882  */
2883 void
2884 _vdrop(struct vnode *vp, bool locked)
2885 {
2886 	struct bufobj *bo;
2887 	struct mount *mp;
2888 	int active;
2889 
2890 	if (locked)
2891 		ASSERT_VI_LOCKED(vp, __func__);
2892 	else
2893 		ASSERT_VI_UNLOCKED(vp, __func__);
2894 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2895 	if ((int)vp->v_holdcnt <= 0)
2896 		panic("vdrop: holdcnt %d", vp->v_holdcnt);
2897 	if (vfs_refcount_release_if_not_last(&vp->v_holdcnt)) {
2898 		if (locked)
2899 			VI_UNLOCK(vp);
2900 		return;
2901 	}
2902 
2903 	if (!locked)
2904 		VI_LOCK(vp);
2905 	if (refcount_release(&vp->v_holdcnt) == 0) {
2906 		VI_UNLOCK(vp);
2907 		return;
2908 	}
2909 	if ((vp->v_iflag & VI_DOOMED) == 0) {
2910 		/*
2911 		 * Mark a vnode as free: remove it from its active list
2912 		 * and put it up for recycling on the freelist.
2913 		 */
2914 		VNASSERT(vp->v_op != NULL, vp,
2915 		    ("vdropl: vnode already reclaimed."));
2916 		VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2917 		    ("vnode already free"));
2918 		VNASSERT(vp->v_holdcnt == 0, vp,
2919 		    ("vdropl: freeing when we shouldn't"));
2920 		active = vp->v_iflag & VI_ACTIVE;
2921 		if ((vp->v_iflag & VI_OWEINACT) == 0) {
2922 			vp->v_iflag &= ~VI_ACTIVE;
2923 			mp = vp->v_mount;
2924 			mtx_lock(&mp->mnt_listmtx);
2925 			if (active) {
2926 				TAILQ_REMOVE(&mp->mnt_activevnodelist, vp,
2927 				    v_actfreelist);
2928 				mp->mnt_activevnodelistsize--;
2929 			}
2930 			TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, vp,
2931 			    v_actfreelist);
2932 			mp->mnt_tmpfreevnodelistsize++;
2933 			vp->v_iflag |= VI_FREE;
2934 			vp->v_mflag |= VMP_TMPMNTFREELIST;
2935 			VI_UNLOCK(vp);
2936 			if (mp->mnt_tmpfreevnodelistsize >= mnt_free_list_batch)
2937 				vnlru_return_batch_locked(mp);
2938 			mtx_unlock(&mp->mnt_listmtx);
2939 		} else {
2940 			VI_UNLOCK(vp);
2941 			atomic_add_long(&free_owe_inact, 1);
2942 		}
2943 		return;
2944 	}
2945 	/*
2946 	 * The vnode has been marked for destruction, so free it.
2947 	 *
2948 	 * The vnode will be returned to the zone where it will
2949 	 * normally remain until it is needed for another vnode. We
2950 	 * need to cleanup (or verify that the cleanup has already
2951 	 * been done) any residual data left from its current use
2952 	 * so as not to contaminate the freshly allocated vnode.
2953 	 */
2954 	CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
2955 	atomic_subtract_long(&numvnodes, 1);
2956 	bo = &vp->v_bufobj;
2957 	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
2958 	    ("cleaned vnode still on the free list."));
2959 	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
2960 	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
2961 	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
2962 	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
2963 	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
2964 	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
2965 	VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
2966 	    ("clean blk trie not empty"));
2967 	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
2968 	VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
2969 	    ("dirty blk trie not empty"));
2970 	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
2971 	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
2972 	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
2973 	VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
2974 	    ("Dangling rangelock waiters"));
2975 	VI_UNLOCK(vp);
2976 #ifdef MAC
2977 	mac_vnode_destroy(vp);
2978 #endif
2979 	if (vp->v_pollinfo != NULL) {
2980 		destroy_vpollinfo(vp->v_pollinfo);
2981 		vp->v_pollinfo = NULL;
2982 	}
2983 #ifdef INVARIANTS
2984 	/* XXX Elsewhere we detect an already freed vnode via NULL v_op. */
2985 	vp->v_op = NULL;
2986 #endif
2987 	bzero(&vp->v_un, sizeof(vp->v_un));
2988 	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
2989 	vp->v_iflag = 0;
2990 	vp->v_vflag = 0;
2991 	bo->bo_flag = 0;
2992 	uma_zfree(vnode_zone, vp);
2993 }
2994 
2995 /*
2996  * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2997  * flags.  DOINGINACT prevents us from recursing in calls to vinactive.
2998  * OWEINACT tracks whether a vnode missed a call to inactive due to a
2999  * failed lock upgrade.
3000  */
3001 void
3002 vinactive(struct vnode *vp, struct thread *td)
3003 {
3004 	struct vm_object *obj;
3005 
3006 	ASSERT_VOP_ELOCKED(vp, "vinactive");
3007 	ASSERT_VI_LOCKED(vp, "vinactive");
3008 	VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
3009 	    ("vinactive: recursed on VI_DOINGINACT"));
3010 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3011 	vp->v_iflag |= VI_DOINGINACT;
3012 	vp->v_iflag &= ~VI_OWEINACT;
3013 	VI_UNLOCK(vp);
3014 	/*
3015 	 * Before moving off the active list, we must be sure that any
3016 	 * modified pages are converted into the vnode's dirty
3017 	 * buffers, since these will no longer be checked once the
3018 	 * vnode is on the inactive list.
3019 	 *
3020 	 * The write-out of the dirty pages is asynchronous.  At the
3021 	 * point that VOP_INACTIVE() is called, there could still be
3022 	 * pending I/O and dirty pages in the object.
3023 	 */
3024 	obj = vp->v_object;
3025 	if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
3026 		VM_OBJECT_WLOCK(obj);
3027 		vm_object_page_clean(obj, 0, 0, 0);
3028 		VM_OBJECT_WUNLOCK(obj);
3029 	}
3030 	VOP_INACTIVE(vp, td);
3031 	VI_LOCK(vp);
3032 	VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
3033 	    ("vinactive: lost VI_DOINGINACT"));
3034 	vp->v_iflag &= ~VI_DOINGINACT;
3035 }
3036 
3037 /*
3038  * Remove any vnodes in the vnode table belonging to mount point mp.
3039  *
3040  * If FORCECLOSE is not specified, there should not be any active vnodes;
3041  * an error is returned if any are found (nb: this is a user error, not a
3042  * system error). If FORCECLOSE is specified, detach any active vnodes
3043  * that are found.
3044  *
3045  * If WRITECLOSE is set, only flush out regular file vnodes open for
3046  * writing.
3047  *
3048  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
3049  *
3050  * `rootrefs' specifies the base reference count for the root vnode
3051  * of this filesystem. The root vnode is considered busy if its
3052  * v_usecount exceeds this value. On a successful return, vflush()
3053  * will call vrele() on the root vnode exactly rootrefs times.
3054  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
3055  * be zero.
3056  */
3057 #ifdef DIAGNOSTIC
3058 static int busyprt = 0;		/* print out busy vnodes */
3059 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
3060 #endif
3061 
3062 int
3063 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
3064 {
3065 	struct vnode *vp, *mvp, *rootvp = NULL;
3066 	struct vattr vattr;
3067 	int busy = 0, error;
3068 
3069 	CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
3070 	    rootrefs, flags);
3071 	if (rootrefs > 0) {
3072 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
3073 		    ("vflush: bad args"));
3074 		/*
3075 		 * Get the filesystem root vnode. We can vput() it
3076 		 * immediately, since with rootrefs > 0, it won't go away.
3077 		 */
3078 		if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
3079 			CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
3080 			    __func__, error);
3081 			return (error);
3082 		}
3083 		vput(rootvp);
3084 	}
3085 loop:
3086 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
3087 		vholdl(vp);
3088 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
3089 		if (error) {
3090 			vdrop(vp);
3091 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
3092 			goto loop;
3093 		}
3094 		/*
3095 		 * Skip over a vnodes marked VV_SYSTEM.
3096 		 * Skip over vnodes marked VV_SYSTEM.
3097 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
3098 			VOP_UNLOCK(vp, 0);
3099 			vdrop(vp);
3100 			continue;
3101 		}
3102 		/*
3103 		 * If WRITECLOSE is set, flush out unlinked but still open
3104 		 * files (even if open only for reading) and regular file
3105 		 * vnodes open for writing.
3106 		 */
3107 		if (flags & WRITECLOSE) {
3108 			if (vp->v_object != NULL) {
3109 				VM_OBJECT_WLOCK(vp->v_object);
3110 				vm_object_page_clean(vp->v_object, 0, 0, 0);
3111 				VM_OBJECT_WUNLOCK(vp->v_object);
3112 			}
3113 			error = VOP_FSYNC(vp, MNT_WAIT, td);
3114 			if (error != 0) {
3115 				VOP_UNLOCK(vp, 0);
3116 				vdrop(vp);
3117 				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
3118 				return (error);
3119 			}
3120 			error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3121 			VI_LOCK(vp);
3122 
3123 			if ((vp->v_type == VNON ||
3124 			    (error == 0 && vattr.va_nlink > 0)) &&
3125 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
3126 				VOP_UNLOCK(vp, 0);
3127 				vdropl(vp);
3128 				continue;
3129 			}
3130 		} else
3131 			VI_LOCK(vp);
3132 		/*
3133 		 * With v_usecount == 0, all we need to do is clear out the
3134 		 * vnode data structures and we are done.
3135 		 *
3136 		 * If FORCECLOSE is set, forcibly close the vnode.
3137 		 */
3138 		if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
3139 			vgonel(vp);
3140 		} else {
3141 			busy++;
3142 #ifdef DIAGNOSTIC
3143 			if (busyprt)
3144 				vn_printf(vp, "vflush: busy vnode ");
3145 #endif
3146 		}
3147 		VOP_UNLOCK(vp, 0);
3148 		vdropl(vp);
3149 	}
3150 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
3151 		/*
3152 		 * If just the root vnode is busy, and if its refcount
3153 		 * is equal to `rootrefs', then go ahead and kill it.
3154 		 */
3155 		VI_LOCK(rootvp);
3156 		KASSERT(busy > 0, ("vflush: not busy"));
3157 		VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
3158 		    ("vflush: usecount %d < rootrefs %d",
3159 		     rootvp->v_usecount, rootrefs));
3160 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
3161 			VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
3162 			vgone(rootvp);
3163 			VOP_UNLOCK(rootvp, 0);
3164 			busy = 0;
3165 		} else
3166 			VI_UNLOCK(rootvp);
3167 	}
3168 	if (busy) {
3169 		CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
3170 		    busy);
3171 		return (EBUSY);
3172 	}
3173 	for (; rootrefs > 0; rootrefs--)
3174 		vrele(rootvp);
3175 	return (0);
3176 }
3177 
3178 /*
3179  * Recycle an unused vnode to the front of the free list.
3180  */
3181 int
3182 vrecycle(struct vnode *vp)
3183 {
3184 	int recycled;
3185 
3186 	VI_LOCK(vp);
3187 	recycled = vrecyclel(vp);
3188 	VI_UNLOCK(vp);
3189 	return (recycled);
3190 }
3191 
3192 /*
3193  * vrecycle, with the vp interlock held.
3194  */
3195 int
3196 vrecyclel(struct vnode *vp)
3197 {
3198 	int recycled;
3199 
3200 	ASSERT_VOP_ELOCKED(vp, __func__);
3201 	ASSERT_VI_LOCKED(vp, __func__);
3202 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3203 	recycled = 0;
3204 	if (vp->v_usecount == 0) {
3205 		recycled = 1;
3206 		vgonel(vp);
3207 	}
3208 	return (recycled);
3209 }
3210 
3211 /*
3212  * Eliminate all activity associated with a vnode
3213  * in preparation for reuse.
3214  */
3215 void
3216 vgone(struct vnode *vp)
3217 {
3218 	VI_LOCK(vp);
3219 	vgonel(vp);
3220 	VI_UNLOCK(vp);
3221 }
3222 
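/*
 * No-op lowervp notification, used by vfs_notify_upper() below for the
 * vfsops of its temporary marker mount.
 */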
3223 static void
3224 notify_lowervp_vfs_dummy(struct mount *mp __unused,
3225     struct vnode *lowervp __unused)
3226 {
3227 }
3228 
3229 /*
3230  * Notify upper mounts about reclaimed or unlinked vnode.
3231  */
3232 void
3233 vfs_notify_upper(struct vnode *vp, int event)
3234 {
3235 	static struct vfsops vgonel_vfsops = {
3236 		.vfs_reclaim_lowervp = notify_lowervp_vfs_dummy,
3237 		.vfs_unlink_lowervp = notify_lowervp_vfs_dummy,
3238 	};
3239 	struct mount *mp, *ump, *mmp;
3240 
3241 	mp = vp->v_mount;
3242 	if (mp == NULL)
3243 		return;
3244 
3245 	MNT_ILOCK(mp);
3246 	if (TAILQ_EMPTY(&mp->mnt_uppers))
3247 		goto unlock;
3248 	MNT_IUNLOCK(mp);
3249 	mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO);
3250 	mmp->mnt_op = &vgonel_vfsops;
3251 	mmp->mnt_kern_flag |= MNTK_MARKER;
3252 	MNT_ILOCK(mp);
3253 	mp->mnt_kern_flag |= MNTK_VGONE_UPPER;
3254 	for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) {
3255 		if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) {
3256 			ump = TAILQ_NEXT(ump, mnt_upper_link);
3257 			continue;
3258 		}
3259 		TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link);
3260 		MNT_IUNLOCK(mp);
3261 		switch (event) {
3262 		case VFS_NOTIFY_UPPER_RECLAIM:
3263 			VFS_RECLAIM_LOWERVP(ump, vp);
3264 			break;
3265 		case VFS_NOTIFY_UPPER_UNLINK:
3266 			VFS_UNLINK_LOWERVP(ump, vp);
3267 			break;
3268 		default:
3269 			KASSERT(0, ("invalid event %d", event));
3270 			break;
3271 		}
3272 		MNT_ILOCK(mp);
3273 		ump = TAILQ_NEXT(mmp, mnt_upper_link);
3274 		TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link);
3275 	}
3276 	free(mmp, M_TEMP);
3277 	mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER;
3278 	if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) {
3279 		mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER;
3280 		wakeup(&mp->mnt_uppers);
3281 	}
3282 unlock:
3283 	MNT_IUNLOCK(mp);
3284 }
3285 
3286 /*
3287  * vgone, with the vp interlock held.
3288  */
3289 static void
3290 vgonel(struct vnode *vp)
3291 {
3292 	struct thread *td;
3293 	int oweinact;
3294 	int active;
3295 	struct mount *mp;
3296 
3297 	ASSERT_VOP_ELOCKED(vp, "vgonel");
3298 	ASSERT_VI_LOCKED(vp, "vgonel");
3299 	VNASSERT(vp->v_holdcnt, vp,
3300 	    ("vgonel: vp %p has no reference.", vp));
3301 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3302 	td = curthread;
3303 
3304 	/*
3305 	 * Don't vgonel if we're already doomed.
3306 	 */
3307 	if (vp->v_iflag & VI_DOOMED)
3308 		return;
3309 	vp->v_iflag |= VI_DOOMED;
3310 
3311 	/*
3312 	 * Check to see if the vnode is in use.  If so, we have to call
3313 	 * VOP_CLOSE() and VOP_INACTIVE().
3314 	 */
3315 	active = vp->v_usecount;
3316 	oweinact = (vp->v_iflag & VI_OWEINACT);
3317 	VI_UNLOCK(vp);
3318 	vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
3319 
3320 	/*
3321 	 * If purging an active vnode, it must be closed and
3322 	 * deactivated before being reclaimed.
3323 	 */
3324 	if (active)
3325 		VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
3326 	if (oweinact || active) {
3327 		VI_LOCK(vp);
3328 		if ((vp->v_iflag & VI_DOINGINACT) == 0)
3329 			vinactive(vp, td);
3330 		VI_UNLOCK(vp);
3331 	}
3332 	if (vp->v_type == VSOCK)
3333 		vfs_unp_reclaim(vp);
3334 
3335 	/*
3336 	 * Clean out any buffers associated with the vnode.
3337 	 * If the flush fails, just toss the buffers.
3338 	 */
3339 	mp = NULL;
3340 	if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
3341 		(void) vn_start_secondary_write(vp, &mp, V_WAIT);
3342 	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
3343 		while (vinvalbuf(vp, 0, 0, 0) != 0)
3344 			;
3345 	}
3346 
3347 	BO_LOCK(&vp->v_bufobj);
3348 	KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
3349 	    vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
3350 	    TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
3351 	    vp->v_bufobj.bo_clean.bv_cnt == 0,
3352 	    ("vp %p bufobj not invalidated", vp));
3353 
3354 	/*
3355 	 * For VMIO bufobj, BO_DEAD is set in vm_object_terminate()
3356 	 * after the object's page queue is flushed.
3357 	 */
3358 	if (vp->v_bufobj.bo_object == NULL)
3359 		vp->v_bufobj.bo_flag |= BO_DEAD;
3360 	BO_UNLOCK(&vp->v_bufobj);
3361 
3362 	/*
3363 	 * Reclaim the vnode.
3364 	 */
3365 	if (VOP_RECLAIM(vp, td))
3366 		panic("vgone: cannot reclaim");
3367 	if (mp != NULL)
3368 		vn_finished_secondary_write(mp);
3369 	VNASSERT(vp->v_object == NULL, vp,
3370 	    ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
3371 	/*
3372 	 * Clear the advisory locks and wake up waiting threads.
3373 	 */
3374 	(void)VOP_ADVLOCKPURGE(vp);
3375 	vp->v_lockf = NULL;
3376 	/*
3377 	 * Delete from old mount point vnode list.
3378 	 */
3379 	delmntque(vp);
3380 	cache_purge(vp);
3381 	/*
3382 	 * Done with purge, reset to the standard lock and invalidate
3383 	 * the vnode.
3384 	 */
3385 	VI_LOCK(vp);
3386 	vp->v_vnlock = &vp->v_lock;
3387 	vp->v_op = &dead_vnodeops;
3388 	vp->v_tag = "none";
3389 	vp->v_type = VBAD;
3390 }
3391 
3392 /*
3393  * Calculate the total number of references to a special device.
3394  */
3395 int
3396 vcount(struct vnode *vp)
3397 {
3398 	int count;
3399 
3400 	dev_lock();
3401 	count = vp->v_rdev->si_usecount;
3402 	dev_unlock();
3403 	return (count);
3404 }
3405 
3406 /*
3407  * Same as above, but using the struct cdev * as the argument.
3408  */
3409 int
3410 count_dev(struct cdev *dev)
3411 {
3412 	int count;
3413 
3414 	dev_lock();
3415 	count = dev->si_usecount;
3416 	dev_unlock();
3417 	return(count);
3418 }
3419 
3420 /*
3421  * Print out a description of a vnode.
3422  */
3423 static char *typename[] =
3424 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
3425  "VMARKER"};
3426 
3427 void
3428 vn_printf(struct vnode *vp, const char *fmt, ...)
3429 {
3430 	va_list ap;
3431 	char buf[256], buf2[16];
3432 	u_long flags;
3433 
3434 	va_start(ap, fmt);
3435 	vprintf(fmt, ap);
3436 	va_end(ap);
3437 	printf("%p: ", (void *)vp);
3438 	printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
3439 	printf("    usecount %d, writecount %d, refcount %d mountedhere %p\n",
3440 	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
3441 	buf[0] = '\0';
3442 	buf[1] = '\0';
3443 	if (vp->v_vflag & VV_ROOT)
3444 		strlcat(buf, "|VV_ROOT", sizeof(buf));
3445 	if (vp->v_vflag & VV_ISTTY)
3446 		strlcat(buf, "|VV_ISTTY", sizeof(buf));
3447 	if (vp->v_vflag & VV_NOSYNC)
3448 		strlcat(buf, "|VV_NOSYNC", sizeof(buf));
3449 	if (vp->v_vflag & VV_ETERNALDEV)
3450 		strlcat(buf, "|VV_ETERNALDEV", sizeof(buf));
3451 	if (vp->v_vflag & VV_CACHEDLABEL)
3452 		strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
3453 	if (vp->v_vflag & VV_TEXT)
3454 		strlcat(buf, "|VV_TEXT", sizeof(buf));
3455 	if (vp->v_vflag & VV_COPYONWRITE)
3456 		strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
3457 	if (vp->v_vflag & VV_SYSTEM)
3458 		strlcat(buf, "|VV_SYSTEM", sizeof(buf));
3459 	if (vp->v_vflag & VV_PROCDEP)
3460 		strlcat(buf, "|VV_PROCDEP", sizeof(buf));
3461 	if (vp->v_vflag & VV_NOKNOTE)
3462 		strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
3463 	if (vp->v_vflag & VV_DELETED)
3464 		strlcat(buf, "|VV_DELETED", sizeof(buf));
3465 	if (vp->v_vflag & VV_MD)
3466 		strlcat(buf, "|VV_MD", sizeof(buf));
3467 	if (vp->v_vflag & VV_FORCEINSMQ)
3468 		strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf));
3469 	flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
3470 	    VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP |
3471 	    VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ);
3472 	if (flags != 0) {
3473 		snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
3474 		strlcat(buf, buf2, sizeof(buf));
3475 	}
3476 	if (vp->v_iflag & VI_MOUNT)
3477 		strlcat(buf, "|VI_MOUNT", sizeof(buf));
3478 	if (vp->v_iflag & VI_DOOMED)
3479 		strlcat(buf, "|VI_DOOMED", sizeof(buf));
3480 	if (vp->v_iflag & VI_FREE)
3481 		strlcat(buf, "|VI_FREE", sizeof(buf));
3482 	if (vp->v_iflag & VI_ACTIVE)
3483 		strlcat(buf, "|VI_ACTIVE", sizeof(buf));
3484 	if (vp->v_iflag & VI_DOINGINACT)
3485 		strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
3486 	if (vp->v_iflag & VI_OWEINACT)
3487 		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
3488 	flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE |
3489 	    VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT);
3490 	if (flags != 0) {
3491 		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
3492 		strlcat(buf, buf2, sizeof(buf));
3493 	}
3494 	printf("    flags (%s)\n", buf + 1);
3495 	if (mtx_owned(VI_MTX(vp)))
3496 		printf(" VI_LOCKed");
3497 	if (vp->v_object != NULL)
3498 		printf("    v_object %p ref %d pages %d "
3499 		    "cleanbuf %d dirtybuf %d\n",
3500 		    vp->v_object, vp->v_object->ref_count,
3501 		    vp->v_object->resident_page_count,
3502 		    vp->v_bufobj.bo_clean.bv_cnt,
3503 		    vp->v_bufobj.bo_dirty.bv_cnt);
3504 	printf("    ");
3505 	lockmgr_printinfo(vp->v_vnlock);
3506 	if (vp->v_data != NULL)
3507 		VOP_PRINT(vp);
3508 }
3509 
3510 #ifdef DDB
3511 /*
3512  * List all of the locked vnodes in the system.
3513  * Called when debugging the kernel.
3514  */
3515 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
3516 {
3517 	struct mount *mp;
3518 	struct vnode *vp;
3519 
3520 	/*
3521 	 * Note: because this is DDB, we can't obey the locking semantics
3522 	 * for these structures, which means we could catch an inconsistent
3523 	 * state and dereference a nasty pointer.  Not much to be done
3524 	 * about that.
3525 	 */
3526 	db_printf("Locked vnodes\n");
3527 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3528 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3529 			if (vp->v_type != VMARKER && VOP_ISLOCKED(vp))
3530 				vn_printf(vp, "vnode ");
3531 		}
3532 	}
3533 }
3534 
3535 /*
3536  * Show details about the given vnode.
3537  */
3538 DB_SHOW_COMMAND(vnode, db_show_vnode)
3539 {
3540 	struct vnode *vp;
3541 
3542 	if (!have_addr)
3543 		return;
3544 	vp = (struct vnode *)addr;
3545 	vn_printf(vp, "vnode ");
3546 }
3547 
3548 /*
3549  * Show details about the given mount point.
3550  */
3551 DB_SHOW_COMMAND(mount, db_show_mount)
3552 {
3553 	struct mount *mp;
3554 	struct vfsopt *opt;
3555 	struct statfs *sp;
3556 	struct vnode *vp;
3557 	char buf[512];
3558 	uint64_t mflags;
3559 	u_int flags;
3560 
3561 	if (!have_addr) {
3562 		/* No address given, print short info about all mount points. */
3563 		TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3564 			db_printf("%p %s on %s (%s)\n", mp,
3565 			    mp->mnt_stat.f_mntfromname,
3566 			    mp->mnt_stat.f_mntonname,
3567 			    mp->mnt_stat.f_fstypename);
3568 			if (db_pager_quit)
3569 				break;
3570 		}
3571 		db_printf("\nMore info: show mount <addr>\n");
3572 		return;
3573 	}
3574 
3575 	mp = (struct mount *)addr;
3576 	db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
3577 	    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
3578 
3579 	buf[0] = '\0';
3580 	mflags = mp->mnt_flag;
3581 #define	MNT_FLAG(flag)	do {						\
3582 	if (mflags & (flag)) {						\
3583 		if (buf[0] != '\0')					\
3584 			strlcat(buf, ", ", sizeof(buf));		\
3585 		strlcat(buf, (#flag) + 4, sizeof(buf));			\
3586 		mflags &= ~(flag);					\
3587 	}								\
3588 } while (0)
3589 	MNT_FLAG(MNT_RDONLY);
3590 	MNT_FLAG(MNT_SYNCHRONOUS);
3591 	MNT_FLAG(MNT_NOEXEC);
3592 	MNT_FLAG(MNT_NOSUID);
3593 	MNT_FLAG(MNT_NFS4ACLS);
3594 	MNT_FLAG(MNT_UNION);
3595 	MNT_FLAG(MNT_ASYNC);
3596 	MNT_FLAG(MNT_SUIDDIR);
3597 	MNT_FLAG(MNT_SOFTDEP);
3598 	MNT_FLAG(MNT_NOSYMFOLLOW);
3599 	MNT_FLAG(MNT_GJOURNAL);
3600 	MNT_FLAG(MNT_MULTILABEL);
3601 	MNT_FLAG(MNT_ACLS);
3602 	MNT_FLAG(MNT_NOATIME);
3603 	MNT_FLAG(MNT_NOCLUSTERR);
3604 	MNT_FLAG(MNT_NOCLUSTERW);
3605 	MNT_FLAG(MNT_SUJ);
3606 	MNT_FLAG(MNT_EXRDONLY);
3607 	MNT_FLAG(MNT_EXPORTED);
3608 	MNT_FLAG(MNT_DEFEXPORTED);
3609 	MNT_FLAG(MNT_EXPORTANON);
3610 	MNT_FLAG(MNT_EXKERB);
3611 	MNT_FLAG(MNT_EXPUBLIC);
3612 	MNT_FLAG(MNT_LOCAL);
3613 	MNT_FLAG(MNT_QUOTA);
3614 	MNT_FLAG(MNT_ROOTFS);
3615 	MNT_FLAG(MNT_USER);
3616 	MNT_FLAG(MNT_IGNORE);
3617 	MNT_FLAG(MNT_UPDATE);
3618 	MNT_FLAG(MNT_DELEXPORT);
3619 	MNT_FLAG(MNT_RELOAD);
3620 	MNT_FLAG(MNT_FORCE);
3621 	MNT_FLAG(MNT_SNAPSHOT);
3622 	MNT_FLAG(MNT_BYFSID);
3623 #undef MNT_FLAG
3624 	if (mflags != 0) {
3625 		if (buf[0] != '\0')
3626 			strlcat(buf, ", ", sizeof(buf));
3627 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
3628 		    "0x%016jx", mflags);
3629 	}
3630 	db_printf("    mnt_flag = %s\n", buf);
3631 
3632 	buf[0] = '\0';
3633 	flags = mp->mnt_kern_flag;
3634 #define	MNT_KERN_FLAG(flag)	do {					\
3635 	if (flags & (flag)) {						\
3636 		if (buf[0] != '\0')					\
3637 			strlcat(buf, ", ", sizeof(buf));		\
3638 		strlcat(buf, (#flag) + 5, sizeof(buf));			\
3639 		flags &= ~(flag);					\
3640 	}								\
3641 } while (0)
3642 	MNT_KERN_FLAG(MNTK_UNMOUNTF);
3643 	MNT_KERN_FLAG(MNTK_ASYNC);
3644 	MNT_KERN_FLAG(MNTK_SOFTDEP);
3645 	MNT_KERN_FLAG(MNTK_NOINSMNTQ);
3646 	MNT_KERN_FLAG(MNTK_DRAINING);
3647 	MNT_KERN_FLAG(MNTK_REFEXPIRE);
3648 	MNT_KERN_FLAG(MNTK_EXTENDED_SHARED);
3649 	MNT_KERN_FLAG(MNTK_SHARED_WRITES);
3650 	MNT_KERN_FLAG(MNTK_NO_IOPF);
3651 	MNT_KERN_FLAG(MNTK_VGONE_UPPER);
3652 	MNT_KERN_FLAG(MNTK_VGONE_WAITER);
3653 	MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT);
3654 	MNT_KERN_FLAG(MNTK_MARKER);
3655 	MNT_KERN_FLAG(MNTK_USES_BCACHE);
3656 	MNT_KERN_FLAG(MNTK_NOASYNC);
3657 	MNT_KERN_FLAG(MNTK_UNMOUNT);
3658 	MNT_KERN_FLAG(MNTK_MWAIT);
3659 	MNT_KERN_FLAG(MNTK_SUSPEND);
3660 	MNT_KERN_FLAG(MNTK_SUSPEND2);
3661 	MNT_KERN_FLAG(MNTK_SUSPENDED);
3662 	MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
3663 	MNT_KERN_FLAG(MNTK_NOKNOTE);
3664 #undef MNT_KERN_FLAG
3665 	if (flags != 0) {
3666 		if (buf[0] != '\0')
3667 			strlcat(buf, ", ", sizeof(buf));
3668 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
3669 		    "0x%08x", flags);
3670 	}
3671 	db_printf("    mnt_kern_flag = %s\n", buf);
3672 
3673 	db_printf("    mnt_opt = ");
3674 	opt = TAILQ_FIRST(mp->mnt_opt);
3675 	if (opt != NULL) {
3676 		db_printf("%s", opt->name);
3677 		opt = TAILQ_NEXT(opt, link);
3678 		while (opt != NULL) {
3679 			db_printf(", %s", opt->name);
3680 			opt = TAILQ_NEXT(opt, link);
3681 		}
3682 	}
3683 	db_printf("\n");
3684 
3685 	sp = &mp->mnt_stat;
3686 	db_printf("    mnt_stat = { version=%u type=%u flags=0x%016jx "
3687 	    "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
3688 	    "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
3689 	    "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
3690 	    (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
3691 	    (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
3692 	    (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
3693 	    (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
3694 	    (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
3695 	    (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
3696 	    (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
3697 	    (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
3698 
3699 	db_printf("    mnt_cred = { uid=%u ruid=%u",
3700 	    (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
3701 	if (jailed(mp->mnt_cred))
3702 		db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
3703 	db_printf(" }\n");
3704 	db_printf("    mnt_ref = %d\n", mp->mnt_ref);
3705 	db_printf("    mnt_gen = %d\n", mp->mnt_gen);
3706 	db_printf("    mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
3707 	db_printf("    mnt_activevnodelistsize = %d\n",
3708 	    mp->mnt_activevnodelistsize);
3709 	db_printf("    mnt_writeopcount = %d\n", mp->mnt_writeopcount);
3710 	db_printf("    mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
3711 	db_printf("    mnt_iosize_max = %d\n", mp->mnt_iosize_max);
3712 	db_printf("    mnt_hashseed = %u\n", mp->mnt_hashseed);
3713 	db_printf("    mnt_lockref = %d\n", mp->mnt_lockref);
3714 	db_printf("    mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
3715 	db_printf("    mnt_secondary_accwrites = %d\n",
3716 	    mp->mnt_secondary_accwrites);
3717 	db_printf("    mnt_gjprovider = %s\n",
3718 	    mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
3719 
3720 	db_printf("\n\nList of active vnodes\n");
3721 	TAILQ_FOREACH(vp, &mp->mnt_activevnodelist, v_actfreelist) {
3722 		if (vp->v_type != VMARKER) {
3723 			vn_printf(vp, "vnode ");
3724 			if (db_pager_quit)
3725 				break;
3726 		}
3727 	}
3728 	db_printf("\n\nList of inactive vnodes\n");
3729 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3730 		if (vp->v_type != VMARKER && (vp->v_iflag & VI_ACTIVE) == 0) {
3731 			vn_printf(vp, "vnode ");
3732 			if (db_pager_quit)
3733 				break;
3734 		}
3735 	}
3736 }
3737 #endif	/* DDB */
3738 
3739 /*
3740  * Fill in a struct xvfsconf based on a struct vfsconf.
3741  */
3742 static int
3743 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp)
3744 {
3745 	struct xvfsconf xvfsp;
3746 
3747 	bzero(&xvfsp, sizeof(xvfsp));
3748 	strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3749 	xvfsp.vfc_typenum = vfsp->vfc_typenum;
3750 	xvfsp.vfc_refcount = vfsp->vfc_refcount;
3751 	xvfsp.vfc_flags = vfsp->vfc_flags;
3752 	/*
3753 	 * These are unused in userland; we keep them
3754 	 * to avoid breaking binary compatibility.
3755 	 */
3756 	xvfsp.vfc_vfsops = NULL;
3757 	xvfsp.vfc_next = NULL;
3758 	return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3759 }
3760 
3761 #ifdef COMPAT_FREEBSD32
3762 struct xvfsconf32 {
3763 	uint32_t	vfc_vfsops;
3764 	char		vfc_name[MFSNAMELEN];
3765 	int32_t		vfc_typenum;
3766 	int32_t		vfc_refcount;
3767 	int32_t		vfc_flags;
3768 	uint32_t	vfc_next;
3769 };
3770 
3771 static int
3772 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp)
3773 {
3774 	struct xvfsconf32 xvfsp;
3775 
3776 	strcpy(xvfsp.vfc_name, vfsp->vfc_name);
3777 	xvfsp.vfc_typenum = vfsp->vfc_typenum;
3778 	xvfsp.vfc_refcount = vfsp->vfc_refcount;
3779 	xvfsp.vfc_flags = vfsp->vfc_flags;
3780 	xvfsp.vfc_vfsops = 0;
3781 	xvfsp.vfc_next = 0;
3782 	return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3783 }
3784 #endif
3785 
3786 /*
3787  * Top level filesystem related information gathering.
3788  */
3789 static int
3790 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
3791 {
3792 	struct vfsconf *vfsp;
3793 	int error;
3794 
3795 	error = 0;
3796 	vfsconf_slock();
3797 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3798 #ifdef COMPAT_FREEBSD32
3799 		if (req->flags & SCTL_MASK32)
3800 			error = vfsconf2x32(req, vfsp);
3801 		else
3802 #endif
3803 			error = vfsconf2x(req, vfsp);
3804 		if (error)
3805 			break;
3806 	}
3807 	vfsconf_sunlock();
3808 	return (error);
3809 }
3810 
3811 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD |
3812     CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist,
3813     "S,xvfsconf", "List of all configured filesystems");
3814 
3815 #ifndef BURN_BRIDGES
3816 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
3817 
3818 static int
3819 vfs_sysctl(SYSCTL_HANDLER_ARGS)
3820 {
3821 	int *name = (int *)arg1 - 1;	/* XXX */
3822 	u_int namelen = arg2 + 1;	/* XXX */
3823 	struct vfsconf *vfsp;
3824 
3825 	log(LOG_WARNING, "userland calling deprecated sysctl, "
3826 	    "please rebuild world\n");
3827 
3828 #if 1 || defined(COMPAT_PRELITE2)
3829 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
3830 	if (namelen == 1)
3831 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
3832 #endif
3833 
3834 	switch (name[1]) {
3835 	case VFS_MAXTYPENUM:
3836 		if (namelen != 2)
3837 			return (ENOTDIR);
3838 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
3839 	case VFS_CONF:
3840 		if (namelen != 3)
3841 			return (ENOTDIR);	/* overloaded */
3842 		vfsconf_slock();
3843 		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3844 			if (vfsp->vfc_typenum == name[2])
3845 				break;
3846 		}
3847 		vfsconf_sunlock();
3848 		if (vfsp == NULL)
3849 			return (EOPNOTSUPP);
3850 #ifdef COMPAT_FREEBSD32
3851 		if (req->flags & SCTL_MASK32)
3852 			return (vfsconf2x32(req, vfsp));
3853 		else
3854 #endif
3855 			return (vfsconf2x(req, vfsp));
3856 	}
3857 	return (EOPNOTSUPP);
3858 }
3859 
3860 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP |
3861     CTLFLAG_MPSAFE, vfs_sysctl,
3862     "Generic filesystem");
3863 
3864 #if 1 || defined(COMPAT_PRELITE2)
3865 
3866 static int
3867 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3868 {
3869 	int error;
3870 	struct vfsconf *vfsp;
3871 	struct ovfsconf ovfs;
3872 
3873 	vfsconf_slock();
3874 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3875 		bzero(&ovfs, sizeof(ovfs));
3876 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3877 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3878 		ovfs.vfc_index = vfsp->vfc_typenum;
3879 		ovfs.vfc_refcount = vfsp->vfc_refcount;
3880 		ovfs.vfc_flags = vfsp->vfc_flags;
3881 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3882 		if (error != 0) {
3883 			vfsconf_sunlock();
3884 			return (error);
3885 		}
3886 	}
3887 	vfsconf_sunlock();
3888 	return (0);
3889 }
3890 
3891 #endif /* 1 || COMPAT_PRELITE2 */
3892 #endif /* !BURN_BRIDGES */
3893 
3894 #define KINFO_VNODESLOP		10
3895 #ifdef notyet
3896 /*
3897  * Dump vnode list (via sysctl).
3898  */
3899 /* ARGSUSED */
3900 static int
3901 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3902 {
3903 	struct xvnode *xvn;
3904 	struct mount *mp;
3905 	struct vnode *vp;
3906 	int error, len, n;
3907 
3908 	/*
3909 	 * Stale numvnodes access is not fatal here.
3910 	 */
3911 	req->lock = 0;
3912 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3913 	if (!req->oldptr)
3914 		/* Make an estimate */
3915 		return (SYSCTL_OUT(req, 0, len));
3916 
3917 	error = sysctl_wire_old_buffer(req, 0);
3918 	if (error != 0)
3919 		return (error);
3920 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3921 	n = 0;
3922 	mtx_lock(&mountlist_mtx);
3923 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3924 		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
3925 			continue;
3926 		MNT_ILOCK(mp);
3927 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3928 			if (n == len)
3929 				break;
3930 			vref(vp);
3931 			xvn[n].xv_size = sizeof *xvn;
3932 			xvn[n].xv_vnode = vp;
3933 			xvn[n].xv_id = 0;	/* XXX compat */
3934 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3935 			XV_COPY(usecount);
3936 			XV_COPY(writecount);
3937 			XV_COPY(holdcnt);
3938 			XV_COPY(mount);
3939 			XV_COPY(numoutput);
3940 			XV_COPY(type);
3941 #undef XV_COPY
3942 			xvn[n].xv_flag = vp->v_vflag;
3943 
3944 			switch (vp->v_type) {
3945 			case VREG:
3946 			case VDIR:
3947 			case VLNK:
3948 				break;
3949 			case VBLK:
3950 			case VCHR:
3951 				if (vp->v_rdev == NULL) {
3952 					vrele(vp);
3953 					continue;
3954 				}
3955 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3956 				break;
3957 			case VSOCK:
3958 				xvn[n].xv_socket = vp->v_socket;
3959 				break;
3960 			case VFIFO:
3961 				xvn[n].xv_fifo = vp->v_fifoinfo;
3962 				break;
3963 			case VNON:
3964 			case VBAD:
3965 			default:
3966 				/* shouldn't happen? */
3967 				vrele(vp);
3968 				continue;
3969 			}
3970 			vrele(vp);
3971 			++n;
3972 		}
3973 		MNT_IUNLOCK(mp);
3974 		mtx_lock(&mountlist_mtx);
3975 		vfs_unbusy(mp);
3976 		if (n == len)
3977 			break;
3978 	}
3979 	mtx_unlock(&mountlist_mtx);
3980 
3981 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3982 	free(xvn, M_TEMP);
3983 	return (error);
3984 }
3985 
3986 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD |
3987     CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode",
3988     "");
3989 #endif
3990 
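/*
 * Forcibly unmount the given filesystem; if the unmount fails, report
 * the error on the console, printing EBUSY symbolically.
 */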
3991 static void
3992 unmount_or_warn(struct mount *mp)
3993 {
3994 	int error;
3995 
3996 	error = dounmount(mp, MNT_FORCE, curthread);
3997 	if (error != 0) {
3998 		printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
3999 		if (error == EBUSY)
4000 			printf("BUSY)\n");
4001 		else
4002 			printf("%d)\n", error);
4003 	}
4004 }
4005 
4006 /*
4007  * Unmount all filesystems. The list is traversed in reverse order
4008  * of mounting to avoid dependencies.
4009  */
4010 void
4011 vfs_unmountall(void)
4012 {
4013 	struct mount *mp, *tmp;
4014 
4015 	CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
4016 
4017 	/*
4018 	 * Since this only runs when rebooting, it is not interlocked.
4019 	 */
4020 	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) {
4021 		vfs_ref(mp);
4022 
4023 		/*
4024 		 * Forcibly unmounting "/dev" before "/" would prevent clean
4025 		 * unmount of the latter.
4026 		 */
4027 		if (mp == rootdevmp)
4028 			continue;
4029 
4030 		unmount_or_warn(mp);
4031 	}
4032 
4033 	if (rootdevmp != NULL)
4034 		unmount_or_warn(rootdevmp);
4035 }
4036 
4037 /*
4038  * Perform msync on all vnodes under a mount point.
4039  * The mount point must be locked.
4040  */
4041 void
4042 vfs_msync(struct mount *mp, int flags)
4043 {
4044 	struct vnode *vp, *mvp;
4045 	struct vm_object *obj;
4046 
4047 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
4048 
4049 	vnlru_return_batch(mp);
4050 
4051 	MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
4052 		obj = vp->v_object;
4053 		if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 &&
4054 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
4055 			if (!vget(vp,
4056 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
4057 			    curthread)) {
4058 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
4059 					vput(vp);
4060 					continue;
4061 				}
4062 
4063 				obj = vp->v_object;
4064 				if (obj != NULL) {
4065 					VM_OBJECT_WLOCK(obj);
4066 					vm_object_page_clean(obj, 0, 0,
4067 					    flags == MNT_WAIT ?
4068 					    OBJPC_SYNC : OBJPC_NOSYNC);
4069 					VM_OBJECT_WUNLOCK(obj);
4070 				}
4071 				vput(vp);
4072 			}
4073 		} else
4074 			VI_UNLOCK(vp);
4075 	}
4076 }
4077 
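/*
 * Tear down the knlist and mutex backing a vpollinfo structure and
 * free the structure itself.
 */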
4078 static void
4079 destroy_vpollinfo_free(struct vpollinfo *vi)
4080 {
4081 
4082 	knlist_destroy(&vi->vpi_selinfo.si_note);
4083 	mtx_destroy(&vi->vpi_lock);
4084 	uma_zfree(vnodepoll_zone, vi);
4085 }
4086 
4087 static void
4088 destroy_vpollinfo(struct vpollinfo *vi)
4089 {
4090 
4091 	knlist_clear(&vi->vpi_selinfo.si_note, 1);
4092 	seldrain(&vi->vpi_selinfo);
4093 	destroy_vpollinfo_free(vi);
4094 }
4095 
4096 /*
4097  * Initialize per-vnode helper structure to hold poll-related state.
4098  */
4099 void
4100 v_addpollinfo(struct vnode *vp)
4101 {
4102 	struct vpollinfo *vi;
4103 
4104 	if (vp->v_pollinfo != NULL)
4105 		return;
4106 	vi = uma_zalloc(vnodepoll_zone, M_WAITOK | M_ZERO);
4107 	mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
4108 	knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
4109 	    vfs_knlunlock, vfs_knl_assert_locked, vfs_knl_assert_unlocked);
4110 	VI_LOCK(vp);
4111 	if (vp->v_pollinfo != NULL) {
4112 		VI_UNLOCK(vp);
4113 		destroy_vpollinfo_free(vi);
4114 		return;
4115 	}
4116 	vp->v_pollinfo = vi;
4117 	VI_UNLOCK(vp);
4118 }
4119 
4120 /*
4121  * Record a process's interest in events which might happen to
4122  * a vnode.  Because poll uses the historic select-style interface
4123  * internally, this routine serves as both the ``check for any
4124  * pending events'' and the ``record my interest in future events''
4125  * functions.  (These are done together, while the lock is held,
4126  * to avoid race conditions.)
4127  */
4128 int
4129 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
4130 {
4131 
4132 	v_addpollinfo(vp);
4133 	mtx_lock(&vp->v_pollinfo->vpi_lock);
4134 	if (vp->v_pollinfo->vpi_revents & events) {
4135 		/*
4136 		 * This leaves events we are not interested
4137 		 * in available for the other process
4138 		 * which presumably had requested them
4139 		 * (otherwise they would never have been
4140 		 * recorded).
4141 		 */
4142 		events &= vp->v_pollinfo->vpi_revents;
4143 		vp->v_pollinfo->vpi_revents &= ~events;
4144 
4145 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
4146 		return (events);
4147 	}
4148 	vp->v_pollinfo->vpi_events |= events;
4149 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
4150 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
4151 	return (0);
4152 }
4153 
4154 /*
4155  * Routine to create and manage a filesystem syncer vnode.
4156  */
4157 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
4158 static int	sync_fsync(struct  vop_fsync_args *);
4159 static int	sync_inactive(struct  vop_inactive_args *);
4160 static int	sync_reclaim(struct  vop_reclaim_args *);
4161 
4162 static struct vop_vector sync_vnodeops = {
4163 	.vop_bypass =	VOP_EOPNOTSUPP,
4164 	.vop_close =	sync_close,		/* close */
4165 	.vop_fsync =	sync_fsync,		/* fsync */
4166 	.vop_inactive =	sync_inactive,	/* inactive */
4167 	.vop_reclaim =	sync_reclaim,	/* reclaim */
4168 	.vop_lock1 =	vop_stdlock,	/* lock */
4169 	.vop_unlock =	vop_stdunlock,	/* unlock */
4170 	.vop_islocked =	vop_stdislocked,	/* islocked */
4171 };
4172 
4173 /*
4174  * Create a new filesystem syncer vnode for the specified mount point.
4175  */
4176 void
4177 vfs_allocate_syncvnode(struct mount *mp)
4178 {
4179 	struct vnode *vp;
4180 	struct bufobj *bo;
4181 	static long start, incr, next;
4182 	int error;
4183 
4184 	/* Allocate a new vnode */
4185 	error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
4186 	if (error != 0)
4187 		panic("vfs_allocate_syncvnode: getnewvnode() failed");
4188 	vp->v_type = VNON;
4189 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4190 	vp->v_vflag |= VV_FORCEINSMQ;
4191 	error = insmntque(vp, mp);
4192 	if (error != 0)
4193 		panic("vfs_allocate_syncvnode: insmntque() failed");
4194 	vp->v_vflag &= ~VV_FORCEINSMQ;
4195 	VOP_UNLOCK(vp, 0);
4196 	/*
4197 	 * Place the vnode onto the syncer worklist. We attempt to
4198 	 * scatter them about on the list so that they will go off
4199 	 * at evenly distributed times even if all the filesystems
4200 	 * are mounted at once.
4201 	 */
4202 	next += incr;
4203 	if (next == 0 || next > syncer_maxdelay) {
4204 		start /= 2;
4205 		incr /= 2;
4206 		if (start == 0) {
4207 			start = syncer_maxdelay / 2;
4208 			incr = syncer_maxdelay;
4209 		}
4210 		next = start;
4211 	}
4212 	bo = &vp->v_bufobj;
4213 	BO_LOCK(bo);
4214 	vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
4215 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
4216 	mtx_lock(&sync_mtx);
4217 	sync_vnode_count++;
4218 	if (mp->mnt_syncer == NULL) {
4219 		mp->mnt_syncer = vp;
4220 		vp = NULL;
4221 	}
4222 	mtx_unlock(&sync_mtx);
4223 	BO_UNLOCK(bo);
4224 	if (vp != NULL) {
4225 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4226 		vgone(vp);
4227 		vput(vp);
4228 	}
4229 }
4230 
4231 void
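/*
 * Detach the syncer vnode from the given mount point and drop the
 * reference that was held through mp->mnt_syncer.
 */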
4232 vfs_deallocate_syncvnode(struct mount *mp)
4233 {
4234 	struct vnode *vp;
4235 
4236 	mtx_lock(&sync_mtx);
4237 	vp = mp->mnt_syncer;
4238 	if (vp != NULL)
4239 		mp->mnt_syncer = NULL;
4240 	mtx_unlock(&sync_mtx);
4241 	if (vp != NULL)
4242 		vrele(vp);
4243 }
4244 
4245 /*
4246  * Do a lazy sync of the filesystem.
4247  */
4248 static int
4249 sync_fsync(struct vop_fsync_args *ap)
4250 {
4251 	struct vnode *syncvp = ap->a_vp;
4252 	struct mount *mp = syncvp->v_mount;
4253 	int error, save;
4254 	struct bufobj *bo;
4255 
4256 	/*
4257 	 * We only need to do something if this is a lazy evaluation.
4258 	 */
4259 	if (ap->a_waitfor != MNT_LAZY)
4260 		return (0);
4261 
4262 	/*
4263 	 * Move ourselves to the back of the sync list.
4264 	 */
4265 	bo = &syncvp->v_bufobj;
4266 	BO_LOCK(bo);
4267 	vn_syncer_add_to_worklist(bo, syncdelay);
4268 	BO_UNLOCK(bo);
4269 
4270 	/*
4271 	 * Walk the list of vnodes pushing all that are dirty and
4272 	 * not already on the sync list.
4273 	 */
4274 	if (vfs_busy(mp, MBF_NOWAIT) != 0)
4275 		return (0);
4276 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
4277 		vfs_unbusy(mp);
4278 		return (0);
4279 	}
4280 	save = curthread_pflags_set(TDP_SYNCIO);
4281 	vfs_msync(mp, MNT_NOWAIT);
4282 	error = VFS_SYNC(mp, MNT_LAZY);
4283 	curthread_pflags_restore(save);
4284 	vn_finished_write(mp);
4285 	vfs_unbusy(mp);
4286 	return (error);
4287 }
4288 
4289 /*
4290  * The syncer vnode is no longer referenced.
4291  */
4292 static int
4293 sync_inactive(struct vop_inactive_args *ap)
4294 {
4295 
4296 	vgone(ap->a_vp);
4297 	return (0);
4298 }
4299 
4300 /*
4301  * The syncer vnode is no longer needed and is being decommissioned.
4302  *
4303  * Modifications to the worklist must be protected by sync_mtx.
4304  */
4305 static int
4306 sync_reclaim(struct vop_reclaim_args *ap)
4307 {
4308 	struct vnode *vp = ap->a_vp;
4309 	struct bufobj *bo;
4310 
4311 	bo = &vp->v_bufobj;
4312 	BO_LOCK(bo);
4313 	mtx_lock(&sync_mtx);
4314 	if (vp->v_mount->mnt_syncer == vp)
4315 		vp->v_mount->mnt_syncer = NULL;
4316 	if (bo->bo_flag & BO_ONWORKLST) {
4317 		LIST_REMOVE(bo, bo_synclist);
4318 		syncer_worklist_len--;
4319 		sync_vnode_count--;
4320 		bo->bo_flag &= ~BO_ONWORKLST;
4321 	}
4322 	mtx_unlock(&sync_mtx);
4323 	BO_UNLOCK(bo);
4324 
4325 	return (0);
4326 }
4327 
4328 /*
4329  * Check if the vnode represents a disk device.
4330  */
4331 int
4332 vn_isdisk(struct vnode *vp, int *errp)
4333 {
4334 	int error;
4335 
4336 	if (vp->v_type != VCHR) {
4337 		error = ENOTBLK;
4338 		goto out;
4339 	}
4340 	error = 0;
4341 	dev_lock();
4342 	if (vp->v_rdev == NULL)
4343 		error = ENXIO;
4344 	else if (vp->v_rdev->si_devsw == NULL)
4345 		error = ENXIO;
4346 	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
4347 		error = ENOTBLK;
4348 	dev_unlock();
4349 out:
4350 	if (errp != NULL)
4351 		*errp = error;
4352 	return (error == 0);
4353 }
4354 
4355 /*
4356  * Common filesystem object access control check routine.  Accepts a
4357  * vnode's type, "mode", uid and gid, requested access mode, credentials,
4358  * and optional call-by-reference privused argument allowing vaccess()
4359  * to indicate to the caller whether privilege was used to satisfy the
4360  * request (obsoleted).  Returns 0 on success, or an errno on failure.
4361  */
4362 int
4363 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
4364     accmode_t accmode, struct ucred *cred, int *privused)
4365 {
4366 	accmode_t dac_granted;
4367 	accmode_t priv_granted;
4368 
4369 	KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0,
4370 	    ("invalid bit in accmode"));
4371 	KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE),
4372 	    ("VAPPEND without VWRITE"));
4373 
4374 	/*
4375 	 * Look for a normal, non-privileged way to access the file/directory
4376 	 * as requested.  If it exists, go with that.
4377 	 */
4378 
4379 	if (privused != NULL)
4380 		*privused = 0;
4381 
4382 	dac_granted = 0;
4383 
4384 	/* Check the owner. */
4385 	if (cred->cr_uid == file_uid) {
4386 		dac_granted |= VADMIN;
4387 		if (file_mode & S_IXUSR)
4388 			dac_granted |= VEXEC;
4389 		if (file_mode & S_IRUSR)
4390 			dac_granted |= VREAD;
4391 		if (file_mode & S_IWUSR)
4392 			dac_granted |= (VWRITE | VAPPEND);
4393 
4394 		if ((accmode & dac_granted) == accmode)
4395 			return (0);
4396 
4397 		goto privcheck;
4398 	}
4399 
4400 	/* Otherwise, check the groups (first match) */
4401 	if (groupmember(file_gid, cred)) {
4402 		if (file_mode & S_IXGRP)
4403 			dac_granted |= VEXEC;
4404 		if (file_mode & S_IRGRP)
4405 			dac_granted |= VREAD;
4406 		if (file_mode & S_IWGRP)
4407 			dac_granted |= (VWRITE | VAPPEND);
4408 
4409 		if ((accmode & dac_granted) == accmode)
4410 			return (0);
4411 
4412 		goto privcheck;
4413 	}
4414 
4415 	/* Otherwise, check everyone else. */
4416 	if (file_mode & S_IXOTH)
4417 		dac_granted |= VEXEC;
4418 	if (file_mode & S_IROTH)
4419 		dac_granted |= VREAD;
4420 	if (file_mode & S_IWOTH)
4421 		dac_granted |= (VWRITE | VAPPEND);
4422 	if ((accmode & dac_granted) == accmode)
4423 		return (0);
4424 
4425 privcheck:
4426 	/*
4427 	 * Build a privilege mask to determine if the set of privileges
4428 	 * satisfies the requirements when combined with the granted mask
4429 	 * from above.  For each privilege, if the privilege is required,
4430 	 * bitwise or the request type onto the priv_granted mask.
4431 	 */
4432 	priv_granted = 0;
4433 
4434 	if (type == VDIR) {
4435 		/*
4436 		 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
4437 		 * requests, instead of PRIV_VFS_EXEC.
4438 		 */
4439 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
4440 		    !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0))
4441 			priv_granted |= VEXEC;
4442 	} else {
4443 		/*
4444 		 * Ensure that at least one execute bit is on. Otherwise,
4445 		 * a privileged user will always succeed, and we don't want
4446 		 * this to happen unless the file really is executable.
4447 		 */
4448 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
4449 		    (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
4450 		    !priv_check_cred(cred, PRIV_VFS_EXEC, 0))
4451 			priv_granted |= VEXEC;
4452 	}
4453 
4454 	if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
4455 	    !priv_check_cred(cred, PRIV_VFS_READ, 0))
4456 		priv_granted |= VREAD;
4457 
4458 	if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
4459 	    !priv_check_cred(cred, PRIV_VFS_WRITE, 0))
4460 		priv_granted |= (VWRITE | VAPPEND);
4461 
4462 	if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
4463 	    !priv_check_cred(cred, PRIV_VFS_ADMIN, 0))
4464 		priv_granted |= VADMIN;
4465 
4466 	if ((accmode & (priv_granted | dac_granted)) == accmode) {
4467 		/* XXX audit: privilege used */
4468 		if (privused != NULL)
4469 			*privused = 1;
4470 		return (0);
4471 	}
4472 
4473 	return ((accmode & VADMIN) ? EPERM : EACCES);
4474 }
4475 
4476 /*
4477  * Credential check based on process requesting service, and per-attribute
4478  * permissions.
4479  */
4480 int
4481 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
4482     struct thread *td, accmode_t accmode)
4483 {
4484 
4485 	/*
4486 	 * Kernel-invoked requests always succeed.
4487 	 */
4488 	if (cred == NOCRED)
4489 		return (0);
4490 
4491 	/*
4492 	 * Do not allow privileged processes in jail to directly manipulate
4493 	 * system attributes.
4494 	 */
4495 	switch (attrnamespace) {
4496 	case EXTATTR_NAMESPACE_SYSTEM:
4497 		/* Potentially should be: return (EPERM); */
4498 		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0));
4499 	case EXTATTR_NAMESPACE_USER:
4500 		return (VOP_ACCESS(vp, accmode, cred, td));
4501 	default:
4502 		return (EPERM);
4503 	}
4504 }
4505 
4506 #ifdef DEBUG_VFS_LOCKS
4507 /*
4508  * This only exists to suppress warnings from unlocked specfs accesses.  It is
4509  * no longer ok to have an unlocked VFS.
4510  */
4511 #define	IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL ||		\
4512 	(vp)->v_type == VCHR ||	(vp)->v_type == VBAD)
4513 
4514 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
4515 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0,
4516     "Drop into debugger on lock violation");
4517 
4518 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
4519 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex,
4520     0, "Check for interlock across VOPs");
4521 
4522 int vfs_badlock_print = 1;	/* Print lock violations. */
4523 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print,
4524     0, "Print lock violations");
4525 
4526 int vfs_badlock_vnode = 1;	/* Print vnode details on lock violations. */
4527 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode,
4528     0, "Print vnode details on lock violations");
4529 
4530 #ifdef KDB
4531 int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
4532 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW,
4533     &vfs_badlock_backtrace, 0, "Print backtrace at lock violations");
4534 #endif
4535 
4536 static void
4537 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
4538 {
4539 
4540 #ifdef KDB
4541 	if (vfs_badlock_backtrace)
4542 		kdb_backtrace();
4543 #endif
4544 	if (vfs_badlock_vnode)
4545 		vn_printf(vp, "vnode ");
4546 	if (vfs_badlock_print)
4547 		printf("%s: %p %s\n", str, (void *)vp, msg);
4548 	if (vfs_badlock_ddb)
4549 		kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
4550 }
4551 
4552 void
4553 assert_vi_locked(struct vnode *vp, const char *str)
4554 {
4555 
4556 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
4557 		vfs_badlock("interlock is not locked but should be", str, vp);
4558 }
4559 
4560 void
4561 assert_vi_unlocked(struct vnode *vp, const char *str)
4562 {
4563 
4564 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
4565 		vfs_badlock("interlock is locked but should not be", str, vp);
4566 }
4567 
4568 void
4569 assert_vop_locked(struct vnode *vp, const char *str)
4570 {
4571 	int locked;
4572 
4573 	if (!IGNORE_LOCK(vp)) {
4574 		locked = VOP_ISLOCKED(vp);
4575 		if (locked == 0 || locked == LK_EXCLOTHER)
4576 			vfs_badlock("is not locked but should be", str, vp);
4577 	}
4578 }
4579 
4580 void
4581 assert_vop_unlocked(struct vnode *vp, const char *str)
4582 {
4583 
4584 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
4585 		vfs_badlock("is locked but should not be", str, vp);
4586 }
4587 
4588 void
4589 assert_vop_elocked(struct vnode *vp, const char *str)
4590 {
4591 
4592 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
4593 		vfs_badlock("is not exclusive locked but should be", str, vp);
4594 }
4595 #endif /* DEBUG_VFS_LOCKS */
4596 
4597 void
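/*
 * Drop the references (and locks, where held) on the vnodes passed to
 * a VOP_RENAME that is not going to complete.
 */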
4598 vop_rename_fail(struct vop_rename_args *ap)
4599 {
4600 
4601 	if (ap->a_tvp != NULL)
4602 		vput(ap->a_tvp);
4603 	if (ap->a_tdvp == ap->a_tvp)
4604 		vrele(ap->a_tdvp);
4605 	else
4606 		vput(ap->a_tdvp);
4607 	vrele(ap->a_fdvp);
4608 	vrele(ap->a_fvp);
4609 }
4610 
4611 void
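/*
 * Pre-operation hook for VOP_RENAME: assert the expected locking state
 * of the vnodes involved and take hold references that are dropped
 * again in vop_rename_post().
 */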
4612 vop_rename_pre(void *ap)
4613 {
4614 	struct vop_rename_args *a = ap;
4615 
4616 #ifdef DEBUG_VFS_LOCKS
4617 	if (a->a_tvp)
4618 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
4619 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
4620 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
4621 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
4622 
4623 	/* Check the source (from). */
4624 	if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock &&
4625 	    (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock))
4626 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
4627 	if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock)
4628 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
4629 
4630 	/* Check the target. */
4631 	if (a->a_tvp)
4632 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
4633 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
4634 #endif
4635 	if (a->a_tdvp != a->a_fdvp)
4636 		vhold(a->a_fdvp);
4637 	if (a->a_tvp != a->a_fvp)
4638 		vhold(a->a_fvp);
4639 	vhold(a->a_tdvp);
4640 	if (a->a_tvp)
4641 		vhold(a->a_tvp);
4642 }
4643 
4644 #ifdef DEBUG_VFS_LOCKS
4645 void
4646 vop_strategy_pre(void *ap)
4647 {
4648 	struct vop_strategy_args *a;
4649 	struct buf *bp;
4650 
4651 	a = ap;
4652 	bp = a->a_bp;
4653 
4654 	/*
4655 	 * Cluster ops lock their component buffers but not the IO container.
4656 	 */
4657 	if ((bp->b_flags & B_CLUSTER) != 0)
4658 		return;
4659 
4660 	if (panicstr == NULL && !BUF_ISLOCKED(bp)) {
4661 		if (vfs_badlock_print)
4662 			printf(
4663 			    "VOP_STRATEGY: bp is not locked but should be\n");
4664 		if (vfs_badlock_ddb)
4665 			kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
4666 	}
4667 }
4668 
4669 void
4670 vop_lock_pre(void *ap)
4671 {
4672 	struct vop_lock1_args *a = ap;
4673 
4674 	if ((a->a_flags & LK_INTERLOCK) == 0)
4675 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
4676 	else
4677 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
4678 }
4679 
4680 void
4681 vop_lock_post(void *ap, int rc)
4682 {
4683 	struct vop_lock1_args *a = ap;
4684 
4685 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
4686 	if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0)
4687 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
4688 }
4689 
4690 void
4691 vop_unlock_pre(void *ap)
4692 {
4693 	struct vop_unlock_args *a = ap;
4694 
4695 	if (a->a_flags & LK_INTERLOCK)
4696 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
4697 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
4698 }
4699 
4700 void
4701 vop_unlock_post(void *ap, int rc)
4702 {
4703 	struct vop_unlock_args *a = ap;
4704 
4705 	if (a->a_flags & LK_INTERLOCK)
4706 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
4707 }
4708 #endif
4709 
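/*
 * Post-operation hooks for the VOPs below: on success they post the
 * knote(s) appropriate to the operation; vop_rename_post() additionally
 * drops the holds taken in vop_rename_pre().
 */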
4710 void
4711 vop_create_post(void *ap, int rc)
4712 {
4713 	struct vop_create_args *a = ap;
4714 
4715 	if (!rc)
4716 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4717 }
4718 
4719 void
4720 vop_deleteextattr_post(void *ap, int rc)
4721 {
4722 	struct vop_deleteextattr_args *a = ap;
4723 
4724 	if (!rc)
4725 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4726 }
4727 
4728 void
4729 vop_link_post(void *ap, int rc)
4730 {
4731 	struct vop_link_args *a = ap;
4732 
4733 	if (!rc) {
4734 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
4735 		VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
4736 	}
4737 }
4738 
4739 void
4740 vop_mkdir_post(void *ap, int rc)
4741 {
4742 	struct vop_mkdir_args *a = ap;
4743 
4744 	if (!rc)
4745 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
4746 }
4747 
4748 void
4749 vop_mknod_post(void *ap, int rc)
4750 {
4751 	struct vop_mknod_args *a = ap;
4752 
4753 	if (!rc)
4754 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4755 }
4756 
4757 void
4758 vop_reclaim_post(void *ap, int rc)
4759 {
4760 	struct vop_reclaim_args *a = ap;
4761 
4762 	if (!rc)
4763 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_REVOKE);
4764 }
4765 
4766 void
4767 vop_remove_post(void *ap, int rc)
4768 {
4769 	struct vop_remove_args *a = ap;
4770 
4771 	if (!rc) {
4772 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4773 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
4774 	}
4775 }
4776 
4777 void
4778 vop_rename_post(void *ap, int rc)
4779 {
4780 	struct vop_rename_args *a = ap;
4781 	long hint;
4782 
4783 	if (!rc) {
4784 		hint = NOTE_WRITE;
4785 		if (a->a_fdvp == a->a_tdvp) {
4786 			if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR)
4787 				hint |= NOTE_LINK;
4788 			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
4789 			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
4790 		} else {
4791 			hint |= NOTE_EXTEND;
4792 			if (a->a_fvp->v_type == VDIR)
4793 				hint |= NOTE_LINK;
4794 			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
4795 
4796 			if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL &&
4797 			    a->a_tvp->v_type == VDIR)
4798 				hint &= ~NOTE_LINK;
4799 			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
4800 		}
4801 
4802 		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
4803 		if (a->a_tvp)
4804 			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
4805 	}
4806 	if (a->a_tdvp != a->a_fdvp)
4807 		vdrop(a->a_fdvp);
4808 	if (a->a_tvp != a->a_fvp)
4809 		vdrop(a->a_fvp);
4810 	vdrop(a->a_tdvp);
4811 	if (a->a_tvp)
4812 		vdrop(a->a_tvp);
4813 }
4814 
4815 void
4816 vop_rmdir_post(void *ap, int rc)
4817 {
4818 	struct vop_rmdir_args *a = ap;
4819 
4820 	if (!rc) {
4821 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
4822 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
4823 	}
4824 }
4825 
4826 void
4827 vop_setattr_post(void *ap, int rc)
4828 {
4829 	struct vop_setattr_args *a = ap;
4830 
4831 	if (!rc)
4832 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4833 }
4834 
4835 void
4836 vop_setextattr_post(void *ap, int rc)
4837 {
4838 	struct vop_setextattr_args *a = ap;
4839 
4840 	if (!rc)
4841 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
4842 }
4843 
4844 void
4845 vop_symlink_post(void *ap, int rc)
4846 {
4847 	struct vop_symlink_args *a = ap;
4848 
4849 	if (!rc)
4850 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4851 }
4852 
4853 void
4854 vop_open_post(void *ap, int rc)
4855 {
4856 	struct vop_open_args *a = ap;
4857 
4858 	if (!rc)
4859 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
4860 }
4861 
4862 void
4863 vop_close_post(void *ap, int rc)
4864 {
4865 	struct vop_close_args *a = ap;
4866 
4867 	if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
4868 	    (a->a_vp->v_iflag & VI_DOOMED) == 0)) {
4869 		VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
4870 		    NOTE_CLOSE_WRITE : NOTE_CLOSE);
4871 	}
4872 }
4873 
4874 void
4875 vop_read_post(void *ap, int rc)
4876 {
4877 	struct vop_read_args *a = ap;
4878 
4879 	if (!rc)
4880 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
4881 }
4882 
4883 void
4884 vop_readdir_post(void *ap, int rc)
4885 {
4886 	struct vop_readdir_args *a = ap;
4887 
4888 	if (!rc)
4889 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
4890 }
4891 
4892 static struct knlist fs_knlist;
4893 
4894 static void
4895 vfs_event_init(void *arg)
4896 {
4897 	knlist_init_mtx(&fs_knlist, NULL);
4898 }
4899 /* XXX - correct order? */
4900 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
4901 
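/*
 * Post a filesystem-wide event to any EVFILT_FS listeners; the data
 * argument is currently unused.
 */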
4902 void
4903 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
4904 {
4905 
4906 	KNOTE_UNLOCKED(&fs_knlist, event);
4907 }
4908 
4909 static int	filt_fsattach(struct knote *kn);
4910 static void	filt_fsdetach(struct knote *kn);
4911 static int	filt_fsevent(struct knote *kn, long hint);
4912 
4913 struct filterops fs_filtops = {
4914 	.f_isfd = 0,
4915 	.f_attach = filt_fsattach,
4916 	.f_detach = filt_fsdetach,
4917 	.f_event = filt_fsevent
4918 };
4919 
4920 static int
4921 filt_fsattach(struct knote *kn)
4922 {
4923 
4924 	kn->kn_flags |= EV_CLEAR;
4925 	knlist_add(&fs_knlist, kn, 0);
4926 	return (0);
4927 }
4928 
4929 static void
4930 filt_fsdetach(struct knote *kn)
4931 {
4932 
4933 	knlist_remove(&fs_knlist, kn, 0);
4934 }
4935 
4936 static int
4937 filt_fsevent(struct knote *kn, long hint)
4938 {
4939 
4940 	kn->kn_fflags |= hint;
4941 	return (kn->kn_fflags != 0);
4942 }
4943 
4944 static int
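/*
 * Handler for the vfs.ctl sysctl: look up the mount point named by the
 * supplied fsid and forward the request to its VFS_SYSCTL() method.
 */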
4945 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
4946 {
4947 	struct vfsidctl vc;
4948 	int error;
4949 	struct mount *mp;
4950 
4951 	error = SYSCTL_IN(req, &vc, sizeof(vc));
4952 	if (error)
4953 		return (error);
4954 	if (vc.vc_vers != VFS_CTL_VERS1)
4955 		return (EINVAL);
4956 	mp = vfs_getvfs(&vc.vc_fsid);
4957 	if (mp == NULL)
4958 		return (ENOENT);
4959 	/* ensure that a specific sysctl goes to the right filesystem. */
4960 	if (strcmp(vc.vc_fstypename, "*") != 0 &&
4961 	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4962 		vfs_rel(mp);
4963 		return (EINVAL);
4964 	}
4965 	VCTLTOREQ(&vc, req);
4966 	error = VFS_SYSCTL(mp, vc.vc_op, req);
4967 	vfs_rel(mp);
4968 	return (error);
4969 }
4970 
4971 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_WR,
4972     NULL, 0, sysctl_vfs_ctl, "",
4973     "Sysctl by fsid");
4974 
4975 /*
4976  * Function to initialize a va_filerev field sensibly.
4977  * XXX: Wouldn't a random number make a lot more sense?
4978  */
4979 u_quad_t
4980 init_va_filerev(void)
4981 {
4982 	struct bintime bt;
4983 
4984 	getbinuptime(&bt);
4985 	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
4986 }
4987 
4988 static int	filt_vfsread(struct knote *kn, long hint);
4989 static int	filt_vfswrite(struct knote *kn, long hint);
4990 static int	filt_vfsvnode(struct knote *kn, long hint);
4991 static void	filt_vfsdetach(struct knote *kn);
4992 static struct filterops vfsread_filtops = {
4993 	.f_isfd = 1,
4994 	.f_detach = filt_vfsdetach,
4995 	.f_event = filt_vfsread
4996 };
4997 static struct filterops vfswrite_filtops = {
4998 	.f_isfd = 1,
4999 	.f_detach = filt_vfsdetach,
5000 	.f_event = filt_vfswrite
5001 };
5002 static struct filterops vfsvnode_filtops = {
5003 	.f_isfd = 1,
5004 	.f_detach = filt_vfsdetach,
5005 	.f_event = filt_vfsvnode
5006 };
5007 
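/*
 * Lock, unlock and lock-assertion callbacks for the per-vnode knote
 * list; the callback argument is the vnode owning the list.
 */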
5008 static void
5009 vfs_knllock(void *arg)
5010 {
5011 	struct vnode *vp = arg;
5012 
5013 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5014 }
5015 
5016 static void
5017 vfs_knlunlock(void *arg)
5018 {
5019 	struct vnode *vp = arg;
5020 
5021 	VOP_UNLOCK(vp, 0);
5022 }
5023 
5024 static void
5025 vfs_knl_assert_locked(void *arg)
5026 {
5027 #ifdef DEBUG_VFS_LOCKS
5028 	struct vnode *vp = arg;
5029 
5030 	ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
5031 #endif
5032 }
5033 
5034 static void
5035 vfs_knl_assert_unlocked(void *arg)
5036 {
5037 #ifdef DEBUG_VFS_LOCKS
5038 	struct vnode *vp = arg;
5039 
5040 	ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
5041 #endif
5042 }
5043 
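/*
 * Attach a knote to a vnode: select the filter ops for the requested
 * filter type, make sure poll state exists, and add the knote to the
 * vnode's knote list while holding the vnode.
 */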
5044 int
5045 vfs_kqfilter(struct vop_kqfilter_args *ap)
5046 {
5047 	struct vnode *vp = ap->a_vp;
5048 	struct knote *kn = ap->a_kn;
5049 	struct knlist *knl;
5050 
5051 	switch (kn->kn_filter) {
5052 	case EVFILT_READ:
5053 		kn->kn_fop = &vfsread_filtops;
5054 		break;
5055 	case EVFILT_WRITE:
5056 		kn->kn_fop = &vfswrite_filtops;
5057 		break;
5058 	case EVFILT_VNODE:
5059 		kn->kn_fop = &vfsvnode_filtops;
5060 		break;
5061 	default:
5062 		return (EINVAL);
5063 	}
5064 
5065 	kn->kn_hook = (caddr_t)vp;
5066 
5067 	v_addpollinfo(vp);
5068 	if (vp->v_pollinfo == NULL)
5069 		return (ENOMEM);
5070 	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
5071 	vhold(vp);
5072 	knlist_add(knl, kn, 0);
5073 
5074 	return (0);
5075 }
5076 
5077 /*
5078  * Detach knote from vnode
5079  */
5080 static void
5081 filt_vfsdetach(struct knote *kn)
5082 {
5083 	struct vnode *vp = (struct vnode *)kn->kn_hook;
5084 
5085 	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
5086 	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
5087 	vdrop(vp);
5088 }
5089 
5090 /*ARGSUSED*/
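/*
 * EVFILT_READ filter: report how much data is available past the
 * current file offset, or EOF once the vnode has been revoked.
 */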
5091 static int
5092 filt_vfsread(struct knote *kn, long hint)
5093 {
5094 	struct vnode *vp = (struct vnode *)kn->kn_hook;
5095 	struct vattr va;
5096 	int res;
5097 
5098 	/*
5099 	 * filesystem is gone, so set the EOF flag and schedule
5100 	 * the knote for deletion.
5101 	 */
5102 	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
5103 		VI_LOCK(vp);
5104 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
5105 		VI_UNLOCK(vp);
5106 		return (1);
5107 	}
5108 
5109 	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
5110 		return (0);
5111 
5112 	VI_LOCK(vp);
5113 	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
5114 	res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
5115 	VI_UNLOCK(vp);
5116 	return (res);
5117 }
5118 
5119 /*ARGSUSED*/
5120 static int
5121 filt_vfswrite(struct knote *kn, long hint)
5122 {
5123 	struct vnode *vp = (struct vnode *)kn->kn_hook;
5124 
5125 	VI_LOCK(vp);
5126 
5127 	/*
5128 	 * filesystem is gone, so set the EOF flag and schedule
5129 	 * the knote for deletion.
5130 	 */
5131 	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
5132 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
5133 
5134 	kn->kn_data = 0;
5135 	VI_UNLOCK(vp);
5136 	return (1);
5137 }
5138 
5139 static int
5140 filt_vfsvnode(struct knote *kn, long hint)
5141 {
5142 	struct vnode *vp = (struct vnode *)kn->kn_hook;
5143 	int res;
5144 
5145 	VI_LOCK(vp);
5146 	if (kn->kn_sfflags & hint)
5147 		kn->kn_fflags |= hint;
5148 	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
5149 		kn->kn_flags |= EV_EOF;
5150 		VI_UNLOCK(vp);
5151 		return (1);
5152 	}
5153 	res = (kn->kn_fflags != 0);
5154 	VI_UNLOCK(vp);
5155 	return (res);
5156 }
5157 
5158 int
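/*
 * Helper for VOP_READDIR implementations: copy one dirent out to the
 * caller's uio and, if cookies were requested, append the cookie for
 * the entry at offset "off".
 */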
5159 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
5160 {
5161 	int error;
5162 
5163 	if (dp->d_reclen > ap->a_uio->uio_resid)
5164 		return (ENAMETOOLONG);
5165 	error = uiomove(dp, dp->d_reclen, ap->a_uio);
5166 	if (error) {
5167 		if (ap->a_ncookies != NULL) {
5168 			if (ap->a_cookies != NULL)
5169 				free(ap->a_cookies, M_TEMP);
5170 			ap->a_cookies = NULL;
5171 			*ap->a_ncookies = 0;
5172 		}
5173 		return (error);
5174 	}
5175 	if (ap->a_ncookies == NULL)
5176 		return (0);
5177 
5178 	KASSERT(ap->a_cookies,
5179 	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
5180 
5181 	*ap->a_cookies = realloc(*ap->a_cookies,
5182 	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
5183 	(*ap->a_cookies)[*ap->a_ncookies] = off;
5184 	*ap->a_ncookies += 1;
5185 	return (0);
5186 }
5187 
5188 /*
5189  * Mark for update the access time of the file if the filesystem
5190  * supports VOP_MARKATIME.  This functionality is used by execve and
5191  * mmap, so we want to avoid the I/O implied by directly setting
5192  * va_atime for the sake of efficiency.
5193  */
5194 void
5195 vfs_mark_atime(struct vnode *vp, struct ucred *cred)
5196 {
5197 	struct mount *mp;
5198 
5199 	mp = vp->v_mount;
5200 	ASSERT_VOP_LOCKED(vp, "vfs_mark_atime");
5201 	if (mp != NULL && (mp->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
5202 		(void)VOP_MARKATIME(vp);
5203 }
5204 
5205 /*
5206  * The purpose of this routine is to remove granularity from accmode_t,
5207  * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
5208  * VADMIN and VAPPEND.
5209  *
5210  * If it returns 0, the caller is supposed to continue with the usual
5211  * access checks using 'accmode' as modified by this routine.  If it
5212  * returns nonzero value, the caller is supposed to return that value
5213  * as errno.
5214  *
5215  * Note that after this routine runs, accmode may be zero.
5216  */
5217 int
5218 vfs_unixify_accmode(accmode_t *accmode)
5219 {
5220 	/*
5221 	 * There is no way to specify an explicit "deny" rule using
5222 	 * file mode or POSIX.1e ACLs.
5223 	 */
5224 	if (*accmode & VEXPLICIT_DENY) {
5225 		*accmode = 0;
5226 		return (0);
5227 	}
5228 
5229 	/*
5230 	 * None of these can be translated into usual access bits.
5231 	 * Also, the common case for NFSv4 ACLs is to not contain
5232 	 * either of these bits. Caller should check for VWRITE
5233 	 * on the containing directory instead.
5234 	 */
5235 	if (*accmode & (VDELETE_CHILD | VDELETE))
5236 		return (EPERM);
5237 
5238 	if (*accmode & VADMIN_PERMS) {
5239 		*accmode &= ~VADMIN_PERMS;
5240 		*accmode |= VADMIN;
5241 	}
5242 
5243 	/*
5244 	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
5245 	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
5246 	 */
5247 	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
5248 
5249 	return (0);
5250 }
5251 
5252 /*
5253  * These are helper functions for filesystems to traverse all
5254  * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
5255  *
5256  * This interface replaces MNT_VNODE_FOREACH.
5257  */
5258 
5259 MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
5260 
5261 struct vnode *
5262 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
5263 {
5264 	struct vnode *vp;
5265 
5266 	if (should_yield())
5267 		kern_yield(PRI_USER);
5268 	MNT_ILOCK(mp);
5269 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5270 	vp = TAILQ_NEXT(*mvp, v_nmntvnodes);
5271 	while (vp != NULL && (vp->v_type == VMARKER ||
5272 	    (vp->v_iflag & VI_DOOMED) != 0))
5273 		vp = TAILQ_NEXT(vp, v_nmntvnodes);
5274 
5275 	/* Check if we are done */
5276 	if (vp == NULL) {
5277 		__mnt_vnode_markerfree_all(mvp, mp);
5278 		/* MNT_IUNLOCK(mp); -- done in above function */
5279 		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
5280 		return (NULL);
5281 	}
5282 	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
5283 	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
5284 	VI_LOCK(vp);
5285 	MNT_IUNLOCK(mp);
5286 	return (vp);
5287 }
5288 
5289 struct vnode *
5290 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
5291 {
5292 	struct vnode *vp;
5293 
5294 	*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
5295 	MNT_ILOCK(mp);
5296 	MNT_REF(mp);
5297 	(*mvp)->v_type = VMARKER;
5298 
5299 	vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
5300 	while (vp != NULL && (vp->v_type == VMARKER ||
5301 	    (vp->v_iflag & VI_DOOMED) != 0))
5302 		vp = TAILQ_NEXT(vp, v_nmntvnodes);
5303 
5304 	/* Check if we are done */
5305 	if (vp == NULL) {
5306 		MNT_REL(mp);
5307 		MNT_IUNLOCK(mp);
5308 		free(*mvp, M_VNODE_MARKER);
5309 		*mvp = NULL;
5310 		return (NULL);
5311 	}
5312 	(*mvp)->v_mount = mp;
5313 	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
5314 	VI_LOCK(vp);
5315 	MNT_IUNLOCK(mp);
5316 	return (vp);
5317 }
5318 
5319 
5320 void
5321 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
5322 {
5323 
5324 	if (*mvp == NULL) {
5325 		MNT_IUNLOCK(mp);
5326 		return;
5327 	}
5328 
5329 	mtx_assert(MNT_MTX(mp), MA_OWNED);
5330 
5331 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5332 	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
5333 	MNT_REL(mp);
5334 	MNT_IUNLOCK(mp);
5335 	free(*mvp, M_VNODE_MARKER);
5336 	*mvp = NULL;
5337 }
5338 
5339 /*
5340  * These are helper functions for filesystems to traverse their
5341  * active vnodes.  See MNT_VNODE_FOREACH_ACTIVE() in sys/mount.h
5342  */
5343 static void
5344 mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
5345 {
5346 
5347 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5348 
5349 	MNT_ILOCK(mp);
5350 	MNT_REL(mp);
5351 	MNT_IUNLOCK(mp);
5352 	free(*mvp, M_VNODE_MARKER);
5353 	*mvp = NULL;
5354 }
5355 
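/*
 * Advance the iteration marker to the next active vnode that can be
 * interlocked and return it with its interlock held, or return NULL
 * (freeing the marker) when the end of the list is reached.
 */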
5356 static struct vnode *
5357 mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
5358 {
5359 	struct vnode *vp, *nvp;
5360 
5361 	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
5362 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
5363 restart:
5364 	vp = TAILQ_NEXT(*mvp, v_actfreelist);
5365 	TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
5366 	while (vp != NULL) {
5367 		if (vp->v_type == VMARKER) {
5368 			vp = TAILQ_NEXT(vp, v_actfreelist);
5369 			continue;
5370 		}
5371 		if (!VI_TRYLOCK(vp)) {
5372 			if (mp_ncpus == 1 || should_yield()) {
5373 				TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist);
5374 				mtx_unlock(&mp->mnt_listmtx);
5375 				pause("vnacti", 1);
5376 				mtx_lock(&mp->mnt_listmtx);
5377 				goto restart;
5378 			}
5379 			continue;
5380 		}
5381 		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
5382 		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
5383 		    ("alien vnode on the active list %p %p", vp, mp));
5384 		if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0)
5385 			break;
5386 		nvp = TAILQ_NEXT(vp, v_actfreelist);
5387 		VI_UNLOCK(vp);
5388 		vp = nvp;
5389 	}
5390 
5391 	/* Check if we are done */
5392 	if (vp == NULL) {
5393 		mtx_unlock(&mp->mnt_listmtx);
5394 		mnt_vnode_markerfree_active(mvp, mp);
5395 		return (NULL);
5396 	}
5397 	TAILQ_INSERT_AFTER(&mp->mnt_activevnodelist, vp, *mvp, v_actfreelist);
5398 	mtx_unlock(&mp->mnt_listmtx);
5399 	ASSERT_VI_LOCKED(vp, "active iter");
5400 	KASSERT((vp->v_iflag & VI_ACTIVE) != 0, ("Non-active vp %p", vp));
5401 	return (vp);
5402 }
5403 
5404 struct vnode *
5405 __mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
5406 {
5407 
5408 	if (should_yield())
5409 		kern_yield(PRI_USER);
5410 	mtx_lock(&mp->mnt_listmtx);
5411 	return (mnt_vnode_next_active(mvp, mp));
5412 }
5413 
5414 struct vnode *
5415 __mnt_vnode_first_active(struct vnode **mvp, struct mount *mp)
5416 {
5417 	struct vnode *vp;
5418 
5419 	*mvp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
5420 	MNT_ILOCK(mp);
5421 	MNT_REF(mp);
5422 	MNT_IUNLOCK(mp);
5423 	(*mvp)->v_type = VMARKER;
5424 	(*mvp)->v_mount = mp;
5425 
5426 	mtx_lock(&mp->mnt_listmtx);
5427 	vp = TAILQ_FIRST(&mp->mnt_activevnodelist);
5428 	if (vp == NULL) {
5429 		mtx_unlock(&mp->mnt_listmtx);
5430 		mnt_vnode_markerfree_active(mvp, mp);
5431 		return (NULL);
5432 	}
5433 	TAILQ_INSERT_BEFORE(vp, *mvp, v_actfreelist);
5434 	return (mnt_vnode_next_active(mvp, mp));
5435 }
5436 
5437 void
5438 __mnt_vnode_markerfree_active(struct vnode **mvp, struct mount *mp)
5439 {
5440 
5441 	if (*mvp == NULL)
5442 		return;
5443 
5444 	mtx_lock(&mp->mnt_listmtx);
5445 	TAILQ_REMOVE(&mp->mnt_activevnodelist, *mvp, v_actfreelist);
5446 	mtx_unlock(&mp->mnt_listmtx);
5447 	mnt_vnode_markerfree_active(mvp, mp);
5448 }
5449