xref: /freebsd/sys/kern/vfs_subr.c (revision b51f459a2098622c31ed54f5c1bf0e03efce403b)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
37  */
38 
39 /*
40  * External virtual filesystem routines
41  */
42 
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45 
46 #include "opt_ddb.h"
47 #include "opt_watchdog.h"
48 
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/asan.h>
52 #include <sys/bio.h>
53 #include <sys/buf.h>
54 #include <sys/capsicum.h>
55 #include <sys/condvar.h>
56 #include <sys/conf.h>
57 #include <sys/counter.h>
58 #include <sys/dirent.h>
59 #include <sys/event.h>
60 #include <sys/eventhandler.h>
61 #include <sys/extattr.h>
62 #include <sys/file.h>
63 #include <sys/fcntl.h>
64 #include <sys/jail.h>
65 #include <sys/kdb.h>
66 #include <sys/kernel.h>
67 #include <sys/kthread.h>
68 #include <sys/ktr.h>
69 #include <sys/lockf.h>
70 #include <sys/malloc.h>
71 #include <sys/mount.h>
72 #include <sys/namei.h>
73 #include <sys/pctrie.h>
74 #include <sys/priv.h>
75 #include <sys/reboot.h>
76 #include <sys/refcount.h>
77 #include <sys/rwlock.h>
78 #include <sys/sched.h>
79 #include <sys/sleepqueue.h>
80 #include <sys/smr.h>
81 #include <sys/smp.h>
82 #include <sys/stat.h>
83 #include <sys/sysctl.h>
84 #include <sys/syslog.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vnode.h>
87 #include <sys/watchdog.h>
88 
89 #include <machine/stdarg.h>
90 
91 #include <security/mac/mac_framework.h>
92 
93 #include <vm/vm.h>
94 #include <vm/vm_object.h>
95 #include <vm/vm_extern.h>
96 #include <vm/pmap.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_page.h>
99 #include <vm/vm_kern.h>
100 #include <vm/uma.h>
101 
102 #ifdef DDB
103 #include <ddb/ddb.h>
104 #endif
105 
106 static void	delmntque(struct vnode *vp);
107 static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
108 		    int slpflag, int slptimeo);
109 static void	syncer_shutdown(void *arg, int howto);
110 static int	vtryrecycle(struct vnode *vp);
111 static void	v_init_counters(struct vnode *);
112 static void	vn_seqc_init(struct vnode *);
113 static void	vn_seqc_write_end_free(struct vnode *vp);
114 static void	vgonel(struct vnode *);
115 static bool	vhold_recycle_free(struct vnode *);
116 static void	vfs_knllock(void *arg);
117 static void	vfs_knlunlock(void *arg);
118 static void	vfs_knl_assert_lock(void *arg, int what);
119 static void	destroy_vpollinfo(struct vpollinfo *vi);
120 static int	v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
121 		    daddr_t startlbn, daddr_t endlbn);
122 static void	vnlru_recalc(void);
123 
124 /*
125  * These fences are intended for cases where some synchronization is
126  * needed between access of v_iflags and lockless vnode refcount (v_holdcnt
127  * and v_usecount) updates.  Access to v_iflags is generally synchronized
128  * by the interlock, but we have some internal assertions that check vnode
129  * flags without acquiring the lock.  Thus, these fences are INVARIANTS-only
130  * for now.
131  */
132 #ifdef INVARIANTS
133 #define	VNODE_REFCOUNT_FENCE_ACQ()	atomic_thread_fence_acq()
134 #define	VNODE_REFCOUNT_FENCE_REL()	atomic_thread_fence_rel()
135 #else
136 #define	VNODE_REFCOUNT_FENCE_ACQ()
137 #define	VNODE_REFCOUNT_FENCE_REL()
138 #endif
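/*
 * An illustrative pairing, not a specific call site: a writer updates
 * v_iflags under the interlock, issues VNODE_REFCOUNT_FENCE_REL() and then
 * performs the lockless refcount update, while a reader loads the refcount,
 * issues VNODE_REFCOUNT_FENCE_ACQ() and only then inspects the flags in an
 * assertion.
 */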
139 
140 /*
141  * Number of vnodes in existence.  Increased whenever getnewvnode()
142  * allocates a new vnode, decreased in vdropl() for a VIRF_DOOMED vnode.
143  */
144 static u_long __exclusive_cache_line numvnodes;
145 
146 SYSCTL_ULONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0,
147     "Number of vnodes in existence");
148 
149 static counter_u64_t vnodes_created;
150 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, vnodes_created, CTLFLAG_RD, &vnodes_created,
151     "Number of vnodes created by getnewvnode");
152 
153 /*
154  * Conversion tables for conversion from vnode types to inode formats
155  * and back.
156  */
157 enum vtype iftovt_tab[16] = {
158 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
159 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VNON
160 };
161 int vttoif_tab[10] = {
162 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
163 	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
164 };
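/*
 * These tables back the IFTOVT() and VTTOIF() macros from sys/vnode.h.
 * For example, S_IFDIR is 0040000, so IFTOVT(S_IFDIR) evaluates to
 * iftovt_tab[0040000 >> 12] == iftovt_tab[4] == VDIR, and VTTOIF(VDIR)
 * is vttoif_tab[2] == S_IFDIR.
 */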
165 
166 /*
167  * List of allocated vnodes in the system.
168  */
169 static TAILQ_HEAD(freelst, vnode) vnode_list;
170 static struct vnode *vnode_list_free_marker;
171 static struct vnode *vnode_list_reclaim_marker;
172 
173 /*
174  * "Free" vnode target.  Free vnodes are rarely completely free, but are
175  * just ones that are cheap to recycle.  Usually they are for files which
176  * have been stat'd but not read; these usually have inode and namecache
177  * data attached to them.  This target is the preferred minimum size of a
178  * sub-cache consisting mostly of such files. The system balances the size
179  * of this sub-cache with its complement to try to prevent either from
180  * thrashing while the other is relatively inactive.  The targets express
181  * a preference for the best balance.
182  *
183  * "Above" this target there are 2 further targets (watermarks) related
184  * to recycling of free vnodes.  In the best-operating case, the cache is
185  * exactly full, the free list has size between vlowat and vhiwat above the
186  * free target, and recycling from it and normal use maintains this state.
187  * Sometimes the free list is below vlowat or even empty, but this state
188  * is even better for immediate use provided the cache is not full.
189  * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
190  * ones) to reach one of these states.  The watermarks are currently hard-
191  * coded as 4% and 9% of the available space higher.  These and the default
192  * of 25% for wantfreevnodes are too large if the memory size is large.
193  * E.g., 9% of 75% of MAXVNODES is more than 566000 vnodes to reclaim
194  * whenever vnlru_proc() becomes active.
195  */
196 static long wantfreevnodes;
197 static long __exclusive_cache_line freevnodes;
198 SYSCTL_ULONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD,
199     &freevnodes, 0, "Number of \"free\" vnodes");
200 static long freevnodes_old;
201 
202 static counter_u64_t recycles_count;
203 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
204     "Number of vnodes recycled to meet vnode cache targets");
205 
206 static counter_u64_t recycles_free_count;
207 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles_free, CTLFLAG_RD, &recycles_free_count,
208     "Number of free vnodes recycled to meet vnode cache targets");
209 
210 static counter_u64_t deferred_inact;
211 SYSCTL_COUNTER_U64(_vfs, OID_AUTO, deferred_inact, CTLFLAG_RD, &deferred_inact,
212     "Number of times inactive processing was deferred");
213 
214 /* To keep more than one thread at a time from running vfs_getnewfsid */
215 static struct mtx mntid_mtx;
216 
217 /*
218  * Lock for any access to the following:
219  *	vnode_list
220  *	numvnodes
221  *	freevnodes
222  */
223 static struct mtx __exclusive_cache_line vnode_list_mtx;
224 
225 /* Publicly exported FS */
226 struct nfs_public nfs_pub;
227 
228 static uma_zone_t buf_trie_zone;
229 static smr_t buf_trie_smr;
230 
231 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
232 static uma_zone_t vnode_zone;
233 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");
234 
235 __read_frequently smr_t vfs_smr;
236 
237 /*
238  * The workitem queue.
239  *
240  * It is useful to delay writes of file data and filesystem metadata
241  * for tens of seconds so that quickly created and deleted files need
242  * not waste disk bandwidth being created and removed. To realize this,
243  * we append vnodes to a "workitem" queue. When running with a soft
244  * updates implementation, most pending metadata dependencies should
245  * not wait for more than a few seconds. Thus, metadata updates on
246  * mounted block devices are delayed only about half the time that file
247  * data is delayed. Similarly, directory updates are more critical, so
248  * they are delayed only about a third of that time. Thus, there are
249  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
250  * one each second (driven off the filesystem syncer process). The
251  * syncer_delayno variable indicates the next queue that is to be processed.
252  * Items that need to be processed soon are placed in this queue:
253  *
254  *	syncer_workitem_pending[syncer_delayno]
255  *
256  * A delay of fifteen seconds is done by placing the request fifteen
257  * entries later in the queue:
258  *
259  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
260  *
261  */
262 static int syncer_delayno;
263 static long syncer_mask;
264 LIST_HEAD(synclist, bufobj);
265 static struct synclist *syncer_workitem_pending;
266 /*
267  * The sync_mtx protects:
268  *	bo->bo_synclist
269  *	sync_vnode_count
270  *	syncer_delayno
271  *	syncer_state
272  *	syncer_workitem_pending
273  *	syncer_worklist_len
274  *	rushjob
275  */
276 static struct mtx sync_mtx;
277 static struct cv sync_wakeup;
278 
279 #define SYNCER_MAXDELAY		32
280 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
281 static int syncdelay = 30;		/* max time to delay syncing data */
282 static int filedelay = 30;		/* time to delay syncing files */
283 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0,
284     "Time to delay syncing files (in seconds)");
285 static int dirdelay = 29;		/* time to delay syncing directories */
286 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0,
287     "Time to delay syncing directories (in seconds)");
288 static int metadelay = 28;		/* time to delay syncing metadata */
289 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0,
290     "Time to delay syncing metadata (in seconds)");
291 static int rushjob;		/* number of slots to run ASAP */
292 static int stat_rush_requests;	/* number of times I/O speeded up */
293 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0,
294     "Number of times I/O speeded up (rush requests)");
295 
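/*
 * Per-CPU batches of vnodes pending a lazy requeue onto the global
 * vnode_list, plus a CPU-local freevnodes delta that is periodically
 * folded into the global counter (see vnlru_read_freevnodes() below).
 */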
296 #define	VDBATCH_SIZE 8
297 struct vdbatch {
298 	u_int index;
299 	long freevnodes;
300 	struct mtx lock;
301 	struct vnode *tab[VDBATCH_SIZE];
302 };
303 DPCPU_DEFINE_STATIC(struct vdbatch, vd);
304 
305 static void	vdbatch_dequeue(struct vnode *vp);
306 
307 /*
308  * When shutting down the syncer, run it at four times normal speed.
309  */
310 #define SYNCER_SHUTDOWN_SPEEDUP		4
311 static int sync_vnode_count;
312 static int syncer_worklist_len;
313 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
314     syncer_state;
315 
316 /* Target for maximum number of vnodes. */
317 u_long desiredvnodes;
318 static u_long gapvnodes;		/* gap between wanted and desired */
319 static u_long vhiwat;		/* enough extras after expansion */
320 static u_long vlowat;		/* minimal extras before expansion */
321 static u_long vstir;		/* nonzero to stir non-free vnodes */
322 static volatile int vsmalltrigger = 8;	/* pref to keep if > this many pages */
323 
324 static u_long vnlru_read_freevnodes(void);
325 
326 /*
327  * Note that no attempt is made to sanitize these parameters.
328  */
329 static int
330 sysctl_maxvnodes(SYSCTL_HANDLER_ARGS)
331 {
332 	u_long val;
333 	int error;
334 
335 	val = desiredvnodes;
336 	error = sysctl_handle_long(oidp, &val, 0, req);
337 	if (error != 0 || req->newptr == NULL)
338 		return (error);
339 
340 	if (val == desiredvnodes)
341 		return (0);
342 	mtx_lock(&vnode_list_mtx);
343 	desiredvnodes = val;
344 	wantfreevnodes = desiredvnodes / 4;
345 	vnlru_recalc();
346 	mtx_unlock(&vnode_list_mtx);
347 	/*
348 	 * XXX There is no protection against multiple threads changing
349 	 * desiredvnodes at the same time. Locking above only helps vnlru and
350 	 * getnewvnode.
351 	 */
352 	vfs_hash_changesize(desiredvnodes);
353 	cache_changesize(desiredvnodes);
354 	return (0);
355 }
356 
357 SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
358     CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_maxvnodes,
359     "LU", "Target for maximum number of vnodes");
360 
361 static int
362 sysctl_wantfreevnodes(SYSCTL_HANDLER_ARGS)
363 {
364 	u_long val;
365 	int error;
366 
367 	val = wantfreevnodes;
368 	error = sysctl_handle_long(oidp, &val, 0, req);
369 	if (error != 0 || req->newptr == NULL)
370 		return (error);
371 
372 	if (val == wantfreevnodes)
373 		return (0);
374 	mtx_lock(&vnode_list_mtx);
375 	wantfreevnodes = val;
376 	vnlru_recalc();
377 	mtx_unlock(&vnode_list_mtx);
378 	return (0);
379 }
380 
381 SYSCTL_PROC(_vfs, OID_AUTO, wantfreevnodes,
382     CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_wantfreevnodes,
383     "LU", "Target for minimum number of \"free\" vnodes");
384 
385 SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
386     &wantfreevnodes, 0, "Old name for vfs.wantfreevnodes (legacy)");
387 static int vnlru_nowhere;
388 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
389     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
390 
391 static int
392 sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
393 {
394 	struct vnode *vp;
395 	struct nameidata nd;
396 	char *buf;
397 	unsigned long ndflags;
398 	int error;
399 
400 	if (req->newptr == NULL)
401 		return (EINVAL);
402 	if (req->newlen >= PATH_MAX)
403 		return (E2BIG);
404 
405 	buf = malloc(PATH_MAX, M_TEMP, M_WAITOK);
406 	error = SYSCTL_IN(req, buf, req->newlen);
407 	if (error != 0)
408 		goto out;
409 
410 	buf[req->newlen] = '\0';
411 
412 	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME;
413 	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
414 	if ((error = namei(&nd)) != 0)
415 		goto out;
416 	vp = nd.ni_vp;
417 
418 	if (VN_IS_DOOMED(vp)) {
419 		/*
420 		 * This vnode is being recycled.  Return != 0 to let the caller
421 		 * know that the sysctl had no effect.  Return EAGAIN because a
422 		 * subsequent call will likely succeed (since namei will create
423 		 * a new vnode if necessary).
424 		 */
425 		error = EAGAIN;
426 		goto putvnode;
427 	}
428 
429 	counter_u64_add(recycles_count, 1);
430 	vgone(vp);
431 putvnode:
432 	NDFREE(&nd, 0);
433 out:
434 	free(buf, M_TEMP);
435 	return (error);
436 }
437 
438 static int
439 sysctl_ftry_reclaim_vnode(SYSCTL_HANDLER_ARGS)
440 {
441 	struct thread *td = curthread;
442 	struct vnode *vp;
443 	struct file *fp;
444 	int error;
445 	int fd;
446 
447 	if (req->newptr == NULL)
448 		return (EBADF);
449 
450 	error = sysctl_handle_int(oidp, &fd, 0, req);
451 	if (error != 0)
452 		return (error);
453 	error = getvnode(curthread, fd, &cap_fcntl_rights, &fp);
454 	if (error != 0)
455 		return (error);
456 	vp = fp->f_vnode;
457 
458 	error = vn_lock(vp, LK_EXCLUSIVE);
459 	if (error != 0)
460 		goto drop;
461 
462 	counter_u64_add(recycles_count, 1);
463 	vgone(vp);
464 	VOP_UNLOCK(vp);
465 drop:
466 	fdrop(fp, td);
467 	return (error);
468 }
469 
470 SYSCTL_PROC(_debug, OID_AUTO, try_reclaim_vnode,
471     CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
472     sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
473 SYSCTL_PROC(_debug, OID_AUTO, ftry_reclaim_vnode,
474     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_WR, NULL, 0,
475     sysctl_ftry_reclaim_vnode, "I",
476     "Try to reclaim a vnode by its file descriptor");
477 
478 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
479 static int vnsz2log;
480 
481 /*
482  * Support for the bufobj clean & dirty pctrie.
483  */
484 static void *
485 buf_trie_alloc(struct pctrie *ptree)
486 {
487 	return (uma_zalloc_smr(buf_trie_zone, M_NOWAIT));
488 }
489 
490 static void
491 buf_trie_free(struct pctrie *ptree, void *node)
492 {
493 	uma_zfree_smr(buf_trie_zone, node);
494 }
495 PCTRIE_DEFINE_SMR(BUF, buf, b_lblkno, buf_trie_alloc, buf_trie_free,
496     buf_trie_smr);
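/*
 * PCTRIE_DEFINE_SMR() above generates the BUF_PCTRIE_*() wrappers
 * (insert, lookup, remove, plus an SMR-protected unlocked lookup) used
 * to index buffers by their logical block number.
 */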
497 
498 /*
499  * Initialize the vnode management data structures.
500  *
501  * Reevaluate the following cap on the number of vnodes after the physical
502  * memory size exceeds 512GB.  In the limit, as the physical memory size
503  * grows, the ratio of the memory size in KB to vnodes approaches 64:1.
504  */
505 #ifndef	MAXVNODES_MAX
506 #define	MAXVNODES_MAX	(512UL * 1024 * 1024 / 64)	/* 8M */
507 #endif
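/*
 * Worked out, this is 512GB expressed in KB divided by the limiting
 * 64KB-per-vnode ratio, i.e. 8M vnodes.
 */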
508 
509 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
510 
511 static struct vnode *
512 vn_alloc_marker(struct mount *mp)
513 {
514 	struct vnode *vp;
515 
516 	vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO);
517 	vp->v_type = VMARKER;
518 	vp->v_mount = mp;
519 
520 	return (vp);
521 }
522 
523 static void
524 vn_free_marker(struct vnode *vp)
525 {
526 
527 	MPASS(vp->v_type == VMARKER);
528 	free(vp, M_VNODE_MARKER);
529 }
530 
531 #ifdef KASAN
532 static int
533 vnode_ctor(void *mem, int size, void *arg __unused, int flags __unused)
534 {
535 	kasan_mark(mem, size, roundup2(size, UMA_ALIGN_PTR + 1), 0);
536 	return (0);
537 }
538 
539 static void
540 vnode_dtor(void *mem, int size, void *arg __unused)
541 {
542 	size_t end1, end2, off1, off2;
543 
544 	_Static_assert(offsetof(struct vnode, v_vnodelist) <
545 	    offsetof(struct vnode, v_dbatchcpu),
546 	    "KASAN marks require updating");
547 
548 	off1 = offsetof(struct vnode, v_vnodelist);
549 	off2 = offsetof(struct vnode, v_dbatchcpu);
550 	end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist);
551 	end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu);
552 
553 	/*
554 	 * Access to the v_vnodelist and v_dbatchcpu fields is permitted even
555 	 * after the vnode has been freed.  Try to get some KASAN coverage by
556 	 * marking everything except those two fields as invalid.  Because
557 	 * KASAN's tracking is not byte-granular, any preceding fields sharing
558 	 * the same 8-byte aligned word must also be marked valid.
559 	 */
560 
561 	/* Handle the area from the start until v_vnodelist... */
562 	off1 = rounddown2(off1, KASAN_SHADOW_SCALE);
563 	kasan_mark(mem, off1, off1, KASAN_UMA_FREED);
564 
565 	/* ... then the area between v_vnodelist and v_dbatchcpu ... */
566 	off1 = roundup2(end1, KASAN_SHADOW_SCALE);
567 	off2 = rounddown2(off2, KASAN_SHADOW_SCALE);
568 	if (off2 > off1)
569 		kasan_mark((void *)((char *)mem + off1), off2 - off1,
570 		    off2 - off1, KASAN_UMA_FREED);
571 
572 	/* ... and finally the area from v_dbatchcpu to the end. */
573 	off2 = roundup2(end2, KASAN_SHADOW_SCALE);
574 	kasan_mark((void *)((char *)mem + off2), size - off2, size - off2,
575 	    KASAN_UMA_FREED);
576 }
577 #endif /* KASAN */
578 
579 /*
580  * Initialize a vnode as it first enters the zone.
581  */
582 static int
583 vnode_init(void *mem, int size, int flags)
584 {
585 	struct vnode *vp;
586 
587 	vp = mem;
588 	bzero(vp, size);
589 	/*
590 	 * Setup locks.
591 	 */
592 	vp->v_vnlock = &vp->v_lock;
593 	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
594 	/*
595 	 * By default, don't allow shared locks unless filesystems opt-in.
596 	 */
597 	lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT,
598 	    LK_NOSHARE | LK_IS_VNODE);
599 	/*
600 	 * Initialize bufobj.
601 	 */
602 	bufobj_init(&vp->v_bufobj, vp);
603 	/*
604 	 * Initialize namecache.
605 	 */
606 	cache_vnode_init(vp);
607 	/*
608 	 * Initialize rangelocks.
609 	 */
610 	rangelock_init(&vp->v_rl);
611 
612 	vp->v_dbatchcpu = NOCPU;
613 
614 	/*
615 	 * Check vhold_recycle_free for an explanation.
616 	 */
617 	vp->v_holdcnt = VHOLD_NO_SMR;
618 	vp->v_type = VNON;
619 	mtx_lock(&vnode_list_mtx);
620 	TAILQ_INSERT_BEFORE(vnode_list_free_marker, vp, v_vnodelist);
621 	mtx_unlock(&vnode_list_mtx);
622 	return (0);
623 }
624 
625 /*
626  * Free a vnode when it is cleared from the zone.
627  */
628 static void
629 vnode_fini(void *mem, int size)
630 {
631 	struct vnode *vp;
632 	struct bufobj *bo;
633 
634 	vp = mem;
635 	vdbatch_dequeue(vp);
636 	mtx_lock(&vnode_list_mtx);
637 	TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
638 	mtx_unlock(&vnode_list_mtx);
639 	rangelock_destroy(&vp->v_rl);
640 	lockdestroy(vp->v_vnlock);
641 	mtx_destroy(&vp->v_interlock);
642 	bo = &vp->v_bufobj;
643 	rw_destroy(BO_LOCKPTR(bo));
644 
645 	kasan_mark(mem, size, size, 0);
646 }
647 
648 /*
649  * Provide the size of NFS nclnode and NFS fh for calculation of the
650  * vnode memory consumption.  The size is specified directly to
651  * eliminate dependency on NFS-private header.
652  *
653  * Other filesystems (like UFS and ZFS) may use bigger or smaller
654  * private inode data, but the NFS-based estimation is ample enough.
655  * Still, we care about differences in the size between 64- and 32-bit
656  * platforms.
657  *
658  * Namecache structure size is heuristically
659  * sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1.
660  */
661 #ifdef _LP64
662 #define	NFS_NCLNODE_SZ	(528 + 64)
663 #define	NC_SZ		148
664 #else
665 #define	NFS_NCLNODE_SZ	(360 + 32)
666 #define	NC_SZ		92
667 #endif
668 
669 static void
670 vntblinit(void *dummy __unused)
671 {
672 	struct vdbatch *vd;
673 	uma_ctor ctor;
674 	uma_dtor dtor;
675 	int cpu, physvnodes, virtvnodes;
676 	u_int i;
677 
678 	/*
679 	 * Desiredvnodes is a function of the physical memory size and the
680 	 * kernel's heap size.  Generally speaking, it scales with the
681 	 * physical memory size.  The ratio of desiredvnodes to the physical
682 	 * memory size is 1:16 until desiredvnodes exceeds 98,304.
683 	 * Thereafter, the marginal ratio of desiredvnodes to the physical
684 	 * memory size is 1:64.  However, desiredvnodes is limited by the
685 	 * kernel's heap size.  The memory required by desiredvnodes
686 	 * vnodes and vm objects must not exceed 1/10th of the kernel's
687 	 * heap size.
688 	 */
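	/*
	 * Illustrative arithmetic (assumed numbers, not measured): with 4GB
	 * of RAM, pgtok(vm_cnt.v_page_count) is about 4194304KB, so the
	 * first term below adds 4194304 / 64 == 65536 and the second adds
	 * 3 * min(98304 * 16, 4194304) / 64 == 73728, giving roughly
	 * maxproc + 139264 before the virtvnodes clamp.
	 */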
689 	physvnodes = maxproc + pgtok(vm_cnt.v_page_count) / 64 +
690 	    3 * min(98304 * 16, pgtok(vm_cnt.v_page_count)) / 64;
691 	virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
692 	    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
693 	desiredvnodes = min(physvnodes, virtvnodes);
694 	if (desiredvnodes > MAXVNODES_MAX) {
695 		if (bootverbose)
696 			printf("Reducing kern.maxvnodes %lu -> %lu\n",
697 			    desiredvnodes, MAXVNODES_MAX);
698 		desiredvnodes = MAXVNODES_MAX;
699 	}
700 	wantfreevnodes = desiredvnodes / 4;
701 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
702 	TAILQ_INIT(&vnode_list);
703 	mtx_init(&vnode_list_mtx, "vnode_list", NULL, MTX_DEF);
704 	/*
705 	 * The lock is taken to appease WITNESS.
706 	 */
707 	mtx_lock(&vnode_list_mtx);
708 	vnlru_recalc();
709 	mtx_unlock(&vnode_list_mtx);
710 	vnode_list_free_marker = vn_alloc_marker(NULL);
711 	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_free_marker, v_vnodelist);
712 	vnode_list_reclaim_marker = vn_alloc_marker(NULL);
713 	TAILQ_INSERT_HEAD(&vnode_list, vnode_list_reclaim_marker, v_vnodelist);
714 
715 #ifdef KASAN
716 	ctor = vnode_ctor;
717 	dtor = vnode_dtor;
718 #else
719 	ctor = NULL;
720 	dtor = NULL;
721 #endif
722 	vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor,
723 	    vnode_init, vnode_fini, UMA_ALIGN_PTR, UMA_ZONE_NOKASAN);
724 	uma_zone_set_smr(vnode_zone, vfs_smr);
725 
726 	/*
727 	 * Preallocate enough nodes to support one per buf so that
728 	 * we cannot fail an insert.  reassignbuf() callers cannot
729 	 * tolerate insertion failure.
730 	 */
731 	buf_trie_zone = uma_zcreate("BUF TRIE", pctrie_node_size(),
732 	    NULL, NULL, pctrie_zone_init, NULL, UMA_ALIGN_PTR,
733 	    UMA_ZONE_NOFREE | UMA_ZONE_SMR);
734 	buf_trie_smr = uma_zone_get_smr(buf_trie_zone);
735 	uma_prealloc(buf_trie_zone, nbuf);
736 
737 	vnodes_created = counter_u64_alloc(M_WAITOK);
738 	recycles_count = counter_u64_alloc(M_WAITOK);
739 	recycles_free_count = counter_u64_alloc(M_WAITOK);
740 	deferred_inact = counter_u64_alloc(M_WAITOK);
741 
742 	/*
743 	 * Initialize the filesystem syncer.
744 	 */
745 	syncer_workitem_pending = hashinit(syncer_maxdelay, M_VNODE,
746 	    &syncer_mask);
747 	syncer_maxdelay = syncer_mask + 1;
748 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
749 	cv_init(&sync_wakeup, "syncer");
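	/*
	 * The loop below computes vnsz2log = floor(log2(sizeof(struct
	 * vnode))), the shift applied to a vnode address when initializing
	 * v_hash.
	 */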
750 	for (i = 1; i <= sizeof(struct vnode); i <<= 1)
751 		vnsz2log++;
752 	vnsz2log--;
753 
754 	CPU_FOREACH(cpu) {
755 		vd = DPCPU_ID_PTR((cpu), vd);
756 		bzero(vd, sizeof(*vd));
757 		mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF);
758 	}
759 }
760 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
761 
762 /*
763  * Mark a mount point as busy. Used to synchronize access and to delay
764  * unmounting. Note that mountlist_mtx is not released on failure.
765  *
766  * vfs_busy() is a custom lock, it can block the caller.
767  * vfs_busy() only sleeps if the unmount is active on the mount point.
768  * For a mountpoint mp, the vfs_busy-enforced lock is ordered before
769  * the lock of any vnode belonging to mp.
770  *
771  * Lookup uses vfs_busy() to traverse mount points.
772  * root fs			var fs
773  * / vnode lock		A	/ vnode lock (/var)		D
774  * /var vnode lock	B	/log vnode lock(/var/log)	E
775  * vfs_busy lock	C	vfs_busy lock			F
776  *
777  * Within each file system, the lock order is C->A->B and F->D->E.
778  *
779  * When traversing across mounts, the system follows that lock order:
780  *
781  *        C->A->B
782  *              |
783  *              +->F->D->E
784  *
785  * The lookup() process for namei("/var") illustrates the sequence:
786  *  VOP_LOOKUP() obtains B while A is held
787  *  vfs_busy() obtains a shared lock on F while A and B are held
788  *  vput() releases lock on B
789  *  vput() releases lock on A
790  *  VFS_ROOT() obtains lock on D while shared lock on F is held
791  *  vfs_unbusy() releases shared lock on F
792  *  vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
793  *    An attempt to lock A (instead of vp_crossmp) while D is held would
794  *    violate the global order, causing deadlocks.
795  *
796  * dounmount() locks B while F is drained.
797  */
798 int
799 vfs_busy(struct mount *mp, int flags)
800 {
801 	struct mount_pcpu *mpcpu;
802 
803 	MPASS((flags & ~MBF_MASK) == 0);
804 	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
805 
806 	if (vfs_op_thread_enter(mp, mpcpu)) {
807 		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
808 		MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
809 		MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
810 		vfs_mp_count_add_pcpu(mpcpu, ref, 1);
811 		vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
812 		vfs_op_thread_exit(mp, mpcpu);
813 		if (flags & MBF_MNTLSTLOCK)
814 			mtx_unlock(&mountlist_mtx);
815 		return (0);
816 	}
817 
818 	MNT_ILOCK(mp);
819 	vfs_assert_mount_counters(mp);
820 	MNT_REF(mp);
821 	/*
822 	 * If mount point is currently being unmounted, sleep until the
823  * mount point's fate is decided.  If the thread doing the unmounting fails,
824 	 * it will clear MNTK_UNMOUNT flag before waking us up, indicating
825 	 * that this mount point has survived the unmount attempt and vfs_busy
826 	 * should retry.  Otherwise the unmounter thread will set MNTK_REFEXPIRE
827 	 * flag in addition to MNTK_UNMOUNT, indicating that mount point is
828 	 * about to be really destroyed.  vfs_busy needs to release its
829 	 * reference on the mount point in this case and return with ENOENT,
830  * telling the caller that the mount it tried to busy is no longer
831 	 * valid.
832 	 */
833 	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
834 		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
835 			MNT_REL(mp);
836 			MNT_IUNLOCK(mp);
837 			CTR1(KTR_VFS, "%s: failed busying before sleeping",
838 			    __func__);
839 			return (ENOENT);
840 		}
841 		if (flags & MBF_MNTLSTLOCK)
842 			mtx_unlock(&mountlist_mtx);
843 		mp->mnt_kern_flag |= MNTK_MWAIT;
844 		msleep(mp, MNT_MTX(mp), PVFS | PDROP, "vfs_busy", 0);
845 		if (flags & MBF_MNTLSTLOCK)
846 			mtx_lock(&mountlist_mtx);
847 		MNT_ILOCK(mp);
848 	}
849 	if (flags & MBF_MNTLSTLOCK)
850 		mtx_unlock(&mountlist_mtx);
851 	mp->mnt_lockref++;
852 	MNT_IUNLOCK(mp);
853 	return (0);
854 }
855 
856 /*
857  * Free a busy filesystem.
858  */
859 void
860 vfs_unbusy(struct mount *mp)
861 {
862 	struct mount_pcpu *mpcpu;
863 	int c;
864 
865 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
866 
867 	if (vfs_op_thread_enter(mp, mpcpu)) {
868 		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
869 		vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
870 		vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
871 		vfs_op_thread_exit(mp, mpcpu);
872 		return;
873 	}
874 
875 	MNT_ILOCK(mp);
876 	vfs_assert_mount_counters(mp);
877 	MNT_REL(mp);
878 	c = --mp->mnt_lockref;
879 	if (mp->mnt_vfs_ops == 0) {
880 		MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
881 		MNT_IUNLOCK(mp);
882 		return;
883 	}
884 	if (c < 0)
885 		vfs_dump_mount_counters(mp);
886 	if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
887 		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
888 		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
889 		mp->mnt_kern_flag &= ~MNTK_DRAINING;
890 		wakeup(&mp->mnt_lockref);
891 	}
892 	MNT_IUNLOCK(mp);
893 }
894 
895 /*
896  * Lookup a mount point by filesystem identifier.
897  */
898 struct mount *
899 vfs_getvfs(fsid_t *fsid)
900 {
901 	struct mount *mp;
902 
903 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
904 	mtx_lock(&mountlist_mtx);
905 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
906 		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
907 			vfs_ref(mp);
908 			mtx_unlock(&mountlist_mtx);
909 			return (mp);
910 		}
911 	}
912 	mtx_unlock(&mountlist_mtx);
913 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
914 	return ((struct mount *) 0);
915 }
916 
917 /*
918  * Lookup a mount point by filesystem identifier, busying it before
919  * returning.
920  *
921  * To avoid congestion on mountlist_mtx, implement simple direct-mapped
922  * cache for popular filesystem identifiers.  The cache is lockless, using
923  * the fact that struct mount objects are never freed.  In the worst case
924  * we may get a pointer to an unmounted or even a different filesystem, so
925  * we have to check what we got, and go the slow way if so.
926  */
927 struct mount *
928 vfs_busyfs(fsid_t *fsid)
929 {
930 #define	FSID_CACHE_SIZE	256
931 	typedef struct mount * volatile vmp_t;
932 	static vmp_t cache[FSID_CACHE_SIZE];
933 	struct mount *mp;
934 	int error;
935 	uint32_t hash;
936 
937 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
938 	hash = fsid->val[0] ^ fsid->val[1];
939 	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
940 	mp = cache[hash];
941 	if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0)
942 		goto slow;
943 	if (vfs_busy(mp, 0) != 0) {
944 		cache[hash] = NULL;
945 		goto slow;
946 	}
947 	if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
948 		return (mp);
949 	else
950 		vfs_unbusy(mp);
951 
952 slow:
953 	mtx_lock(&mountlist_mtx);
954 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
955 		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) {
956 			error = vfs_busy(mp, MBF_MNTLSTLOCK);
957 			if (error) {
958 				cache[hash] = NULL;
959 				mtx_unlock(&mountlist_mtx);
960 				return (NULL);
961 			}
962 			cache[hash] = mp;
963 			return (mp);
964 		}
965 	}
966 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
967 	mtx_unlock(&mountlist_mtx);
968 	return ((struct mount *) 0);
969 }
970 
971 /*
972  * Check if a user can access privileged mount options.
973  */
974 int
975 vfs_suser(struct mount *mp, struct thread *td)
976 {
977 	int error;
978 
979 	if (jailed(td->td_ucred)) {
980 		/*
981 		 * If the jail of the calling thread lacks permission for
982 		 * this type of file system, deny immediately.
983 		 */
984 		if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag))
985 			return (EPERM);
986 
987 		/*
988 		 * If the file system was mounted outside the jail of the
989 		 * calling thread, deny immediately.
990 		 */
991 		if (prison_check(td->td_ucred, mp->mnt_cred) != 0)
992 			return (EPERM);
993 	}
994 
995 	/*
996 	 * If the file system supports delegated administration, we don't check
997 	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
998 	 * by the file system itself.
999 	 * If this is not the user that did the original mount, we check for
1000 	 * the PRIV_VFS_MOUNT_OWNER privilege.
1001 	 */
1002 	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
1003 	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
1004 		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
1005 			return (error);
1006 	}
1007 	return (0);
1008 }
1009 
1010 /*
1011  * Get a new unique fsid.  Try to make its val[0] unique, since this value
1012  * will be used to create fake device numbers for stat().  Also try (but
1013  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
1014  * support 16-bit device numbers.  We end up with unique val[0]'s for the
1015  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
1016  *
1017  * Keep in mind that several mounts may be running in parallel.  Starting
1018  * the search one past where the previous search terminated is both a
1019  * micro-optimization and a defense against returning the same fsid to
1020  * different mounts.
1021  */
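/*
 * A sketch of the val[0] bit layout built in the loop below: the major
 * number is fixed at 255, while the minor packs the low byte of the vfs
 * type number into bits 24..31, the high byte of mntid_base into bits
 * 16..23 and its low byte into bits 0..7.
 */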
1022 void
1023 vfs_getnewfsid(struct mount *mp)
1024 {
1025 	static uint16_t mntid_base;
1026 	struct mount *nmp;
1027 	fsid_t tfsid;
1028 	int mtype;
1029 
1030 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
1031 	mtx_lock(&mntid_mtx);
1032 	mtype = mp->mnt_vfc->vfc_typenum;
1033 	tfsid.val[1] = mtype;
1034 	mtype = (mtype & 0xFF) << 24;
1035 	for (;;) {
1036 		tfsid.val[0] = makedev(255,
1037 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
1038 		mntid_base++;
1039 		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
1040 			break;
1041 		vfs_rel(nmp);
1042 	}
1043 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
1044 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
1045 	mtx_unlock(&mntid_mtx);
1046 }
1047 
1048 /*
1049  * Knob to control the precision of file timestamps:
1050  *
1051  *   0 = seconds only; nanoseconds zeroed.
1052  *   1 = seconds and nanoseconds, accurate within 1/HZ.
1053  *   2 = seconds and nanoseconds, truncated to microseconds.
1054  * >=3 = seconds and nanoseconds, maximum precision.
1055  */
1056 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
1057 
1058 static int timestamp_precision = TSP_USEC;
1059 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
1060     &timestamp_precision, 0, "File timestamp precision (0: seconds, "
1061     "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
1062     "3+: sec + ns (max. precision))");
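/*
 * The knob can be tuned at runtime, e.g.:
 *
 *	sysctl vfs.timestamp_precision=3
 */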
1063 
1064 /*
1065  * Get a current timestamp.
1066  */
1067 void
1068 vfs_timestamp(struct timespec *tsp)
1069 {
1070 	struct timeval tv;
1071 
1072 	switch (timestamp_precision) {
1073 	case TSP_SEC:
1074 		tsp->tv_sec = time_second;
1075 		tsp->tv_nsec = 0;
1076 		break;
1077 	case TSP_HZ:
1078 		getnanotime(tsp);
1079 		break;
1080 	case TSP_USEC:
1081 		microtime(&tv);
1082 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
1083 		break;
1084 	case TSP_NSEC:
1085 	default:
1086 		nanotime(tsp);
1087 		break;
1088 	}
1089 }
1090 
1091 /*
1092  * Set vnode attributes to VNOVAL
1093  */
1094 void
1095 vattr_null(struct vattr *vap)
1096 {
1097 
1098 	vap->va_type = VNON;
1099 	vap->va_size = VNOVAL;
1100 	vap->va_bytes = VNOVAL;
1101 	vap->va_mode = VNOVAL;
1102 	vap->va_nlink = VNOVAL;
1103 	vap->va_uid = VNOVAL;
1104 	vap->va_gid = VNOVAL;
1105 	vap->va_fsid = VNOVAL;
1106 	vap->va_fileid = VNOVAL;
1107 	vap->va_blocksize = VNOVAL;
1108 	vap->va_rdev = VNOVAL;
1109 	vap->va_atime.tv_sec = VNOVAL;
1110 	vap->va_atime.tv_nsec = VNOVAL;
1111 	vap->va_mtime.tv_sec = VNOVAL;
1112 	vap->va_mtime.tv_nsec = VNOVAL;
1113 	vap->va_ctime.tv_sec = VNOVAL;
1114 	vap->va_ctime.tv_nsec = VNOVAL;
1115 	vap->va_birthtime.tv_sec = VNOVAL;
1116 	vap->va_birthtime.tv_nsec = VNOVAL;
1117 	vap->va_flags = VNOVAL;
1118 	vap->va_gen = VNOVAL;
1119 	vap->va_vaflags = 0;
1120 }
1121 
1122 /*
1123  * Try to reduce the total number of vnodes.
1124  *
1125  * This routine (and its user) is buggy in at least the following ways:
1126  * - all parameters were picked years ago when RAM sizes were significantly
1127  *   smaller
1128  * - it can pick vnodes based on pages used by the vm object, but filesystems
1129  *   like ZFS don't use it, making the pick broken
1130  * - since ZFS has its own aging policy, it is partially combated by this one
1131  * - a dedicated method should be provided for filesystems to let them decide
1132  *   whether the vnode should be recycled
1133  *
1134  * This routine is called when we have too many vnodes.  It attempts
1135  * to free <count> vnodes and will potentially free vnodes that still
1136  * have VM backing store (VM backing store is typically the cause
1137  * of a vnode blowout so we want to do this).  Therefore, this operation
1138  * is not considered cheap.
1139  *
1140  * A number of conditions may prevent a vnode from being reclaimed.
1141  * The buffer cache may have references on the vnode, a directory
1142  * vnode may still have references due to the namei cache representing
1143  * underlying files, or the vnode may be in active use.  It is not
1144  * desirable to reuse such vnodes.  These conditions may cause the
1145  * number of vnodes to reach some minimum value regardless of what
1146  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
1147  *
1148  * @param reclaim_nc_src Only reclaim directories with outgoing namecache
1149  * 			 entries if this argument is true
1150  * @param trigger	 Only reclaim vnodes with fewer than this many resident
1151  *			 pages.
1152  * @param target	 How many vnodes to reclaim.
1153  * @return		 The number of vnodes that were reclaimed.
1154  */
1155 static int
1156 vlrureclaim(bool reclaim_nc_src, int trigger, u_long target)
1157 {
1158 	struct vnode *vp, *mvp;
1159 	struct mount *mp;
1160 	struct vm_object *object;
1161 	u_long done;
1162 	bool retried;
1163 
1164 	mtx_assert(&vnode_list_mtx, MA_OWNED);
1165 
1166 	retried = false;
1167 	done = 0;
1168 
1169 	mvp = vnode_list_reclaim_marker;
1170 restart:
1171 	vp = mvp;
1172 	while (done < target) {
1173 		vp = TAILQ_NEXT(vp, v_vnodelist);
1174 		if (__predict_false(vp == NULL))
1175 			break;
1176 
1177 		if (__predict_false(vp->v_type == VMARKER))
1178 			continue;
1179 
1180 		/*
1181 		 * If it's been deconstructed already, it's still
1182 		 * referenced, or it exceeds the trigger, skip it.
1183 		 * Also skip free vnodes.  We are trying to make space
1184 		 * to expand the free list, not reduce it.
1185 		 */
1186 		if (vp->v_usecount > 0 || vp->v_holdcnt == 0 ||
1187 		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)))
1188 			goto next_iter;
1189 
1190 		if (vp->v_type == VBAD || vp->v_type == VNON)
1191 			goto next_iter;
1192 
1193 		object = atomic_load_ptr(&vp->v_object);
1194 		if (object == NULL || object->resident_page_count > trigger) {
1195 			goto next_iter;
1196 		}
1197 
1198 		/*
1199 		 * Handle races against vnode allocation. Filesystems lock the
1200 		 * vnode some time after it gets returned from getnewvnode,
1201 		 * despite type and hold count being manipulated earlier.
1202 		 * Resorting to checking v_mount restores guarantees present
1203 		 * before the global list was reworked to contain all vnodes.
1204 		 */
1205 		if (!VI_TRYLOCK(vp))
1206 			goto next_iter;
1207 		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
1208 			VI_UNLOCK(vp);
1209 			goto next_iter;
1210 		}
1211 		if (vp->v_mount == NULL) {
1212 			VI_UNLOCK(vp);
1213 			goto next_iter;
1214 		}
1215 		vholdl(vp);
1216 		VI_UNLOCK(vp);
1217 		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1218 		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1219 		mtx_unlock(&vnode_list_mtx);
1220 
1221 		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1222 			vdrop(vp);
1223 			goto next_iter_unlocked;
1224 		}
1225 		if (VOP_LOCK(vp, LK_EXCLUSIVE|LK_NOWAIT) != 0) {
1226 			vdrop(vp);
1227 			vn_finished_write(mp);
1228 			goto next_iter_unlocked;
1229 		}
1230 
1231 		VI_LOCK(vp);
1232 		if (vp->v_usecount > 0 ||
1233 		    (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) ||
1234 		    (vp->v_object != NULL && vp->v_object->handle == vp &&
1235 		    vp->v_object->resident_page_count > trigger)) {
1236 			VOP_UNLOCK(vp);
1237 			vdropl(vp);
1238 			vn_finished_write(mp);
1239 			goto next_iter_unlocked;
1240 		}
1241 		counter_u64_add(recycles_count, 1);
1242 		vgonel(vp);
1243 		VOP_UNLOCK(vp);
1244 		vdropl(vp);
1245 		vn_finished_write(mp);
1246 		done++;
1247 next_iter_unlocked:
1248 		if (should_yield())
1249 			kern_yield(PRI_USER);
1250 		mtx_lock(&vnode_list_mtx);
1251 		goto restart;
1252 next_iter:
1253 		MPASS(vp->v_type != VMARKER);
1254 		if (!should_yield())
1255 			continue;
1256 		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1257 		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1258 		mtx_unlock(&vnode_list_mtx);
1259 		kern_yield(PRI_USER);
1260 		mtx_lock(&vnode_list_mtx);
1261 		goto restart;
1262 	}
1263 	if (done == 0 && !retried) {
1264 		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1265 		TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
1266 		retried = true;
1267 		goto restart;
1268 	}
1269 	return (done);
1270 }
1271 
1272 static int max_vnlru_free = 10000; /* limit on vnode free requests per call */
1273 SYSCTL_INT(_debug, OID_AUTO, max_vnlru_free, CTLFLAG_RW,
1274     &max_vnlru_free, 0,
1275     "Limit on vnode free requests per call to the vnlru_free routine");
1276 
1277 /*
1278  * Attempt to reduce the free list by the requested amount.
1279  */
1280 static int
1281 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp)
1282 {
1283 	struct vnode *vp;
1284 	struct mount *mp;
1285 	int ocount;
1286 
1287 	mtx_assert(&vnode_list_mtx, MA_OWNED);
1288 	if (count > max_vnlru_free)
1289 		count = max_vnlru_free;
1290 	ocount = count;
1291 	vp = mvp;
1292 	for (;;) {
1293 		if (count == 0) {
1294 			break;
1295 		}
1296 		vp = TAILQ_NEXT(vp, v_vnodelist);
1297 		if (__predict_false(vp == NULL)) {
1298 			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1299 			TAILQ_INSERT_TAIL(&vnode_list, mvp, v_vnodelist);
1300 			break;
1301 		}
1302 		if (__predict_false(vp->v_type == VMARKER))
1303 			continue;
1304 		if (vp->v_holdcnt > 0)
1305 			continue;
1306 		/*
1307 		 * Don't recycle if our vnode is from a different type
1308 		 * of mount point.  Note that mp is type-safe: the
1309 		 * check does not reach an unmapped address even if
1310 		 * vnode is reclaimed.
1311 		 */
1312 		if (mnt_op != NULL && (mp = vp->v_mount) != NULL &&
1313 		    mp->mnt_op != mnt_op) {
1314 			continue;
1315 		}
1316 		if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) {
1317 			continue;
1318 		}
1319 		if (!vhold_recycle_free(vp))
1320 			continue;
1321 		TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1322 		TAILQ_INSERT_AFTER(&vnode_list, vp, mvp, v_vnodelist);
1323 		mtx_unlock(&vnode_list_mtx);
1324 		if (vtryrecycle(vp) == 0)
1325 			count--;
1326 		mtx_lock(&vnode_list_mtx);
1327 		vp = mvp;
1328 	}
1329 	return (ocount - count);
1330 }
1331 
1332 static int
1333 vnlru_free_locked(int count)
1334 {
1335 
1336 	mtx_assert(&vnode_list_mtx, MA_OWNED);
1337 	return (vnlru_free_impl(count, NULL, vnode_list_free_marker));
1338 }
1339 
1340 void
1341 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp)
1342 {
1343 
1344 	MPASS(mnt_op != NULL);
1345 	MPASS(mvp != NULL);
1346 	VNPASS(mvp->v_type == VMARKER, mvp);
1347 	mtx_lock(&vnode_list_mtx);
1348 	vnlru_free_impl(count, mnt_op, mvp);
1349 	mtx_unlock(&vnode_list_mtx);
1350 }
1351 
1352 /*
1353  * Temporary binary compat, don't use. Call vnlru_free_vfsops instead.
1354  */
1355 void
1356 vnlru_free(int count, struct vfsops *mnt_op)
1357 {
1358 	struct vnode *mvp;
1359 
1360 	if (count == 0)
1361 		return;
1362 	mtx_lock(&vnode_list_mtx);
1363 	mvp = vnode_list_free_marker;
1364 	if (vnlru_free_impl(count, mnt_op, mvp) == 0) {
1365 		/*
1366 		 * It is possible the marker was moved over eligible vnodes by
1367 		 * callers which filtered by different ops. If so, start from
1368 		 * scratch.
1369 		 */
1370 		if (vnlru_read_freevnodes() > 0) {
1371 			TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1372 			TAILQ_INSERT_HEAD(&vnode_list, mvp, v_vnodelist);
1373 		}
1374 		vnlru_free_impl(count, mnt_op, mvp);
1375 	}
1376 	mtx_unlock(&vnode_list_mtx);
1377 }
1378 
1379 struct vnode *
1380 vnlru_alloc_marker(void)
1381 {
1382 	struct vnode *mvp;
1383 
1384 	mvp = vn_alloc_marker(NULL);
1385 	mtx_lock(&vnode_list_mtx);
1386 	TAILQ_INSERT_BEFORE(vnode_list_free_marker, mvp, v_vnodelist);
1387 	mtx_unlock(&vnode_list_mtx);
1388 	return (mvp);
1389 }
1390 
1391 void
1392 vnlru_free_marker(struct vnode *mvp)
1393 {
1394 	mtx_lock(&vnode_list_mtx);
1395 	TAILQ_REMOVE(&vnode_list, mvp, v_vnodelist);
1396 	mtx_unlock(&vnode_list_mtx);
1397 	vn_free_marker(mvp);
1398 }
1399 
1400 static void
1401 vnlru_recalc(void)
1402 {
1403 
1404 	mtx_assert(&vnode_list_mtx, MA_OWNED);
1405 	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
1406 	vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */
1407 	vlowat = vhiwat / 2;
1408 }
1409 
1410 /*
1411  * Attempt to recycle vnodes in a context that is always safe to block.
1412  * Calling vlrureclaim() from the bowels of filesystem code has some
1413  * interesting deadlock problems.
1414  */
1415 static struct proc *vnlruproc;
1416 static int vnlruproc_sig;
1417 
1418 /*
1419  * The main freevnodes counter is only updated when threads requeue their vnode
1420  * batches. CPUs are conditionally walked to compute a more accurate total.
1421  *
1422  * Limit how much slop we are willing to tolerate. Note: the actual value
1423  * at any given moment can still exceed the slop, but it should not do so
1424  * by a significant margin in practice.
1425  */
1426 #define VNLRU_FREEVNODES_SLOP 128
1427 
1428 static __inline void
1429 vn_freevnodes_inc(void)
1430 {
1431 	struct vdbatch *vd;
1432 
1433 	critical_enter();
1434 	vd = DPCPU_PTR(vd);
1435 	vd->freevnodes++;
1436 	critical_exit();
1437 }
1438 
1439 static __inline void
1440 vn_freevnodes_dec(void)
1441 {
1442 	struct vdbatch *vd;
1443 
1444 	critical_enter();
1445 	vd = DPCPU_PTR(vd);
1446 	vd->freevnodes--;
1447 	critical_exit();
1448 }
1449 
1450 static u_long
1451 vnlru_read_freevnodes(void)
1452 {
1453 	struct vdbatch *vd;
1454 	long slop;
1455 	int cpu;
1456 
1457 	mtx_assert(&vnode_list_mtx, MA_OWNED);
1458 	if (freevnodes > freevnodes_old)
1459 		slop = freevnodes - freevnodes_old;
1460 	else
1461 		slop = freevnodes_old - freevnodes;
1462 	if (slop < VNLRU_FREEVNODES_SLOP)
1463 		return (freevnodes >= 0 ? freevnodes : 0);
1464 	freevnodes_old = freevnodes;
1465 	CPU_FOREACH(cpu) {
1466 		vd = DPCPU_ID_PTR((cpu), vd);
1467 		freevnodes_old += vd->freevnodes;
1468 	}
1469 	return (freevnodes_old >= 0 ? freevnodes_old : 0);
1470 }
1471 
1472 static bool
1473 vnlru_under(u_long rnumvnodes, u_long limit)
1474 {
1475 	u_long rfreevnodes, space;
1476 
1477 	if (__predict_false(rnumvnodes > desiredvnodes))
1478 		return (true);
1479 
1480 	space = desiredvnodes - rnumvnodes;
1481 	if (space < limit) {
1482 		rfreevnodes = vnlru_read_freevnodes();
1483 		if (rfreevnodes > wantfreevnodes)
1484 			space += rfreevnodes - wantfreevnodes;
1485 	}
1486 	return (space < limit);
1487 }
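/*
 * A worked example with illustrative numbers: given desiredvnodes ==
 * 100000, wantfreevnodes == 25000 and rnumvnodes == 98000, space starts
 * at 2000; if 30000 vnodes are free, the 5000 above the target count as
 * headroom, so vnlru_under() reports pressure only for limits above 7000.
 */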
1488 
1489 static bool
1490 vnlru_under_unlocked(u_long rnumvnodes, u_long limit)
1491 {
1492 	long rfreevnodes, space;
1493 
1494 	if (__predict_false(rnumvnodes > desiredvnodes))
1495 		return (true);
1496 
1497 	space = desiredvnodes - rnumvnodes;
1498 	if (space < limit) {
1499 		rfreevnodes = atomic_load_long(&freevnodes);
1500 		if (rfreevnodes > wantfreevnodes)
1501 			space += rfreevnodes - wantfreevnodes;
1502 	}
1503 	return (space < limit);
1504 }
1505 
1506 static void
1507 vnlru_kick(void)
1508 {
1509 
1510 	mtx_assert(&vnode_list_mtx, MA_OWNED);
1511 	if (vnlruproc_sig == 0) {
1512 		vnlruproc_sig = 1;
1513 		wakeup(vnlruproc);
1514 	}
1515 }
1516 
1517 static void
1518 vnlru_proc(void)
1519 {
1520 	u_long rnumvnodes, rfreevnodes, target;
1521 	unsigned long onumvnodes;
1522 	int done, force, trigger, usevnodes;
1523 	bool reclaim_nc_src, want_reread;
1524 
1525 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, vnlruproc,
1526 	    SHUTDOWN_PRI_FIRST);
1527 
1528 	force = 0;
1529 	want_reread = false;
1530 	for (;;) {
1531 		kproc_suspend_check(vnlruproc);
1532 		mtx_lock(&vnode_list_mtx);
1533 		rnumvnodes = atomic_load_long(&numvnodes);
1534 
1535 		if (want_reread) {
1536 			force = vnlru_under(numvnodes, vhiwat) ? 1 : 0;
1537 			want_reread = false;
1538 		}
1539 
1540 		/*
1541 		 * If numvnodes is too large (due to desiredvnodes being
1542 		 * adjusted using its sysctl, or emergency growth), first
1543 		 * try to reduce it by discarding from the free list.
1544 		 */
1545 		if (rnumvnodes > desiredvnodes) {
1546 			vnlru_free_locked(rnumvnodes - desiredvnodes);
1547 			rnumvnodes = atomic_load_long(&numvnodes);
1548 		}
1549 		/*
1550 		 * Sleep if the vnode cache is in a good state.  This is
1551 		 * when it is not over-full and has space for about a 4%
1552 		 * or 9% expansion (by growing its size or by moderately
1553 		 * reducing its free list).  Otherwise, try to reclaim
1554 		 * space for a 10% expansion.
1555 		 */
1556 		if (vstir && force == 0) {
1557 			force = 1;
1558 			vstir = 0;
1559 		}
1560 		if (force == 0 && !vnlru_under(rnumvnodes, vlowat)) {
1561 			vnlruproc_sig = 0;
1562 			wakeup(&vnlruproc_sig);
1563 			msleep(vnlruproc, &vnode_list_mtx,
1564 			    PVFS|PDROP, "vlruwt", hz);
1565 			continue;
1566 		}
1567 		rfreevnodes = vnlru_read_freevnodes();
1568 
1569 		onumvnodes = rnumvnodes;
1570 		/*
1571 		 * Calculate parameters for recycling.  These are the same
1572 		 * throughout the loop to give some semblance of fairness.
1573 		 * The trigger point is to avoid recycling vnodes with lots
1574 		 * of resident pages.  We aren't trying to free memory; we
1575 		 * are trying to recycle or at least free vnodes.
1576 		 */
1577 		if (rnumvnodes <= desiredvnodes)
1578 			usevnodes = rnumvnodes - rfreevnodes;
1579 		else
1580 			usevnodes = rnumvnodes;
1581 		if (usevnodes <= 0)
1582 			usevnodes = 1;
1583 		/*
1584 		 * The trigger value is chosen to give a conservatively
1585 		 * large value to ensure that it alone doesn't prevent
1586 		 * making progress.  The value can easily be so large that
1587 		 * it is effectively infinite in some congested and
1588 		 * misconfigured cases, and this is necessary.  Normally
1589 		 * it is about 8 to 100 (pages), which is quite large.
1590 		 */
1591 		trigger = vm_cnt.v_page_count * 2 / usevnodes;
1592 		if (force < 2)
1593 			trigger = vsmalltrigger;
1594 		reclaim_nc_src = force >= 3;
1595 		target = rnumvnodes * (int64_t)gapvnodes / imax(desiredvnodes, 1);
1596 		target = target / 10 + 1;
1597 		done = vlrureclaim(reclaim_nc_src, trigger, target);
1598 		mtx_unlock(&vnode_list_mtx);
1599 		if (onumvnodes > desiredvnodes && numvnodes <= desiredvnodes)
1600 			uma_reclaim(UMA_RECLAIM_DRAIN);
1601 		if (done == 0) {
1602 			if (force == 0 || force == 1) {
1603 				force = 2;
1604 				continue;
1605 			}
1606 			if (force == 2) {
1607 				force = 3;
1608 				continue;
1609 			}
1610 			want_reread = true;
1611 			force = 0;
1612 			vnlru_nowhere++;
1613 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
1614 		} else {
1615 			want_reread = true;
1616 			kern_yield(PRI_USER);
1617 		}
1618 	}
1619 }
1620 
1621 static struct kproc_desc vnlru_kp = {
1622 	"vnlru",
1623 	vnlru_proc,
1624 	&vnlruproc
1625 };
1626 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
1627     &vnlru_kp);
1628 
1629 /*
1630  * Routines having to do with the management of the vnode table.
1631  */
1632 
1633 /*
1634  * Try to recycle a freed vnode.  We abort if anyone picks up a reference
1635  * before we actually vgone().  This function must be called with the vnode
1636  * held to prevent the vnode from being returned to the free list midway
1637  * through vgone().
1638  */
1639 static int
1640 vtryrecycle(struct vnode *vp)
1641 {
1642 	struct mount *vnmp;
1643 
1644 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1645 	VNASSERT(vp->v_holdcnt, vp,
1646 	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
1647 	/*
1648 	 * This vnode may be found and locked via some other list; if so,
1649 	 * we can't recycle it yet.
1650 	 */
1651 	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
1652 		CTR2(KTR_VFS,
1653 		    "%s: impossible to recycle, vp %p lock is already held",
1654 		    __func__, vp);
1655 		vdrop(vp);
1656 		return (EWOULDBLOCK);
1657 	}
1658 	/*
1659 	 * Don't recycle if its filesystem is being suspended.
1660 	 */
1661 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
1662 		VOP_UNLOCK(vp);
1663 		CTR2(KTR_VFS,
1664 		    "%s: impossible to recycle, cannot start the write for %p",
1665 		    __func__, vp);
1666 		vdrop(vp);
1667 		return (EBUSY);
1668 	}
1669 	/*
1670 	 * If we got this far, we need to acquire the interlock and see if
1671 	 * anyone picked up this vnode from another list.  If not, we will
1672 	 * mark it with DOOMED via vgonel() so that anyone who does find it
1673 	 * will skip over it.
1674 	 */
1675 	VI_LOCK(vp);
1676 	if (vp->v_usecount) {
1677 		VOP_UNLOCK(vp);
1678 		vdropl(vp);
1679 		vn_finished_write(vnmp);
1680 		CTR2(KTR_VFS,
1681 		    "%s: impossible to recycle, %p is already referenced",
1682 		    __func__, vp);
1683 		return (EBUSY);
1684 	}
1685 	if (!VN_IS_DOOMED(vp)) {
1686 		counter_u64_add(recycles_free_count, 1);
1687 		vgonel(vp);
1688 	}
1689 	VOP_UNLOCK(vp);
1690 	vdropl(vp);
1691 	vn_finished_write(vnmp);
1692 	return (0);
1693 }
1694 
1695 /*
1696  * Allocate a new vnode.
1697  *
1698  * The operation never returns an error. Returning an error was disabled
1699  * in r145385 (dated 2005) with the following comment:
1700  *
1701  * XXX Not all VFS_VGET/ffs_vget callers check returns.
1702  *
1703  * Given the age of this commit (almost 15 years at the time of writing this
1704  * comment), restoring the ability to fail requires a significant audit of
1705  * all codepaths.
1706  *
1707  * The routine can try to free a vnode or stall for up to 1 second waiting for
1708  * vnlru to clear things up, but ultimately always performs an M_WAITOK allocation.
1709  */
1710 static u_long vn_alloc_cyclecount;
1711 
1712 static struct vnode * __noinline
1713 vn_alloc_hard(struct mount *mp)
1714 {
1715 	u_long rnumvnodes, rfreevnodes;
1716 
1717 	mtx_lock(&vnode_list_mtx);
1718 	rnumvnodes = atomic_load_long(&numvnodes);
1719 	if (rnumvnodes + 1 < desiredvnodes) {
1720 		vn_alloc_cyclecount = 0;
1721 		goto alloc;
1722 	}
1723 	rfreevnodes = vnlru_read_freevnodes();
1724 	if (vn_alloc_cyclecount++ >= rfreevnodes) {
1725 		vn_alloc_cyclecount = 0;
1726 		vstir = 1;
1727 	}
1728 	/*
1729 	 * Grow the vnode cache if it will not be above its target max
1730 	 * after growing.  Otherwise, if the free list is nonempty, try
1731 	 * to reclaim 1 item from it before growing the cache (possibly
1732 	 * above its target max if the reclamation failed or is delayed).
1733 	 * Otherwise, wait for some space.  In all cases, schedule
1734 	 * vnlru_proc() if we are getting short of space.  The watermarks
1735 	 * should be chosen so that we never wait or even reclaim from
1736 	 * the free list to below its target minimum.
1737 	 */
1738 	if (vnlru_free_locked(1) > 0)
1739 		goto alloc;
1740 	if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
1741 		/*
1742 		 * Wait for space for a new vnode.
1743 		 */
1744 		vnlru_kick();
1745 		msleep(&vnlruproc_sig, &vnode_list_mtx, PVFS, "vlruwk", hz);
1746 		if (atomic_load_long(&numvnodes) + 1 > desiredvnodes &&
1747 		    vnlru_read_freevnodes() > 1)
1748 			vnlru_free_locked(1);
1749 	}
1750 alloc:
1751 	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
1752 	if (vnlru_under(rnumvnodes, vlowat))
1753 		vnlru_kick();
1754 	mtx_unlock(&vnode_list_mtx);
1755 	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
1756 }
1757 
1758 static struct vnode *
1759 vn_alloc(struct mount *mp)
1760 {
1761 	u_long rnumvnodes;
1762 
1763 	if (__predict_false(vn_alloc_cyclecount != 0))
1764 		return (vn_alloc_hard(mp));
1765 	rnumvnodes = atomic_fetchadd_long(&numvnodes, 1) + 1;
1766 	if (__predict_false(vnlru_under_unlocked(rnumvnodes, vlowat))) {
1767 		atomic_subtract_long(&numvnodes, 1);
1768 		return (vn_alloc_hard(mp));
1769 	}
1770 
1771 	return (uma_zalloc_smr(vnode_zone, M_WAITOK));
1772 }
1773 
1774 static void
1775 vn_free(struct vnode *vp)
1776 {
1777 
1778 	atomic_subtract_long(&numvnodes, 1);
1779 	uma_zfree_smr(vnode_zone, vp);
1780 }
1781 
1782 /*
1783  * Return the next vnode from the free list.
1784  */
1785 int
1786 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
1787     struct vnode **vpp)
1788 {
1789 	struct vnode *vp;
1790 	struct thread *td;
1791 	struct lock_object *lo;
1792 
1793 	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
1794 
1795 	KASSERT(vops->registered,
1796 	    ("%s: not registered vector op %p\n", __func__, vops));
1797 
1798 	td = curthread;
1799 	if (td->td_vp_reserved != NULL) {
1800 		vp = td->td_vp_reserved;
1801 		td->td_vp_reserved = NULL;
1802 	} else {
1803 		vp = vn_alloc(mp);
1804 	}
1805 	counter_u64_add(vnodes_created, 1);
1806 	/*
1807 	 * Locks are given the generic name "vnode" when created.
1808 	 * Follow the historic practice of using the filesystem
1809 	 * name when they are allocated, e.g., "zfs", "ufs", "nfs", etc.
1810 	 *
1811 	 * Locks live in a witness group keyed on their name. Thus,
1812 	 * when a lock is renamed, it must also move from the witness
1813 	 * group of its old name to the witness group of its new name.
1814 	 *
1815 	 * The change only needs to be made when the vnode moves
1816 	 * from one filesystem type to another. We ensure that each
1817 	 * filesystem uses a single static name pointer for its tag so
1818 	 * that we can compare pointers rather than doing a strcmp().
1819 	 */
1820 	lo = &vp->v_vnlock->lock_object;
1821 #ifdef WITNESS
1822 	if (lo->lo_name != tag) {
1823 #endif
1824 		lo->lo_name = tag;
1825 #ifdef WITNESS
1826 		WITNESS_DESTROY(lo);
1827 		WITNESS_INIT(lo, tag);
1828 	}
1829 #endif
1830 	/*
1831 	 * By default, don't allow shared locks unless filesystems opt-in.
1832 	 */
1833 	vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE;
1834 	/*
1835 	 * Finalize various vnode identity bits.
1836 	 */
1837 	KASSERT(vp->v_object == NULL, ("stale v_object %p", vp));
1838 	KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp));
1839 	KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp));
1840 	vp->v_type = VNON;
1841 	vp->v_op = vops;
1842 	vp->v_irflag = 0;
1843 	v_init_counters(vp);
1844 	vn_seqc_init(vp);
1845 	vp->v_bufobj.bo_ops = &buf_ops_bio;
1846 #ifdef DIAGNOSTIC
1847 	if (mp == NULL && vops != &dead_vnodeops)
1848 		printf("NULL mp in getnewvnode(9), tag %s\n", tag);
1849 #endif
1850 #ifdef MAC
1851 	mac_vnode_init(vp);
1852 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1853 		mac_vnode_associate_singlelabel(mp, vp);
1854 #endif
1855 	if (mp != NULL) {
1856 		vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize;
1857 		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
1858 			vp->v_vflag |= VV_NOKNOTE;
1859 	}
1860 
1861 	/*
1862 	 * For the filesystems which do not use vfs_hash_insert(),
1863 	 * still initialize v_hash so that vfs_hash_index() stays useful.
1864 	 * E.g., nullfs uses vfs_hash_index() on the lower vnode for
1865 	 * its own hashing.
1866 	 */
1867 	vp->v_hash = (uintptr_t)vp >> vnsz2log;
1868 
1869 	*vpp = vp;
1870 	return (0);
1871 }
1872 
1873 void
1874 getnewvnode_reserve(void)
1875 {
1876 	struct thread *td;
1877 
1878 	td = curthread;
1879 	MPASS(td->td_vp_reserved == NULL);
1880 	td->td_vp_reserved = vn_alloc(NULL);
1881 }
1882 
1883 void
1884 getnewvnode_drop_reserve(void)
1885 {
1886 	struct thread *td;
1887 
1888 	td = curthread;
1889 	if (td->td_vp_reserved != NULL) {
1890 		vn_free(td->td_vp_reserved);
1891 		td->td_vp_reserved = NULL;
1892 	}
1893 }
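
/*
 * Illustrative sketch (editorial addition) of how the reserve pair
 * above is meant to be used: pre-allocate the vnode before entering a
 * context where the potential sleep in vn_alloc() would be unsafe or
 * inconvenient, e.g. while holding filesystem locks or inside a
 * transaction.  The "examplefs" names are hypothetical:
 *
 *	getnewvnode_reserve();
 *	(acquire locks, start a transaction, ...)
 *	error = getnewvnode("examplefs", mp, &examplefs_vnodeops, &vp);
 *	(...)
 *	getnewvnode_drop_reserve();
 *
 * getnewvnode() consumes the per-thread reserve if one is present;
 * the drop only frees a reserve that ended up unused.
 */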
1894 
1895 static void __noinline
1896 freevnode(struct vnode *vp)
1897 {
1898 	struct bufobj *bo;
1899 
1900 	/*
1901 	 * The vnode has been marked for destruction, so free it.
1902 	 *
1903 	 * The vnode will be returned to the zone where it will
1904 	 * normally remain until it is needed for another vnode. We
1905 	 * need to cleanup (or verify that the cleanup has already
1906 	 * been done) any residual data left from its current use
1907 	 * so as not to contaminate the freshly allocated vnode.
1908 	 */
1909 	CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp);
1910 	/*
1911 	 * Paired with vgone.
1912 	 */
1913 	vn_seqc_write_end_free(vp);
1914 
1915 	bo = &vp->v_bufobj;
1916 	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
1917 	VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp);
1918 	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
1919 	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
1920 	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
1921 	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
1922 	VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp,
1923 	    ("clean blk trie not empty"));
1924 	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
1925 	VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp,
1926 	    ("dirty blk trie not empty"));
1927 	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
1928 	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
1929 	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
1930 	VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp,
1931 	    ("Dangling rangelock waiters"));
1932 	VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp,
1933 	    ("Leaked inactivation"));
1934 	VI_UNLOCK(vp);
1935 #ifdef MAC
1936 	mac_vnode_destroy(vp);
1937 #endif
1938 	if (vp->v_pollinfo != NULL) {
1939 		destroy_vpollinfo(vp->v_pollinfo);
1940 		vp->v_pollinfo = NULL;
1941 	}
1942 	vp->v_mountedhere = NULL;
1943 	vp->v_unpcb = NULL;
1944 	vp->v_rdev = NULL;
1945 	vp->v_fifoinfo = NULL;
1946 	vp->v_iflag = 0;
1947 	vp->v_vflag = 0;
1948 	bo->bo_flag = 0;
1949 	vn_free(vp);
1950 }
1951 
1952 /*
1953  * Delete from old mount point vnode list, if on one.
1954  */
1955 static void
1956 delmntque(struct vnode *vp)
1957 {
1958 	struct mount *mp;
1959 
1960 	VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
1961 
1962 	mp = vp->v_mount;
1963 	if (mp == NULL)
1964 		return;
1965 	MNT_ILOCK(mp);
1966 	VI_LOCK(vp);
1967 	vp->v_mount = NULL;
1968 	VI_UNLOCK(vp);
1969 	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
1970 		("bad mount point vnode list size"));
1971 	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1972 	mp->mnt_nvnodelistsize--;
1973 	MNT_REL(mp);
1974 	MNT_IUNLOCK(mp);
1975 }
1976 
1977 static void
1978 insmntque_stddtr(struct vnode *vp, void *dtr_arg)
1979 {
1980 
1981 	vp->v_data = NULL;
1982 	vp->v_op = &dead_vnodeops;
1983 	vgone(vp);
1984 	vput(vp);
1985 }
1986 
1987 /*
1988  * Insert into list of vnodes for the new mount point, if available.
1989  */
1990 int
1991 insmntque1(struct vnode *vp, struct mount *mp,
1992 	void (*dtr)(struct vnode *, void *), void *dtr_arg)
1993 {
1994 
1995 	KASSERT(vp->v_mount == NULL,
1996 		("insmntque: vnode already on per mount vnode list"));
1997 	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
1998 	ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
1999 
2000 	/*
2001 	 * We acquire the vnode interlock early to ensure that the
2002 	 * vnode cannot be recycled by another process releasing a
2003 	 * holdcnt on it before we get it on both the vnode list
2004 	 * and the active vnode list. The mount mutex protects only
2005 	 * manipulation of the vnode list, and the vnode freelist
2006 	 * mutex protects only manipulation of the active vnode list.
2007 	 * Hence the need to hold the vnode interlock throughout.
2008 	 */
2009 	MNT_ILOCK(mp);
2010 	VI_LOCK(vp);
2011 	if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 &&
2012 	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
2013 	    mp->mnt_nvnodelistsize == 0)) &&
2014 	    (vp->v_vflag & VV_FORCEINSMQ) == 0) {
2015 		VI_UNLOCK(vp);
2016 		MNT_IUNLOCK(mp);
2017 		if (dtr != NULL)
2018 			dtr(vp, dtr_arg);
2019 		return (EBUSY);
2020 	}
2021 	vp->v_mount = mp;
2022 	MNT_REF(mp);
2023 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
2024 	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
2025 		("neg mount point vnode list size"));
2026 	mp->mnt_nvnodelistsize++;
2027 	VI_UNLOCK(vp);
2028 	MNT_IUNLOCK(mp);
2029 	return (0);
2030 }
2031 
2032 int
2033 insmntque(struct vnode *vp, struct mount *mp)
2034 {
2035 
2036 	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
2037 }
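
/*
 * Illustrative sketch (editorial addition) of the usual creation
 * sequence tying getnewvnode() and insmntque() together; details vary
 * per filesystem and "examplefs" is a placeholder:
 *
 *	error = getnewvnode("examplefs", mp, &examplefs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vp->v_data = private_data;
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 *
 * On insmntque() failure the standard destructor already cleared
 * v_data, switched the vnode to dead_vnodeops and vgone()/vput() it,
 * so the caller must not touch vp again (any private data still has
 * to be freed separately).
 */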
2038 
2039 /*
2040  * Flush out and invalidate all buffers associated with a bufobj
2041  * Called with the underlying object locked.
2042  */
2043 int
2044 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
2045 {
2046 	int error;
2047 
2048 	BO_LOCK(bo);
2049 	if (flags & V_SAVE) {
2050 		error = bufobj_wwait(bo, slpflag, slptimeo);
2051 		if (error) {
2052 			BO_UNLOCK(bo);
2053 			return (error);
2054 		}
2055 		if (bo->bo_dirty.bv_cnt > 0) {
2056 			BO_UNLOCK(bo);
2057 			do {
2058 				error = BO_SYNC(bo, MNT_WAIT);
2059 			} while (error == ERELOOKUP);
2060 			if (error != 0)
2061 				return (error);
2062 			/*
2063 			 * XXX We could save a lock/unlock if this was only
2064 			 * enabled under INVARIANTS
2065 			 */
2066 			BO_LOCK(bo);
2067 			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
2068 				panic("vinvalbuf: dirty bufs");
2069 		}
2070 	}
2071 	/*
2072 	 * If you alter this loop please notice that interlock is dropped and
2073 	 * reacquired in flushbuflist.  Special care is needed to ensure that
2074 	 * no race conditions occur from this.
2075 	 */
2076 	do {
2077 		error = flushbuflist(&bo->bo_clean,
2078 		    flags, bo, slpflag, slptimeo);
2079 		if (error == 0 && !(flags & V_CLEANONLY))
2080 			error = flushbuflist(&bo->bo_dirty,
2081 			    flags, bo, slpflag, slptimeo);
2082 		if (error != 0 && error != EAGAIN) {
2083 			BO_UNLOCK(bo);
2084 			return (error);
2085 		}
2086 	} while (error != 0);
2087 
2088 	/*
2089 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
2090 	 * have write I/O in-progress but if there is a VM object then the
2091 	 * VM object can also have read-I/O in-progress.
2092 	 */
2093 	do {
2094 		bufobj_wwait(bo, 0, 0);
2095 		if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) {
2096 			BO_UNLOCK(bo);
2097 			vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx");
2098 			BO_LOCK(bo);
2099 		}
2100 	} while (bo->bo_numoutput > 0);
2101 	BO_UNLOCK(bo);
2102 
2103 	/*
2104 	 * Destroy the copy in the VM cache, too.
2105 	 */
2106 	if (bo->bo_object != NULL &&
2107 	    (flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0) {
2108 		VM_OBJECT_WLOCK(bo->bo_object);
2109 		vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ?
2110 		    OBJPR_CLEANONLY : 0);
2111 		VM_OBJECT_WUNLOCK(bo->bo_object);
2112 	}
2113 
2114 #ifdef INVARIANTS
2115 	BO_LOCK(bo);
2116 	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO |
2117 	    V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 ||
2118 	    bo->bo_clean.bv_cnt > 0))
2119 		panic("vinvalbuf: flush failed");
2120 	if ((flags & (V_ALT | V_NORMAL | V_CLEANONLY | V_VMIO)) == 0 &&
2121 	    bo->bo_dirty.bv_cnt > 0)
2122 		panic("vinvalbuf: flush dirty failed");
2123 	BO_UNLOCK(bo);
2124 #endif
2125 	return (0);
2126 }
2127 
2128 /*
2129  * Flush out and invalidate all buffers associated with a vnode.
2130  * Called with the underlying object locked.
2131  */
2132 int
2133 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
2134 {
2135 
2136 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2137 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
2138 	if (vp->v_object != NULL && vp->v_object->handle != vp)
2139 		return (0);
2140 	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
2141 }
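
/*
 * Illustrative usage (editorial addition): a caller holding the vnode
 * lock picks the flags depending on whether dirty data should survive.
 * Both calls below are sketches:
 *
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *	(dirty buffers are synced out before everything is tossed)
 *
 *	error = vinvalbuf(vp, 0, 0, 0);
 *	(all buffers, dirty or not, are discarded)
 */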
2142 
2143 /*
2144  * Flush out buffers on the specified list.
2145  *
2146  */
2147 static int
2148 flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
2149     int slptimeo)
2150 {
2151 	struct buf *bp, *nbp;
2152 	int retval, error;
2153 	daddr_t lblkno;
2154 	b_xflags_t xflags;
2155 
2156 	ASSERT_BO_WLOCKED(bo);
2157 
2158 	retval = 0;
2159 	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
2160 		/*
2161 		 * If we are flushing both V_NORMAL and V_ALT buffers then
2162 		 * do not skip any buffers. If we are flushing only V_NORMAL
2163 		 * buffers then skip buffers marked as BX_ALTDATA. If we are
2164 		 * flushing only V_ALT buffers then skip buffers not marked
2165 		 * as BX_ALTDATA.
2166 		 */
2167 		if (((flags & (V_NORMAL | V_ALT)) != (V_NORMAL | V_ALT)) &&
2168 		   (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) ||
2169 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) {
2170 			continue;
2171 		}
2172 		if (nbp != NULL) {
2173 			lblkno = nbp->b_lblkno;
2174 			xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN);
2175 		}
2176 		retval = EAGAIN;
2177 		error = BUF_TIMELOCK(bp,
2178 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_LOCKPTR(bo),
2179 		    "flushbuf", slpflag, slptimeo);
2180 		if (error) {
2181 			BO_LOCK(bo);
2182 			return (error != ENOLCK ? error : EAGAIN);
2183 		}
2184 		KASSERT(bp->b_bufobj == bo,
2185 		    ("bp %p wrong b_bufobj %p should be %p",
2186 		    bp, bp->b_bufobj, bo));
2187 		/*
2188 		 * XXX Since there are no node locks for NFS, I
2189 		 * believe there is a slight chance that a delayed
2190 		 * write will occur while sleeping just above, so
2191 		 * check for it.
2192 		 */
2193 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
2194 		    (flags & V_SAVE)) {
2195 			bremfree(bp);
2196 			bp->b_flags |= B_ASYNC;
2197 			bwrite(bp);
2198 			BO_LOCK(bo);
2199 			return (EAGAIN);	/* XXX: why not loop? */
2200 		}
2201 		bremfree(bp);
2202 		bp->b_flags |= (B_INVAL | B_RELBUF);
2203 		bp->b_flags &= ~B_ASYNC;
2204 		brelse(bp);
2205 		BO_LOCK(bo);
2206 		if (nbp == NULL)
2207 			break;
2208 		nbp = gbincore(bo, lblkno);
2209 		if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
2210 		    != xflags)
2211 			break;			/* nbp invalid */
2212 	}
2213 	return (retval);
2214 }
2215 
2216 int
2217 bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn, daddr_t endn)
2218 {
2219 	struct buf *bp;
2220 	int error;
2221 	daddr_t lblkno;
2222 
2223 	ASSERT_BO_LOCKED(bo);
2224 
2225 	for (lblkno = startn;;) {
2226 again:
2227 		bp = BUF_PCTRIE_LOOKUP_GE(&bufv->bv_root, lblkno);
2228 		if (bp == NULL || bp->b_lblkno >= endn ||
2229 		    bp->b_lblkno < startn)
2230 			break;
2231 		error = BUF_TIMELOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
2232 		    LK_INTERLOCK, BO_LOCKPTR(bo), "brlsfl", 0, 0);
2233 		if (error != 0) {
2234 			BO_RLOCK(bo);
2235 			if (error == ENOLCK)
2236 				goto again;
2237 			return (error);
2238 		}
2239 		KASSERT(bp->b_bufobj == bo,
2240 		    ("bp %p wrong b_bufobj %p should be %p",
2241 		    bp, bp->b_bufobj, bo));
2242 		lblkno = bp->b_lblkno + 1;
2243 		if ((bp->b_flags & B_MANAGED) == 0)
2244 			bremfree(bp);
2245 		bp->b_flags |= B_RELBUF;
2246 		/*
2247 		 * In the VMIO case, use the B_NOREUSE flag to hint that the
2248 		 * pages backing each buffer in the range are unlikely to be
2249 		 * reused.  Dirty buffers will have the hint applied once
2250 		 * they've been written.
2251 		 */
2252 		if ((bp->b_flags & B_VMIO) != 0)
2253 			bp->b_flags |= B_NOREUSE;
2254 		brelse(bp);
2255 		BO_RLOCK(bo);
2256 	}
2257 	return (0);
2258 }
2259 
2260 /*
2261  * Truncate a file's buffer and pages to a specified length.  This
2262  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
2263  * sync activity.
2264  */
2265 int
2266 vtruncbuf(struct vnode *vp, off_t length, int blksize)
2267 {
2268 	struct buf *bp, *nbp;
2269 	struct bufobj *bo;
2270 	daddr_t startlbn;
2271 
2272 	CTR4(KTR_VFS, "%s: vp %p with block %d:%ju", __func__,
2273 	    vp, blksize, (uintmax_t)length);
2274 
2275 	/*
2276 	 * Round up to the *next* lbn: e.g. blksize 4096 and length 4097 give startlbn 2, keeping the partially valid block 1.
2277 	 */
2278 	startlbn = howmany(length, blksize);
2279 
2280 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
2281 
2282 	bo = &vp->v_bufobj;
2283 restart_unlocked:
2284 	BO_LOCK(bo);
2285 
2286 	while (v_inval_buf_range_locked(vp, bo, startlbn, INT64_MAX) == EAGAIN)
2287 		;
2288 
2289 	if (length > 0) {
2290 restartsync:
2291 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2292 			if (bp->b_lblkno > 0)
2293 				continue;
2294 			/*
2295 			 * Since we hold the vnode lock this should only
2296 			 * fail if we're racing with the buf daemon.
2297 			 */
2298 			if (BUF_LOCK(bp,
2299 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2300 			    BO_LOCKPTR(bo)) == ENOLCK)
2301 				goto restart_unlocked;
2302 
2303 			VNASSERT((bp->b_flags & B_DELWRI), vp,
2304 			    ("buf(%p) on dirty queue without DELWRI", bp));
2305 
2306 			bremfree(bp);
2307 			bawrite(bp);
2308 			BO_LOCK(bo);
2309 			goto restartsync;
2310 		}
2311 	}
2312 
2313 	bufobj_wwait(bo, 0, 0);
2314 	BO_UNLOCK(bo);
2315 	vnode_pager_setsize(vp, length);
2316 
2317 	return (0);
2318 }
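
/*
 * Illustrative usage (editorial addition, hypothetical names): a
 * filesystem shrinking a file to "length" bytes calls
 *
 *	error = vtruncbuf(vp, length, fs_blocksize);
 *
 * with the vnode locked; buffers wholly past the new end are tossed,
 * dirty metadata buffers (non-positive lblkno) are written out when
 * length > 0, output is awaited and the pager is told the new size.
 */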
2319 
2320 /*
2321  * Invalidate the cached pages of a file's buffer within the range of block
2322  * numbers [startlbn, endlbn).
2323  */
2324 void
2325 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
2326     int blksize)
2327 {
2328 	struct bufobj *bo;
2329 	off_t start, end;
2330 
2331 	ASSERT_VOP_LOCKED(vp, "v_inval_buf_range");
2332 
2333 	start = blksize * startlbn;
2334 	end = blksize * endlbn;
2335 
2336 	bo = &vp->v_bufobj;
2337 	BO_LOCK(bo);
2338 	MPASS(blksize == bo->bo_bsize);
2339 
2340 	while (v_inval_buf_range_locked(vp, bo, startlbn, endlbn) == EAGAIN)
2341 		;
2342 
2343 	BO_UNLOCK(bo);
2344 	vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1));
2345 }
2346 
2347 static int
2348 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
2349     daddr_t startlbn, daddr_t endlbn)
2350 {
2351 	struct buf *bp, *nbp;
2352 	bool anyfreed;
2353 
2354 	ASSERT_VOP_LOCKED(vp, "v_inval_buf_range_locked");
2355 	ASSERT_BO_LOCKED(bo);
2356 
2357 	do {
2358 		anyfreed = false;
2359 		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
2360 			if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn)
2361 				continue;
2362 			if (BUF_LOCK(bp,
2363 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2364 			    BO_LOCKPTR(bo)) == ENOLCK) {
2365 				BO_LOCK(bo);
2366 				return (EAGAIN);
2367 			}
2368 
2369 			bremfree(bp);
2370 			bp->b_flags |= B_INVAL | B_RELBUF;
2371 			bp->b_flags &= ~B_ASYNC;
2372 			brelse(bp);
2373 			anyfreed = true;
2374 
2375 			BO_LOCK(bo);
2376 			if (nbp != NULL &&
2377 			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
2378 			    nbp->b_vp != vp ||
2379 			    (nbp->b_flags & B_DELWRI) != 0))
2380 				return (EAGAIN);
2381 		}
2382 
2383 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
2384 			if (bp->b_lblkno < startlbn || bp->b_lblkno >= endlbn)
2385 				continue;
2386 			if (BUF_LOCK(bp,
2387 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
2388 			    BO_LOCKPTR(bo)) == ENOLCK) {
2389 				BO_LOCK(bo);
2390 				return (EAGAIN);
2391 			}
2392 			bremfree(bp);
2393 			bp->b_flags |= B_INVAL | B_RELBUF;
2394 			bp->b_flags &= ~B_ASYNC;
2395 			brelse(bp);
2396 			anyfreed = true;
2397 
2398 			BO_LOCK(bo);
2399 			if (nbp != NULL &&
2400 			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
2401 			    (nbp->b_vp != vp) ||
2402 			    (nbp->b_flags & B_DELWRI) == 0))
2403 				return (EAGAIN);
2404 		}
2405 	} while (anyfreed);
2406 	return (0);
2407 }
2408 
2409 static void
2410 buf_vlist_remove(struct buf *bp)
2411 {
2412 	struct bufv *bv;
2413 	b_xflags_t flags;
2414 
2415 	flags = bp->b_xflags;
2416 
2417 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
2418 	ASSERT_BO_WLOCKED(bp->b_bufobj);
2419 	KASSERT((flags & (BX_VNDIRTY | BX_VNCLEAN)) != 0 &&
2420 	    (flags & (BX_VNDIRTY | BX_VNCLEAN)) != (BX_VNDIRTY | BX_VNCLEAN),
2421 	    ("%s: buffer %p has invalid queue state", __func__, bp));
2422 
2423 	if ((flags & BX_VNDIRTY) != 0)
2424 		bv = &bp->b_bufobj->bo_dirty;
2425 	else
2426 		bv = &bp->b_bufobj->bo_clean;
2427 	BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno);
2428 	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
2429 	bv->bv_cnt--;
2430 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
2431 }
2432 
2433 /*
2434  * Add the buffer to the sorted clean or dirty block list.
2435  *
2436  * NOTE: xflags is passed as a constant, optimizing this inline function!
2437  */
2438 static void
2439 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
2440 {
2441 	struct bufv *bv;
2442 	struct buf *n;
2443 	int error;
2444 
2445 	ASSERT_BO_WLOCKED(bo);
2446 	KASSERT((bo->bo_flag & BO_NOBUFS) == 0,
2447 	    ("buf_vlist_add: bo %p does not allow bufs", bo));
2448 	KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0,
2449 	    ("dead bo %p", bo));
2450 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
2451 	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
2452 	bp->b_xflags |= xflags;
2453 	if (xflags & BX_VNDIRTY)
2454 		bv = &bo->bo_dirty;
2455 	else
2456 		bv = &bo->bo_clean;
2457 
2458 	/*
2459 	 * Keep the list ordered.  Optimize empty list insertion.  Assume
2460 	 * we tend to grow at the tail so lookup_le should usually be cheaper
2461 	 * than _ge.
2462 	 */
2463 	if (bv->bv_cnt == 0 ||
2464 	    bp->b_lblkno > TAILQ_LAST(&bv->bv_hd, buflists)->b_lblkno)
2465 		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
2466 	else if ((n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, bp->b_lblkno)) == NULL)
2467 		TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs);
2468 	else
2469 		TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs);
2470 	error = BUF_PCTRIE_INSERT(&bv->bv_root, bp);
2471 	if (error)
2472 		panic("buf_vlist_add:  Preallocated nodes insufficient.");
2473 	bv->bv_cnt++;
2474 }
2475 
2476 /*
2477  * Look up a buffer using the buffer tries.
2478  */
2479 struct buf *
2480 gbincore(struct bufobj *bo, daddr_t lblkno)
2481 {
2482 	struct buf *bp;
2483 
2484 	ASSERT_BO_LOCKED(bo);
2485 	bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno);
2486 	if (bp != NULL)
2487 		return (bp);
2488 	return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno));
2489 }
2490 
2491 /*
2492  * Look up a buf using the buffer tries, without the bufobj lock.  This relies
2493  * on SMR for safe lookup, and bufs being in a no-free zone to provide type
2494  * stability of the result.  Like other lockless lookups, the found buf may
2495  * already be invalid by the time this function returns.
2496  */
2497 struct buf *
2498 gbincore_unlocked(struct bufobj *bo, daddr_t lblkno)
2499 {
2500 	struct buf *bp;
2501 
2502 	ASSERT_BO_UNLOCKED(bo);
2503 	bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno);
2504 	if (bp != NULL)
2505 		return (bp);
2506 	return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno));
2507 }
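
/*
 * Illustrative sketch (editorial addition) of the revalidation a
 * lockless caller has to perform: since the result of
 * gbincore_unlocked() may be concurrently reused for another
 * (bufobj, lblkno) pair, identity must be rechecked once the buffer
 * is locked:
 *
 *	bp = gbincore_unlocked(bo, lblkno);
 *	if (bp != NULL &&
 *	    BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0) {
 *		if (bp->b_bufobj != bo || bp->b_lblkno != lblkno)
 *			(lost a race; unlock and retry the lookup)
 *	}
 */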
2508 
2509 /*
2510  * Associate a buffer with a vnode.
2511  */
2512 void
2513 bgetvp(struct vnode *vp, struct buf *bp)
2514 {
2515 	struct bufobj *bo;
2516 
2517 	bo = &vp->v_bufobj;
2518 	ASSERT_BO_WLOCKED(bo);
2519 	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
2520 
2521 	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
2522 	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
2523 	    ("bgetvp: bp already attached! %p", bp));
2524 
2525 	vhold(vp);
2526 	bp->b_vp = vp;
2527 	bp->b_bufobj = bo;
2528 	/*
2529 	 * Insert onto list for new vnode.
2530 	 */
2531 	buf_vlist_add(bp, bo, BX_VNCLEAN);
2532 }
2533 
2534 /*
2535  * Disassociate a buffer from a vnode.
2536  */
2537 void
2538 brelvp(struct buf *bp)
2539 {
2540 	struct bufobj *bo;
2541 	struct vnode *vp;
2542 
2543 	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
2544 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
2545 
2546 	/*
2547 	 * Delete from old vnode list, if on one.
2548 	 */
2549 	vp = bp->b_vp;		/* XXX */
2550 	bo = bp->b_bufobj;
2551 	BO_LOCK(bo);
2552 	buf_vlist_remove(bp);
2553 	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2554 		bo->bo_flag &= ~BO_ONWORKLST;
2555 		mtx_lock(&sync_mtx);
2556 		LIST_REMOVE(bo, bo_synclist);
2557 		syncer_worklist_len--;
2558 		mtx_unlock(&sync_mtx);
2559 	}
2560 	bp->b_vp = NULL;
2561 	bp->b_bufobj = NULL;
2562 	BO_UNLOCK(bo);
2563 	vdrop(vp);
2564 }
2565 
2566 /*
2567  * Add an item to the syncer work queue.
2568  */
2569 static void
2570 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
2571 {
2572 	int slot;
2573 
2574 	ASSERT_BO_WLOCKED(bo);
2575 
2576 	mtx_lock(&sync_mtx);
2577 	if (bo->bo_flag & BO_ONWORKLST)
2578 		LIST_REMOVE(bo, bo_synclist);
2579 	else {
2580 		bo->bo_flag |= BO_ONWORKLST;
2581 		syncer_worklist_len++;
2582 	}
2583 
2584 	if (delay > syncer_maxdelay - 2)
2585 		delay = syncer_maxdelay - 2;
2586 	slot = (syncer_delayno + delay) & syncer_mask;
2587 
2588 	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
2589 	mtx_unlock(&sync_mtx);
2590 }
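
/*
 * Worked example (editorial addition): the pending table behaves as a
 * timer wheel.  If the wheel currently points at slot 10 and the
 * requested delay is 20, the bufobj is filed in slot
 * (10 + 20) & syncer_mask and is visited roughly 20 seconds later,
 * since sched_sync() below advances syncer_delayno once per second.
 */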
2591 
2592 static int
2593 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
2594 {
2595 	int error, len;
2596 
2597 	mtx_lock(&sync_mtx);
2598 	len = syncer_worklist_len - sync_vnode_count;
2599 	mtx_unlock(&sync_mtx);
2600 	error = SYSCTL_OUT(req, &len, sizeof(len));
2601 	return (error);
2602 }
2603 
2604 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len,
2605     CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0,
2606     sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
2607 
2608 static struct proc *updateproc;
2609 static void sched_sync(void);
2610 static struct kproc_desc up_kp = {
2611 	"syncer",
2612 	sched_sync,
2613 	&updateproc
2614 };
2615 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
2616 
2617 static int
2618 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
2619 {
2620 	struct vnode *vp;
2621 	struct mount *mp;
2622 
2623 	*bo = LIST_FIRST(slp);
2624 	if (*bo == NULL)
2625 		return (0);
2626 	vp = bo2vnode(*bo);
2627 	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
2628 		return (1);
2629 	/*
2630 	 * We use vhold in case the vnode does not
2631 	 * successfully sync.  vhold prevents the vnode from
2632 	 * going away when we unlock the sync_mtx so that
2633 	 * we can acquire the vnode interlock.
2634 	 */
2635 	vholdl(vp);
2636 	mtx_unlock(&sync_mtx);
2637 	VI_UNLOCK(vp);
2638 	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
2639 		vdrop(vp);
2640 		mtx_lock(&sync_mtx);
2641 		return (*bo == LIST_FIRST(slp));
2642 	}
2643 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2644 	(void) VOP_FSYNC(vp, MNT_LAZY, td);
2645 	VOP_UNLOCK(vp);
2646 	vn_finished_write(mp);
2647 	BO_LOCK(*bo);
2648 	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
2649 		/*
2650 		 * Put us back on the worklist.  The worklist
2651 		 * routine will remove us from our current
2652 		 * position and then add us back in at a later
2653 		 * position.
2654 		 */
2655 		vn_syncer_add_to_worklist(*bo, syncdelay);
2656 	}
2657 	BO_UNLOCK(*bo);
2658 	vdrop(vp);
2659 	mtx_lock(&sync_mtx);
2660 	return (0);
2661 }
2662 
2663 static int first_printf = 1;
2664 
2665 /*
2666  * System filesystem synchronizer daemon.
2667  */
2668 static void
2669 sched_sync(void)
2670 {
2671 	struct synclist *next, *slp;
2672 	struct bufobj *bo;
2673 	long starttime;
2674 	struct thread *td = curthread;
2675 	int last_work_seen;
2676 	int net_worklist_len;
2677 	int syncer_final_iter;
2678 	int error;
2679 
2680 	last_work_seen = 0;
2681 	syncer_final_iter = 0;
2682 	syncer_state = SYNCER_RUNNING;
2683 	starttime = time_uptime;
2684 	td->td_pflags |= TDP_NORUNNINGBUF;
2685 
2686 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
2687 	    SHUTDOWN_PRI_LAST);
2688 
2689 	mtx_lock(&sync_mtx);
2690 	for (;;) {
2691 		if (syncer_state == SYNCER_FINAL_DELAY &&
2692 		    syncer_final_iter == 0) {
2693 			mtx_unlock(&sync_mtx);
2694 			kproc_suspend_check(td->td_proc);
2695 			mtx_lock(&sync_mtx);
2696 		}
2697 		net_worklist_len = syncer_worklist_len - sync_vnode_count;
2698 		if (syncer_state != SYNCER_RUNNING &&
2699 		    starttime != time_uptime) {
2700 			if (first_printf) {
2701 				printf("\nSyncing disks, vnodes remaining... ");
2702 				first_printf = 0;
2703 			}
2704 			printf("%d ", net_worklist_len);
2705 		}
2706 		starttime = time_uptime;
2707 
2708 		/*
2709 		 * Push files whose dirty time has expired.  Be careful
2710 		 * of interrupt race on slp queue.
2711 		 *
2712 		 * Skip over empty worklist slots when shutting down.
2713 		 */
2714 		do {
2715 			slp = &syncer_workitem_pending[syncer_delayno];
2716 			syncer_delayno += 1;
2717 			if (syncer_delayno == syncer_maxdelay)
2718 				syncer_delayno = 0;
2719 			next = &syncer_workitem_pending[syncer_delayno];
2720 			/*
2721 			 * If the worklist has wrapped since it
2722 			 * was emptied of all but syncer vnodes,
2723 			 * switch to the FINAL_DELAY state and run
2724 			 * for one more second.
2725 			 */
2726 			if (syncer_state == SYNCER_SHUTTING_DOWN &&
2727 			    net_worklist_len == 0 &&
2728 			    last_work_seen == syncer_delayno) {
2729 				syncer_state = SYNCER_FINAL_DELAY;
2730 				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
2731 			}
2732 		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
2733 		    syncer_worklist_len > 0);
2734 
2735 		/*
2736 		 * Keep track of the last time there was anything
2737 		 * on the worklist other than syncer vnodes.
2738 		 * Return to the SHUTTING_DOWN state if any
2739 		 * new work appears.
2740 		 */
2741 		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
2742 			last_work_seen = syncer_delayno;
2743 		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
2744 			syncer_state = SYNCER_SHUTTING_DOWN;
2745 		while (!LIST_EMPTY(slp)) {
2746 			error = sync_vnode(slp, &bo, td);
2747 			if (error == 1) {
2748 				LIST_REMOVE(bo, bo_synclist);
2749 				LIST_INSERT_HEAD(next, bo, bo_synclist);
2750 				continue;
2751 			}
2752 
2753 			if (first_printf == 0) {
2754 				/*
2755 				 * Drop the sync mutex, because some watchdog
2756 				 * drivers need to sleep while patting
2757 				 * drivers need to sleep while patting the watchdog.
2758 				mtx_unlock(&sync_mtx);
2759 				wdog_kern_pat(WD_LASTVAL);
2760 				mtx_lock(&sync_mtx);
2761 			}
2762 		}
2763 		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
2764 			syncer_final_iter--;
2765 		/*
2766 		 * The variable rushjob allows the kernel to speed up the
2767 		 * processing of the filesystem syncer process. A rushjob
2768 		 * value of N tells the filesystem syncer to process the next
2769 		 * N seconds worth of work on its queue ASAP. Currently rushjob
2770 		 * is used by the soft update code to speed up the filesystem
2771 		 * syncer process when the incore state is getting so far
2772 		 * ahead of the disk that the kernel memory pool is being
2773 		 * threatened with exhaustion.
2774 		 */
2775 		if (rushjob > 0) {
2776 			rushjob -= 1;
2777 			continue;
2778 		}
2779 		/*
2780 		 * Just sleep for a short period of time between
2781 		 * iterations when shutting down to allow some I/O
2782 		 * to happen.
2783 		 *
2784 		 * If it has taken us less than a second to process the
2785 		 * current work, then wait. Otherwise start right over
2786 		 * again. We can still lose time if any single round
2787 		 * takes more than two seconds, but it does not really
2788 		 * matter as we are just trying to generally pace the
2789 		 * filesystem activity.
2790 		 */
2791 		if (syncer_state != SYNCER_RUNNING ||
2792 		    time_uptime == starttime) {
2793 			thread_lock(td);
2794 			sched_prio(td, PPAUSE);
2795 			thread_unlock(td);
2796 		}
2797 		if (syncer_state != SYNCER_RUNNING)
2798 			cv_timedwait(&sync_wakeup, &sync_mtx,
2799 			    hz / SYNCER_SHUTDOWN_SPEEDUP);
2800 		else if (time_uptime == starttime)
2801 			cv_timedwait(&sync_wakeup, &sync_mtx, hz);
2802 	}
2803 }
2804 
2805 /*
2806  * Request the syncer daemon to speed up its work.
2807  * We never push it to speed up more than half of its
2808  * normal turn time, otherwise it could take over the cpu.
2809  */
2810 int
2811 speedup_syncer(void)
2812 {
2813 	int ret = 0;
2814 
2815 	mtx_lock(&sync_mtx);
2816 	if (rushjob < syncdelay / 2) {
2817 		rushjob += 1;
2818 		stat_rush_requests += 1;
2819 		ret = 1;
2820 	}
2821 	mtx_unlock(&sync_mtx);
2822 	cv_broadcast(&sync_wakeup);
2823 	return (ret);
2824 }
2825 
2826 /*
2827  * Tell the syncer to speed up its work and run through its work
2828  * list several times, then tell it to shut down.
2829  */
2830 static void
2831 syncer_shutdown(void *arg, int howto)
2832 {
2833 
2834 	if (howto & RB_NOSYNC)
2835 		return;
2836 	mtx_lock(&sync_mtx);
2837 	syncer_state = SYNCER_SHUTTING_DOWN;
2838 	rushjob = 0;
2839 	mtx_unlock(&sync_mtx);
2840 	cv_broadcast(&sync_wakeup);
2841 	kproc_shutdown(arg, howto);
2842 }
2843 
2844 void
2845 syncer_suspend(void)
2846 {
2847 
2848 	syncer_shutdown(updateproc, 0);
2849 }
2850 
2851 void
2852 syncer_resume(void)
2853 {
2854 
2855 	mtx_lock(&sync_mtx);
2856 	first_printf = 1;
2857 	syncer_state = SYNCER_RUNNING;
2858 	mtx_unlock(&sync_mtx);
2859 	cv_broadcast(&sync_wakeup);
2860 	kproc_resume(updateproc);
2861 }
2862 
2863 /*
2864  * Move the buffer between the clean and dirty lists of its vnode.
2865  */
2866 void
2867 reassignbuf(struct buf *bp)
2868 {
2869 	struct vnode *vp;
2870 	struct bufobj *bo;
2871 	int delay;
2872 #ifdef INVARIANTS
2873 	struct bufv *bv;
2874 #endif
2875 
2876 	vp = bp->b_vp;
2877 	bo = bp->b_bufobj;
2878 
2879 	KASSERT((bp->b_flags & B_PAGING) == 0,
2880 	    ("%s: cannot reassign paging buffer %p", __func__, bp));
2881 
2882 	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
2883 	    bp, bp->b_vp, bp->b_flags);
2884 
2885 	BO_LOCK(bo);
2886 	buf_vlist_remove(bp);
2887 
2888 	/*
2889 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
2890 	 * of clean buffers.
2891 	 */
2892 	if (bp->b_flags & B_DELWRI) {
2893 		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
2894 			switch (vp->v_type) {
2895 			case VDIR:
2896 				delay = dirdelay;
2897 				break;
2898 			case VCHR:
2899 				delay = metadelay;
2900 				break;
2901 			default:
2902 				delay = filedelay;
2903 			}
2904 			vn_syncer_add_to_worklist(bo, delay);
2905 		}
2906 		buf_vlist_add(bp, bo, BX_VNDIRTY);
2907 	} else {
2908 		buf_vlist_add(bp, bo, BX_VNCLEAN);
2909 
2910 		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
2911 			mtx_lock(&sync_mtx);
2912 			LIST_REMOVE(bo, bo_synclist);
2913 			syncer_worklist_len--;
2914 			mtx_unlock(&sync_mtx);
2915 			bo->bo_flag &= ~BO_ONWORKLST;
2916 		}
2917 	}
2918 #ifdef INVARIANTS
2919 	bv = &bo->bo_clean;
2920 	bp = TAILQ_FIRST(&bv->bv_hd);
2921 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2922 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2923 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
2924 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2925 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2926 	bv = &bo->bo_dirty;
2927 	bp = TAILQ_FIRST(&bv->bv_hd);
2928 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2929 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2930 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
2931 	KASSERT(bp == NULL || bp->b_bufobj == bo,
2932 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
2933 #endif
2934 	BO_UNLOCK(bo);
2935 }
2936 
2937 static void
2938 v_init_counters(struct vnode *vp)
2939 {
2940 
2941 	VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0,
2942 	    vp, ("%s called for an initialized vnode", __FUNCTION__));
2943 	ASSERT_VI_UNLOCKED(vp, __FUNCTION__);
2944 
2945 	refcount_init(&vp->v_holdcnt, 1);
2946 	refcount_init(&vp->v_usecount, 1);
2947 }
2948 
2949 /*
2950  * Grab a particular vnode from the free list, increment its
2951  * reference count and lock it.  VIRF_DOOMED is set if the vnode
2952  * is being destroyed.  Only callers who specify LK_RETRY will
2953  * see doomed vnodes.  If inactive processing was delayed in
2954  * vput try to do it here.
2955  *
2956  * usecount is manipulated using atomics without holding any locks.
2957  *
2958  * holdcnt can be manipulated using atomics without holding any locks,
2959  * except when transitioning 1<->0, in which case the interlock is held.
2960  *
2961  * Consumers which don't guarantee liveness of the vnode can use SMR to
2962  * try to get a reference. Note this operation can fail since the vnode
2963  * may be awaiting being freed by the time they get to it.
2964  */
2965 enum vgetstate
2966 vget_prep_smr(struct vnode *vp)
2967 {
2968 	enum vgetstate vs;
2969 
2970 	VFS_SMR_ASSERT_ENTERED();
2971 
2972 	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
2973 		vs = VGET_USECOUNT;
2974 	} else {
2975 		if (vhold_smr(vp))
2976 			vs = VGET_HOLDCNT;
2977 		else
2978 			vs = VGET_NONE;
2979 	}
2980 	return (vs);
2981 }
2982 
2983 enum vgetstate
2984 vget_prep(struct vnode *vp)
2985 {
2986 	enum vgetstate vs;
2987 
2988 	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
2989 		vs = VGET_USECOUNT;
2990 	} else {
2991 		vhold(vp);
2992 		vs = VGET_HOLDCNT;
2993 	}
2994 	return (vs);
2995 }
2996 
2997 void
2998 vget_abort(struct vnode *vp, enum vgetstate vs)
2999 {
3000 
3001 	switch (vs) {
3002 	case VGET_USECOUNT:
3003 		vrele(vp);
3004 		break;
3005 	case VGET_HOLDCNT:
3006 		vdrop(vp);
3007 		break;
3008 	default:
3009 		__assert_unreachable();
3010 	}
3011 }
3012 
3013 int
3014 vget(struct vnode *vp, int flags)
3015 {
3016 	enum vgetstate vs;
3017 
3018 	vs = vget_prep(vp);
3019 	return (vget_finish(vp, flags, vs));
3020 }
3021 
3022 int
3023 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
3024 {
3025 	int error;
3026 
3027 	if ((flags & LK_INTERLOCK) != 0)
3028 		ASSERT_VI_LOCKED(vp, __func__);
3029 	else
3030 		ASSERT_VI_UNLOCKED(vp, __func__);
3031 	VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3032 	VNPASS(vp->v_holdcnt > 0, vp);
3033 	VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3034 
3035 	error = vn_lock(vp, flags);
3036 	if (__predict_false(error != 0)) {
3037 		vget_abort(vp, vs);
3038 		CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
3039 		    vp);
3040 		return (error);
3041 	}
3042 
3043 	vget_finish_ref(vp, vs);
3044 	return (0);
3045 }
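
/*
 * Illustrative sketch (editorial addition) of the lockless access
 * pattern the SMR primitives above enable; the structure the vnode is
 * found in is assumed to be SMR-safe:
 *
 *	vfs_smr_enter();
 *	vp = (lookup in an SMR-protected structure)
 *	vs = vget_prep_smr(vp);
 *	vfs_smr_exit();
 *	if (vs == VGET_NONE)
 *		(the vnode was being freed; restart the lookup)
 *	error = vget_finish(vp, LK_SHARED, vs);
 */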
3046 
3047 void
3048 vget_finish_ref(struct vnode *vp, enum vgetstate vs)
3049 {
3050 	int old;
3051 
3052 	VNPASS(vs == VGET_HOLDCNT || vs == VGET_USECOUNT, vp);
3053 	VNPASS(vp->v_holdcnt > 0, vp);
3054 	VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp);
3055 
3056 	if (vs == VGET_USECOUNT)
3057 		return;
3058 
3059 	/*
3060 	 * We hold the vnode. If the usecount is 0 it will be utilized to keep
3061 	 * the vnode around. Otherwise someone else lended their hold count and
3062 	 * the vnode around. Otherwise someone else lent their hold count and
3063 	 */
3064 	old = atomic_fetchadd_int(&vp->v_usecount, 1);
3065 	VNASSERT(old >= 0, vp, ("%s: wrong use count %d", __func__, old));
3066 	if (old != 0) {
3067 #ifdef INVARIANTS
3068 		old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
3069 		VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
3070 #else
3071 		refcount_release(&vp->v_holdcnt);
3072 #endif
3073 	}
3074 }
3075 
3076 void
3077 vref(struct vnode *vp)
3078 {
3079 	enum vgetstate vs;
3080 
3081 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3082 	vs = vget_prep(vp);
3083 	vget_finish_ref(vp, vs);
3084 }
3085 
3086 void
3087 vrefact(struct vnode *vp)
3088 {
3089 
3090 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3091 #ifdef INVARIANTS
3092 	int old = atomic_fetchadd_int(&vp->v_usecount, 1);
3093 	VNASSERT(old > 0, vp, ("%s: wrong use count %d", __func__, old));
3094 #else
3095 	refcount_acquire(&vp->v_usecount);
3096 #endif
3097 }
3098 
3099 void
3100 vlazy(struct vnode *vp)
3101 {
3102 	struct mount *mp;
3103 
3104 	VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__));
3105 
3106 	if ((vp->v_mflag & VMP_LAZYLIST) != 0)
3107 		return;
3108 	/*
3109 	 * We may get here for inactive routines after the vnode got doomed.
3110 	 */
3111 	if (VN_IS_DOOMED(vp))
3112 		return;
3113 	mp = vp->v_mount;
3114 	mtx_lock(&mp->mnt_listmtx);
3115 	if ((vp->v_mflag & VMP_LAZYLIST) == 0) {
3116 		vp->v_mflag |= VMP_LAZYLIST;
3117 		TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3118 		mp->mnt_lazyvnodelistsize++;
3119 	}
3120 	mtx_unlock(&mp->mnt_listmtx);
3121 }
3122 
3123 /*
3124  * This routine is only meant to be called from vgonel prior to dooming
3125  * the vnode.
3126  */
3127 static void
3128 vunlazy_gone(struct vnode *vp)
3129 {
3130 	struct mount *mp;
3131 
3132 	ASSERT_VOP_ELOCKED(vp, __func__);
3133 	ASSERT_VI_LOCKED(vp, __func__);
3134 	VNPASS(!VN_IS_DOOMED(vp), vp);
3135 
3136 	if (vp->v_mflag & VMP_LAZYLIST) {
3137 		mp = vp->v_mount;
3138 		mtx_lock(&mp->mnt_listmtx);
3139 		VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
3140 		vp->v_mflag &= ~VMP_LAZYLIST;
3141 		TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3142 		mp->mnt_lazyvnodelistsize--;
3143 		mtx_unlock(&mp->mnt_listmtx);
3144 	}
3145 }
3146 
3147 static void
3148 vdefer_inactive(struct vnode *vp)
3149 {
3150 
3151 	ASSERT_VI_LOCKED(vp, __func__);
3152 	VNASSERT(vp->v_holdcnt > 0, vp,
3153 	    ("%s: vnode without hold count", __func__));
3154 	if (VN_IS_DOOMED(vp)) {
3155 		vdropl(vp);
3156 		return;
3157 	}
3158 	if (vp->v_iflag & VI_DEFINACT) {
3159 		VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count"));
3160 		vdropl(vp);
3161 		return;
3162 	}
3163 	if (vp->v_usecount > 0) {
3164 		vp->v_iflag &= ~VI_OWEINACT;
3165 		vdropl(vp);
3166 		return;
3167 	}
3168 	vlazy(vp);
3169 	vp->v_iflag |= VI_DEFINACT;
3170 	VI_UNLOCK(vp);
3171 	counter_u64_add(deferred_inact, 1);
3172 }
3173 
3174 static void
3175 vdefer_inactive_unlocked(struct vnode *vp)
3176 {
3177 
3178 	VI_LOCK(vp);
3179 	if ((vp->v_iflag & VI_OWEINACT) == 0) {
3180 		vdropl(vp);
3181 		return;
3182 	}
3183 	vdefer_inactive(vp);
3184 }
3185 
3186 enum vput_op { VRELE, VPUT, VUNREF };
3187 
3188 /*
3189  * Handle ->v_usecount transitioning to 0.
3190  *
3191  * By releasing the last usecount we take ownership of the hold count which
3192  * provides liveness of the vnode, meaning we have to vdrop.
3193  *
3194  * For all vnodes we may need to perform inactive processing. It requires an
3195  * exclusive lock on the vnode, while it is legal to call here with only a
3196  * shared lock (or no locks). If locking the vnode in an expected manner fails,
3197  * inactive processing gets deferred to the syncer.
3198  *
3199  * XXX Some filesystems pass in an exclusively locked vnode and strongly depend
3200  * on the lock being held all the way until VOP_INACTIVE. This in particular
3201  * happens with UFS, which adds half-constructed vnodes to the hash, where they
3202  * can be found by other code.
3203  */
3204 static void
3205 vput_final(struct vnode *vp, enum vput_op func)
3206 {
3207 	int error;
3208 	bool want_unlock;
3209 
3210 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3211 	VNPASS(vp->v_holdcnt > 0, vp);
3212 
3213 	VI_LOCK(vp);
3214 
3215 	/*
3216 	 * By the time we got here someone else might have transitioned
3217 	 * the count back to > 0.
3218 	 */
3219 	if (vp->v_usecount > 0)
3220 		goto out;
3221 
3222 	/*
3223 	 * If the vnode is doomed vgone already performed inactive processing
3224 	 * (if needed).
3225 	 */
3226 	if (VN_IS_DOOMED(vp))
3227 		goto out;
3228 
3229 	if (__predict_true(VOP_NEED_INACTIVE(vp) == 0))
3230 		goto out;
3231 
3232 	if (vp->v_iflag & VI_DOINGINACT)
3233 		goto out;
3234 
3235 	/*
3236 	 * Locking operations here will drop the interlock and possibly the
3237 	 * vnode lock, opening a window where the vnode can get doomed all the
3238 	 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to
3239 	 * perform inactive.
3240 	 */
3241 	vp->v_iflag |= VI_OWEINACT;
3242 	want_unlock = false;
3243 	error = 0;
3244 	switch (func) {
3245 	case VRELE:
3246 		switch (VOP_ISLOCKED(vp)) {
3247 		case LK_EXCLUSIVE:
3248 			break;
3249 		case LK_EXCLOTHER:
3250 		case 0:
3251 			want_unlock = true;
3252 			error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
3253 			VI_LOCK(vp);
3254 			break;
3255 		default:
3256 			/*
3257 			 * The lock has at least one sharer, but we have no way
3258 			 * to conclude whether this is us. Play it safe and
3259 			 * defer processing.
3260 			 */
3261 			error = EAGAIN;
3262 			break;
3263 		}
3264 		break;
3265 	case VPUT:
3266 		want_unlock = true;
3267 		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3268 			error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK |
3269 			    LK_NOWAIT);
3270 			VI_LOCK(vp);
3271 		}
3272 		break;
3273 	case VUNREF:
3274 		if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
3275 			error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK);
3276 			VI_LOCK(vp);
3277 		}
3278 		break;
3279 	}
3280 	if (error == 0) {
3281 		if (func == VUNREF) {
3282 			VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp,
3283 			    ("recursive vunref"));
3284 			vp->v_vflag |= VV_UNREF;
3285 		}
3286 		for (;;) {
3287 			error = vinactive(vp);
3288 			if (want_unlock)
3289 				VOP_UNLOCK(vp);
3290 			if (error != ERELOOKUP || !want_unlock)
3291 				break;
3292 			VOP_LOCK(vp, LK_EXCLUSIVE);
3293 		}
3294 		if (func == VUNREF)
3295 			vp->v_vflag &= ~VV_UNREF;
3296 		vdropl(vp);
3297 	} else {
3298 		vdefer_inactive(vp);
3299 	}
3300 	return;
3301 out:
3302 	if (func == VPUT)
3303 		VOP_UNLOCK(vp);
3304 	vdropl(vp);
3305 }
3306 
3307 /*
3308  * Decrement ->v_usecount for a vnode.
3309  *
3310  * Releasing the last use count requires additional processing, see vput_final
3311  * above for details.
3312  *
3313  * Comment above each variant denotes lock state on entry and exit.
3314  */
3315 
3316 /*
3317  * in: any
3318  * out: same as passed in
3319  */
3320 void
3321 vrele(struct vnode *vp)
3322 {
3323 
3324 	ASSERT_VI_UNLOCKED(vp, __func__);
3325 	if (!refcount_release(&vp->v_usecount))
3326 		return;
3327 	vput_final(vp, VRELE);
3328 }
3329 
3330 /*
3331  * in: locked
3332  * out: unlocked
3333  */
3334 void
3335 vput(struct vnode *vp)
3336 {
3337 
3338 	ASSERT_VOP_LOCKED(vp, __func__);
3339 	ASSERT_VI_UNLOCKED(vp, __func__);
3340 	if (!refcount_release(&vp->v_usecount)) {
3341 		VOP_UNLOCK(vp);
3342 		return;
3343 	}
3344 	vput_final(vp, VPUT);
3345 }
3346 
3347 /*
3348  * in: locked
3349  * out: locked
3350  */
3351 void
3352 vunref(struct vnode *vp)
3353 {
3354 
3355 	ASSERT_VOP_LOCKED(vp, __func__);
3356 	ASSERT_VI_UNLOCKED(vp, __func__);
3357 	if (!refcount_release(&vp->v_usecount))
3358 		return;
3359 	vput_final(vp, VUNREF);
3360 }
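
/*
 * Illustrative examples (editorial addition) of the lock-state
 * conventions annotated above:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	(... operate on the locked vnode ...)
 *	vput(vp);	(drops the use count and the vnode lock)
 *
 * versus operating without the vnode lock:
 *
 *	(... vp used unlocked ...)
 *	vrele(vp);	(drops the use count only)
 */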
3361 
3362 void
3363 vhold(struct vnode *vp)
3364 {
3365 	int old;
3366 
3367 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3368 	old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3369 	VNASSERT(old >= 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3370 	    ("%s: wrong hold count %d", __func__, old));
3371 	if (old == 0)
3372 		vn_freevnodes_dec();
3373 }
3374 
3375 void
3376 vholdnz(struct vnode *vp)
3377 {
3378 
3379 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3380 #ifdef INVARIANTS
3381 	int old = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3382 	VNASSERT(old > 0 && (old & VHOLD_ALL_FLAGS) == 0, vp,
3383 	    ("%s: wrong hold count %d", __func__, old));
3384 #else
3385 	atomic_add_int(&vp->v_holdcnt, 1);
3386 #endif
3387 }
3388 
3389 /*
3390  * Grab a hold count unless the vnode is freed.
3391  *
3392  * Only use this routine if vfs smr is the only protection you have against
3393  * freeing the vnode.
3394  *
3395  * The code loops trying to add a hold count as long as the VHOLD_NO_SMR flag
3396  * is not set.  After the flag is set the vnode becomes immutable to anyone but
3397  * the thread which managed to set the flag.
3398  *
3399  * It may be tempting to replace the loop with:
3400  * count = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3401  * if (count & VHOLD_NO_SMR) {
3402  *     backpedal and error out;
3403  * }
3404  *
3405  * However, while this is more performant, it hinders debugging by eliminating
3406  * the previously mentioned invariant.
3407  */
3408 bool
3409 vhold_smr(struct vnode *vp)
3410 {
3411 	int count;
3412 
3413 	VFS_SMR_ASSERT_ENTERED();
3414 
3415 	count = atomic_load_int(&vp->v_holdcnt);
3416 	for (;;) {
3417 		if (count & VHOLD_NO_SMR) {
3418 			VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3419 			    ("non-zero hold count with flags %d\n", count));
3420 			return (false);
3421 		}
3422 		VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3423 		if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3424 			if (count == 0)
3425 				vn_freevnodes_dec();
3426 			return (true);
3427 		}
3428 	}
3429 }
3430 
3431 /*
3432  * Hold a free vnode for recycling.
3433  *
3434  * Note: vnode_init references this comment.
3435  *
3436  * Attempts to recycle only need the global vnode list lock and have no use for
3437  * SMR.
3438  *
3439  * However, vnodes get inserted into the global list before they get fully
3440  * initialized and stay there until UMA decides to free the memory. This in
3441  * particular means the target can be found before it becomes usable and after
3442  * it becomes recycled. Picking up such vnodes is guarded with v_holdcnt set to
3443  * VHOLD_NO_SMR.
3444  *
3445  * Note: the vnode may gain more references after we transition the count 0->1.
3446  */
3447 static bool
3448 vhold_recycle_free(struct vnode *vp)
3449 {
3450 	int count;
3451 
3452 	mtx_assert(&vnode_list_mtx, MA_OWNED);
3453 
3454 	count = atomic_load_int(&vp->v_holdcnt);
3455 	for (;;) {
3456 		if (count & VHOLD_NO_SMR) {
3457 			VNASSERT((count & ~VHOLD_NO_SMR) == 0, vp,
3458 			    ("non-zero hold count with flags %d\n", count));
3459 			return (false);
3460 		}
3461 		VNASSERT(count >= 0, vp, ("invalid hold count %d\n", count));
3462 		if (count > 0) {
3463 			return (false);
3464 		}
3465 		if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) {
3466 			vn_freevnodes_dec();
3467 			return (true);
3468 		}
3469 	}
3470 }
3471 
3472 static void __noinline
3473 vdbatch_process(struct vdbatch *vd)
3474 {
3475 	struct vnode *vp;
3476 	int i;
3477 
3478 	mtx_assert(&vd->lock, MA_OWNED);
3479 	MPASS(curthread->td_pinned > 0);
3480 	MPASS(vd->index == VDBATCH_SIZE);
3481 
3482 	mtx_lock(&vnode_list_mtx);
3483 	critical_enter();
3484 	freevnodes += vd->freevnodes;
3485 	for (i = 0; i < VDBATCH_SIZE; i++) {
3486 		vp = vd->tab[i];
3487 		TAILQ_REMOVE(&vnode_list, vp, v_vnodelist);
3488 		TAILQ_INSERT_TAIL(&vnode_list, vp, v_vnodelist);
3489 		MPASS(vp->v_dbatchcpu != NOCPU);
3490 		vp->v_dbatchcpu = NOCPU;
3491 	}
3492 	mtx_unlock(&vnode_list_mtx);
3493 	vd->freevnodes = 0;
3494 	bzero(vd->tab, sizeof(vd->tab));
3495 	vd->index = 0;
3496 	critical_exit();
3497 }
3498 
3499 static void
3500 vdbatch_enqueue(struct vnode *vp)
3501 {
3502 	struct vdbatch *vd;
3503 
3504 	ASSERT_VI_LOCKED(vp, __func__);
3505 	VNASSERT(!VN_IS_DOOMED(vp), vp,
3506 	    ("%s: deferring requeue of a doomed vnode", __func__));
3507 
3508 	if (vp->v_dbatchcpu != NOCPU) {
3509 		VI_UNLOCK(vp);
3510 		return;
3511 	}
3512 
3513 	sched_pin();
3514 	vd = DPCPU_PTR(vd);
3515 	mtx_lock(&vd->lock);
3516 	MPASS(vd->index < VDBATCH_SIZE);
3517 	MPASS(vd->tab[vd->index] == NULL);
3518 	/*
3519 	 * A hack: we depend on being pinned so that we know what to put in
3520 	 * ->v_dbatchcpu.
3521 	 */
3522 	vp->v_dbatchcpu = curcpu;
3523 	vd->tab[vd->index] = vp;
3524 	vd->index++;
3525 	VI_UNLOCK(vp);
3526 	if (vd->index == VDBATCH_SIZE)
3527 		vdbatch_process(vd);
3528 	mtx_unlock(&vd->lock);
3529 	sched_unpin();
3530 }
3531 
3532 /*
3533  * This routine must only be called for vnodes which are about to be
3534  * deallocated. Supporting dequeue for arbitrary vnodes would require
3535  * validating that the locked batch matches.
3536  */
3537 static void
3538 vdbatch_dequeue(struct vnode *vp)
3539 {
3540 	struct vdbatch *vd;
3541 	int i;
3542 	short cpu;
3543 
3544 	VNASSERT(vp->v_type == VBAD || vp->v_type == VNON, vp,
3545 	    ("%s: called for a used vnode\n", __func__));
3546 
3547 	cpu = vp->v_dbatchcpu;
3548 	if (cpu == NOCPU)
3549 		return;
3550 
3551 	vd = DPCPU_ID_PTR(cpu, vd);
3552 	mtx_lock(&vd->lock);
3553 	for (i = 0; i < vd->index; i++) {
3554 		if (vd->tab[i] != vp)
3555 			continue;
3556 		vp->v_dbatchcpu = NOCPU;
3557 		vd->index--;
3558 		vd->tab[i] = vd->tab[vd->index];
3559 		vd->tab[vd->index] = NULL;
3560 		break;
3561 	}
3562 	mtx_unlock(&vd->lock);
3563 	/*
3564 	 * Either we dequeued the vnode above or the target CPU beat us to it.
3565 	 */
3566 	MPASS(vp->v_dbatchcpu == NOCPU);
3567 }
3568 
3569 /*
3570  * Drop the hold count of the vnode.  If this is the last reference to
3571  * the vnode we place it on the free list unless it has been vgone'd
3572  * (marked VIRF_DOOMED) in which case we will free it.
3573  *
3574  * Because the vnode vm object keeps a hold reference on the vnode if
3575  * there is at least one resident non-cached page, the vnode cannot
3576  * leave the active list without the page cleanup done.
3577  */
3578 static void
3579 vdrop_deactivate(struct vnode *vp)
3580 {
3581 	struct mount *mp;
3582 
3583 	ASSERT_VI_LOCKED(vp, __func__);
3584 	/*
3585 	 * Mark a vnode as free: remove it from its active list
3586 	 * and put it up for recycling on the freelist.
3587 	 */
3588 	VNASSERT(!VN_IS_DOOMED(vp), vp,
3589 	    ("vdrop: returning doomed vnode"));
3590 	VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
3591 	    ("vnode with VI_OWEINACT set"));
3592 	VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp,
3593 	    ("vnode with VI_DEFINACT set"));
3594 	if (vp->v_mflag & VMP_LAZYLIST) {
3595 		mp = vp->v_mount;
3596 		mtx_lock(&mp->mnt_listmtx);
3597 		VNASSERT(vp->v_mflag & VMP_LAZYLIST, vp, ("lost VMP_LAZYLIST"));
3598 		/*
3599 		 * Don't remove the vnode from the lazy list if another thread
3600 		 * has increased the hold count. It may have re-enqueued the
3601 		 * vnode to the lazy list and is now responsible for its
3602 		 * removal.
3603 		 */
3604 		if (vp->v_holdcnt == 0) {
3605 			vp->v_mflag &= ~VMP_LAZYLIST;
3606 			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist);
3607 			mp->mnt_lazyvnodelistsize--;
3608 		}
3609 		mtx_unlock(&mp->mnt_listmtx);
3610 	}
3611 	vdbatch_enqueue(vp);
3612 }
3613 
3614 static void __noinline
3615 vdropl_final(struct vnode *vp)
3616 {
3617 
3618 	ASSERT_VI_LOCKED(vp, __func__);
3619 	VNPASS(VN_IS_DOOMED(vp), vp);
3620 	/*
3621 	 * Set the VHOLD_NO_SMR flag.
3622 	 *
3623 	 * We may be racing against vhold_smr. If they win we can just pretend
3624 	 * we never got this far, they will vdrop later.
3625 	 */
3626 	if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) {
3627 		vn_freevnodes_inc();
3628 		VI_UNLOCK(vp);
3629 		/*
3630 		 * We lost the aforementioned race. Any subsequent access is
3631 		 * invalid as they might have managed to vdropl on their own.
3632 		 */
3633 		return;
3634 	}
3635 	/*
3636 	 * Don't bump freevnodes as this one is going away.
3637 	 */
3638 	freevnode(vp);
3639 }
3640 
3641 void
3642 vdrop(struct vnode *vp)
3643 {
3644 
3645 	ASSERT_VI_UNLOCKED(vp, __func__);
3646 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3647 	if (refcount_release_if_not_last(&vp->v_holdcnt))
3648 		return;
3649 	VI_LOCK(vp);
3650 	vdropl(vp);
3651 }
3652 
3653 void
3654 vdropl(struct vnode *vp)
3655 {
3656 
3657 	ASSERT_VI_LOCKED(vp, __func__);
3658 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3659 	if (!refcount_release(&vp->v_holdcnt)) {
3660 		VI_UNLOCK(vp);
3661 		return;
3662 	}
3663 	if (!VN_IS_DOOMED(vp)) {
3664 		vn_freevnodes_inc();
3665 		vdrop_deactivate(vp);
3666 		/*
3667 		 * Also unlocks the interlock. We can't assert on it as we
3668 		 * released our hold and by now the vnode might have been
3669 		 * freed.
3670 		 */
3671 		return;
3672 	}
3673 	vdropl_final(vp);
3674 }
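
/*
 * Illustrative sketch (not part of this file): the expected pairing of
 * hold acquisition and release.  A caller that stabilized a vnode with
 * vhold() eventually undoes that with vdrop():
 *
 *	vhold(vp);
 *	... inspect fields that only require the hold count ...
 *	vdrop(vp);
 *
 * The final vdrop() either places the vnode on the free list or, if the
 * vnode was doomed in the meantime, frees it outright.
 */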
3675 
3676 /*
3677  * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
3678  * flags.  DOINGINACT prevents us from recursing in calls to vinactive.
3679  */
3680 static int
3681 vinactivef(struct vnode *vp)
3682 {
3683 	struct vm_object *obj;
3684 	int error;
3685 
3686 	ASSERT_VOP_ELOCKED(vp, "vinactive");
3687 	ASSERT_VI_LOCKED(vp, "vinactive");
3688 	VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
3689 	    ("vinactive: recursed on VI_DOINGINACT"));
3690 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3691 	vp->v_iflag |= VI_DOINGINACT;
3692 	vp->v_iflag &= ~VI_OWEINACT;
3693 	VI_UNLOCK(vp);
3694 	/*
3695 	 * Before moving off the active list, we must be sure that any
3696 	 * modified pages are converted into the vnode's dirty
3697 	 * buffers, since these will no longer be checked once the
3698 	 * vnode is on the inactive list.
3699 	 *
3700 	 * The write-out of the dirty pages is asynchronous.  At the
3701 	 * point that VOP_INACTIVE() is called, there could still be
3702 	 * pending I/O and dirty pages in the object.
3703 	 */
3704 	if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
3705 	    vm_object_mightbedirty(obj)) {
3706 		VM_OBJECT_WLOCK(obj);
3707 		vm_object_page_clean(obj, 0, 0, 0);
3708 		VM_OBJECT_WUNLOCK(obj);
3709 	}
3710 	error = VOP_INACTIVE(vp);
3711 	VI_LOCK(vp);
3712 	VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
3713 	    ("vinactive: lost VI_DOINGINACT"));
3714 	vp->v_iflag &= ~VI_DOINGINACT;
3715 	return (error);
3716 }
3717 
3718 int
3719 vinactive(struct vnode *vp)
3720 {
3721 
3722 	ASSERT_VOP_ELOCKED(vp, "vinactive");
3723 	ASSERT_VI_LOCKED(vp, "vinactive");
3724 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3725 
3726 	if ((vp->v_iflag & VI_OWEINACT) == 0)
3727 		return (0);
3728 	if (vp->v_iflag & VI_DOINGINACT)
3729 		return (0);
3730 	if (vp->v_usecount > 0) {
3731 		vp->v_iflag &= ~VI_OWEINACT;
3732 		return (0);
3733 	}
3734 	return (vinactivef(vp));
3735 }
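
/*
 * Illustrative sketch (not part of this file): per the asserts above,
 * vinactive() is called with both the vnode lock and the interlock held:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	VI_LOCK(vp);
 *	error = vinactive(vp);
 *	VI_UNLOCK(vp);
 *	VOP_UNLOCK(vp);
 */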
3736 
3737 /*
3738  * Remove any vnodes in the vnode table belonging to mount point mp.
3739  *
3740  * If FORCECLOSE is not specified, there should not be any active ones,
3741  * return error if any are found (nb: this is a user error, not a
3742  * system error). If FORCECLOSE is specified, detach any active vnodes
3743  * that are found.
3744  *
3745  * If WRITECLOSE is set, only flush out regular file vnodes open for
3746  * writing.
3747  *
3748  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
3749  *
3750  * `rootrefs' specifies the base reference count for the root vnode
3751  * of this filesystem. The root vnode is considered busy if its
3752  * v_usecount exceeds this value. On a successful return, vflush()
3753  * will call vrele() on the root vnode exactly rootrefs times.
3754  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
3755  * be zero.
3756  */
3757 #ifdef DIAGNOSTIC
3758 static int busyprt = 0;		/* print out busy vnodes */
3759 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "Print out busy vnodes");
3760 #endif
3761 
3762 int
3763 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
3764 {
3765 	struct vnode *vp, *mvp, *rootvp = NULL;
3766 	struct vattr vattr;
3767 	int busy = 0, error;
3768 
3769 	CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
3770 	    rootrefs, flags);
3771 	if (rootrefs > 0) {
3772 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
3773 		    ("vflush: bad args"));
3774 		/*
3775 		 * Get the filesystem root vnode. We can vput() it
3776 		 * immediately, since with rootrefs > 0, it won't go away.
3777 		 */
3778 		if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
3779 			CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
3780 			    __func__, error);
3781 			return (error);
3782 		}
3783 		vput(rootvp);
3784 	}
3785 loop:
3786 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
3787 		vholdl(vp);
3788 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
3789 		if (error) {
3790 			vdrop(vp);
3791 			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
3792 			goto loop;
3793 		}
3794 		/*
3795 		 * Skip over vnodes marked VV_SYSTEM.
3796 		 */
3797 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
3798 			VOP_UNLOCK(vp);
3799 			vdrop(vp);
3800 			continue;
3801 		}
3802 		/*
3803 		 * If WRITECLOSE is set, flush out unlinked but still open
3804 		 * files (even if open only for reading) and regular file
3805 		 * vnodes open for writing.
3806 		 */
3807 		if (flags & WRITECLOSE) {
3808 			if (vp->v_object != NULL) {
3809 				VM_OBJECT_WLOCK(vp->v_object);
3810 				vm_object_page_clean(vp->v_object, 0, 0, 0);
3811 				VM_OBJECT_WUNLOCK(vp->v_object);
3812 			}
3813 			do {
3814 				error = VOP_FSYNC(vp, MNT_WAIT, td);
3815 			} while (error == ERELOOKUP);
3816 			if (error != 0) {
3817 				VOP_UNLOCK(vp);
3818 				vdrop(vp);
3819 				MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
3820 				return (error);
3821 			}
3822 			error = VOP_GETATTR(vp, &vattr, td->td_ucred);
3823 			VI_LOCK(vp);
3824 
3825 			if ((vp->v_type == VNON ||
3826 			    (error == 0 && vattr.va_nlink > 0)) &&
3827 			    (vp->v_writecount <= 0 || vp->v_type != VREG)) {
3828 				VOP_UNLOCK(vp);
3829 				vdropl(vp);
3830 				continue;
3831 			}
3832 		} else
3833 			VI_LOCK(vp);
3834 		/*
3835 		 * With v_usecount == 0, all we need to do is clear out the
3836 		 * vnode data structures and we are done.
3837 		 *
3838 		 * If FORCECLOSE is set, forcibly close the vnode.
3839 		 */
3840 		if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
3841 			vgonel(vp);
3842 		} else {
3843 			busy++;
3844 #ifdef DIAGNOSTIC
3845 			if (busyprt)
3846 				vn_printf(vp, "vflush: busy vnode ");
3847 #endif
3848 		}
3849 		VOP_UNLOCK(vp);
3850 		vdropl(vp);
3851 	}
3852 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
3853 		/*
3854 		 * If just the root vnode is busy, and if its refcount
3855 		 * is equal to `rootrefs', then go ahead and kill it.
3856 		 */
3857 		VI_LOCK(rootvp);
3858 		KASSERT(busy > 0, ("vflush: not busy"));
3859 		VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
3860 		    ("vflush: usecount %d < rootrefs %d",
3861 		     rootvp->v_usecount, rootrefs));
3862 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
3863 			VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
3864 			vgone(rootvp);
3865 			VOP_UNLOCK(rootvp);
3866 			busy = 0;
3867 		} else
3868 			VI_UNLOCK(rootvp);
3869 	}
3870 	if (busy) {
3871 		CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
3872 		    busy);
3873 		return (EBUSY);
3874 	}
3875 	for (; rootrefs > 0; rootrefs--)
3876 		vrele(rootvp);
3877 	return (0);
3878 }
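
/*
 * Illustrative sketch (not part of this file): a typical unmount-path
 * caller flushes every vnode, forcibly reclaiming busy ones only when
 * MNT_FORCE was requested (error handling elided):
 *
 *	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
 *	error = vflush(mp, 0, flags, curthread);
 */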
3879 
3880 /*
3881  * Recycle an unused vnode to the front of the free list.
3882  */
3883 int
3884 vrecycle(struct vnode *vp)
3885 {
3886 	int recycled;
3887 
3888 	VI_LOCK(vp);
3889 	recycled = vrecyclel(vp);
3890 	VI_UNLOCK(vp);
3891 	return (recycled);
3892 }
3893 
3894 /*
3895  * vrecycle, with the vp interlock held.
3896  */
3897 int
3898 vrecyclel(struct vnode *vp)
3899 {
3900 	int recycled;
3901 
3902 	ASSERT_VOP_ELOCKED(vp, __func__);
3903 	ASSERT_VI_LOCKED(vp, __func__);
3904 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3905 	recycled = 0;
3906 	if (vp->v_usecount == 0) {
3907 		recycled = 1;
3908 		vgonel(vp);
3909 	}
3910 	return (recycled);
3911 }
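
/*
 * Illustrative sketch (not part of this file): a filesystem's
 * VOP_INACTIVE implementation might recycle the vnode once the
 * underlying file is gone; "ip" is a hypothetical in-memory inode:
 *
 *	if (ip->i_nlink == 0)
 *		(void)vrecycle(vp);
 */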
3912 
3913 /*
3914  * Eliminate all activity associated with a vnode
3915  * in preparation for reuse.
3916  */
3917 void
3918 vgone(struct vnode *vp)
3919 {
3920 	VI_LOCK(vp);
3921 	vgonel(vp);
3922 	VI_UNLOCK(vp);
3923 }
3924 
3925 static void
3926 notify_lowervp_vfs_dummy(struct mount *mp __unused,
3927     struct vnode *lowervp __unused)
3928 {
3929 }
3930 
3931 /*
3932  * Notify upper mounts about a reclaimed or unlinked vnode.
3933  */
3934 void
3935 vfs_notify_upper(struct vnode *vp, int event)
3936 {
3937 	static struct vfsops vgonel_vfsops = {
3938 		.vfs_reclaim_lowervp = notify_lowervp_vfs_dummy,
3939 		.vfs_unlink_lowervp = notify_lowervp_vfs_dummy,
3940 	};
3941 	struct mount *mp, *ump, *mmp;
3942 
3943 	mp = vp->v_mount;
3944 	if (mp == NULL)
3945 		return;
3946 	if (TAILQ_EMPTY(&mp->mnt_uppers))
3947 		return;
3948 
3949 	mmp = malloc(sizeof(struct mount), M_TEMP, M_WAITOK | M_ZERO);
3950 	mmp->mnt_op = &vgonel_vfsops;
3951 	mmp->mnt_kern_flag |= MNTK_MARKER;
3952 	MNT_ILOCK(mp);
3953 	mp->mnt_kern_flag |= MNTK_VGONE_UPPER;
3954 	for (ump = TAILQ_FIRST(&mp->mnt_uppers); ump != NULL;) {
3955 		if ((ump->mnt_kern_flag & MNTK_MARKER) != 0) {
3956 			ump = TAILQ_NEXT(ump, mnt_upper_link);
3957 			continue;
3958 		}
3959 		TAILQ_INSERT_AFTER(&mp->mnt_uppers, ump, mmp, mnt_upper_link);
3960 		MNT_IUNLOCK(mp);
3961 		switch (event) {
3962 		case VFS_NOTIFY_UPPER_RECLAIM:
3963 			VFS_RECLAIM_LOWERVP(ump, vp);
3964 			break;
3965 		case VFS_NOTIFY_UPPER_UNLINK:
3966 			VFS_UNLINK_LOWERVP(ump, vp);
3967 			break;
3968 		default:
3969 			KASSERT(0, ("invalid event %d", event));
3970 			break;
3971 		}
3972 		MNT_ILOCK(mp);
3973 		ump = TAILQ_NEXT(mmp, mnt_upper_link);
3974 		TAILQ_REMOVE(&mp->mnt_uppers, mmp, mnt_upper_link);
3975 	}
3976 	free(mmp, M_TEMP);
3977 	mp->mnt_kern_flag &= ~MNTK_VGONE_UPPER;
3978 	if ((mp->mnt_kern_flag & MNTK_VGONE_WAITER) != 0) {
3979 		mp->mnt_kern_flag &= ~MNTK_VGONE_WAITER;
3980 		wakeup(&mp->mnt_uppers);
3981 	}
3982 	MNT_IUNLOCK(mp);
3983 }
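
/*
 * Illustrative sketch (not part of this file) of the marker technique
 * used above: a dummy list entry is inserted after the current position
 * so the mount interlock can be dropped around the VFS_*_LOWERVP() call,
 * after which iteration safely resumes from the marker:
 *
 *	TAILQ_INSERT_AFTER(head, ump, marker, link);
 *	unlock; ... call out ...; relock;
 *	ump = TAILQ_NEXT(marker, link);
 *	TAILQ_REMOVE(head, marker, link);
 */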
3984 
3985 /*
3986  * vgone, with the vp interlock held.
3987  */
3988 static void
3989 vgonel(struct vnode *vp)
3990 {
3991 	struct thread *td;
3992 	struct mount *mp;
3993 	vm_object_t object;
3994 	bool active, doinginact, oweinact;
3995 
3996 	ASSERT_VOP_ELOCKED(vp, "vgonel");
3997 	ASSERT_VI_LOCKED(vp, "vgonel");
3998 	VNASSERT(vp->v_holdcnt, vp,
3999 	    ("vgonel: vp %p has no reference.", vp));
4000 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
4001 	td = curthread;
4002 
4003 	/*
4004 	 * Don't vgonel if we're already doomed.
4005 	 */
4006 	if (VN_IS_DOOMED(vp))
4007 		return;
4008 	/*
4009 	 * Paired with freevnode.
4010 	 */
4011 	vn_seqc_write_begin_locked(vp);
4012 	vunlazy_gone(vp);
4013 	vn_irflag_set_locked(vp, VIRF_DOOMED);
4014 
4015 	/*
4016 	 * Check to see if the vnode is in use.  If so, we have to
4017 	 * call VOP_CLOSE() and VOP_INACTIVE().
4018 	 *
4019 	 * It could be that VOP_INACTIVE() requested reclamation, in
4020 	 * which case we should avoid recursion, so check
4021 	 * VI_DOINGINACT.  This is not precise but good enough.
4022 	 */
4023 	active = vp->v_usecount > 0;
4024 	oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4025 	doinginact = (vp->v_iflag & VI_DOINGINACT) != 0;
4026 
4027 	/*
4028 	 * If we need to do inactive, VI_OWEINACT will be set.
4029 	 */
4030 	if (vp->v_iflag & VI_DEFINACT) {
4031 		VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count"));
4032 		vp->v_iflag &= ~VI_DEFINACT;
4033 		vdropl(vp);
4034 	} else {
4035 		VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count"));
4036 		VI_UNLOCK(vp);
4037 	}
4038 	cache_purge_vgone(vp);
4039 	vfs_notify_upper(vp, VFS_NOTIFY_UPPER_RECLAIM);
4040 
4041 	/*
4042 	 * If purging an active vnode, it must be closed and
4043 	 * deactivated before being reclaimed.
4044 	 */
4045 	if (active)
4046 		VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
4047 	if (!doinginact) {
4048 		do {
4049 			if (oweinact || active) {
4050 				VI_LOCK(vp);
4051 				vinactivef(vp);
4052 				oweinact = (vp->v_iflag & VI_OWEINACT) != 0;
4053 				VI_UNLOCK(vp);
4054 			}
4055 		} while (oweinact);
4056 	}
4057 	if (vp->v_type == VSOCK)
4058 		vfs_unp_reclaim(vp);
4059 
4060 	/*
4061 	 * Clean out any buffers associated with the vnode.
4062 	 * If the flush fails, just toss the buffers.
4063 	 */
4064 	mp = NULL;
4065 	if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
4066 		(void) vn_start_secondary_write(vp, &mp, V_WAIT);
4067 	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0) {
4068 		while (vinvalbuf(vp, 0, 0, 0) != 0)
4069 			;
4070 	}
4071 
4072 	BO_LOCK(&vp->v_bufobj);
4073 	KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) &&
4074 	    vp->v_bufobj.bo_dirty.bv_cnt == 0 &&
4075 	    TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) &&
4076 	    vp->v_bufobj.bo_clean.bv_cnt == 0,
4077 	    ("vp %p bufobj not invalidated", vp));
4078 
4079 	/*
4080 	 * For VMIO bufobj, BO_DEAD is set later, or in
4081 	 * vm_object_terminate() after the object's page queue is
4082 	 * flushed.
4083 	 */
4084 	object = vp->v_bufobj.bo_object;
4085 	if (object == NULL)
4086 		vp->v_bufobj.bo_flag |= BO_DEAD;
4087 	BO_UNLOCK(&vp->v_bufobj);
4088 
4089 	/*
4090 	 * Handle the VM part.  Tmpfs handles v_object on its own (the
4091 	 * OBJT_VNODE check).  Nullfs or other bypassing filesystems
4092 	 * should not touch the object borrowed from the lower vnode
4093 	 * (the handle check).
4094 	 */
4095 	if (object != NULL && object->type == OBJT_VNODE &&
4096 	    object->handle == vp)
4097 		vnode_destroy_vobject(vp);
4098 
4099 	/*
4100 	 * Reclaim the vnode.
4101 	 */
4102 	if (VOP_RECLAIM(vp))
4103 		panic("vgone: cannot reclaim");
4104 	if (mp != NULL)
4105 		vn_finished_secondary_write(mp);
4106 	VNASSERT(vp->v_object == NULL, vp,
4107 	    ("vop_reclaim left v_object vp=%p", vp));
4108 	/*
4109 	 * Clear the advisory locks and wake up waiting threads.
4110 	 */
4111 	(void)VOP_ADVLOCKPURGE(vp);
4112 	vp->v_lockf = NULL;
4113 	/*
4114 	 * Delete from old mount point vnode list.
4115 	 */
4116 	delmntque(vp);
4117 	/*
4118 	 * Done with purge, reset to the standard lock and invalidate
4119 	 * the vnode.
4120 	 */
4121 	VI_LOCK(vp);
4122 	vp->v_vnlock = &vp->v_lock;
4123 	vp->v_op = &dead_vnodeops;
4124 	vp->v_type = VBAD;
4125 }
4126 
4127 /*
4128  * Print out a description of a vnode.
4129  */
4130 static const char * const typename[] =
4131 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
4132  "VMARKER"};
4133 
4134 _Static_assert((VHOLD_ALL_FLAGS & ~VHOLD_NO_SMR) == 0,
4135     "new hold count flag not added to vn_printf");
4136 
4137 void
4138 vn_printf(struct vnode *vp, const char *fmt, ...)
4139 {
4140 	va_list ap;
4141 	char buf[256], buf2[16];
4142 	u_long flags;
4143 	u_int holdcnt;
4144 	short irflag;
4145 
4146 	va_start(ap, fmt);
4147 	vprintf(fmt, ap);
4148 	va_end(ap);
4149 	printf("%p: ", (void *)vp);
4150 	printf("type %s\n", typename[vp->v_type]);
4151 	holdcnt = atomic_load_int(&vp->v_holdcnt);
4152 	printf("    usecount %d, writecount %d, refcount %d, seqc users %d",
4153 	    vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS,
4154 	    vp->v_seqc_users);
4155 	switch (vp->v_type) {
4156 	case VDIR:
4157 		printf(" mountedhere %p\n", vp->v_mountedhere);
4158 		break;
4159 	case VCHR:
4160 		printf(" rdev %p\n", vp->v_rdev);
4161 		break;
4162 	case VSOCK:
4163 		printf(" socket %p\n", vp->v_unpcb);
4164 		break;
4165 	case VFIFO:
4166 		printf(" fifoinfo %p\n", vp->v_fifoinfo);
4167 		break;
4168 	default:
4169 		printf("\n");
4170 		break;
4171 	}
4172 	buf[0] = '\0';
4173 	buf[1] = '\0';
4174 	if (holdcnt & VHOLD_NO_SMR)
4175 		strlcat(buf, "|VHOLD_NO_SMR", sizeof(buf));
4176 	printf("    hold count flags (%s)\n", buf + 1);
4177 
4178 	buf[0] = '\0';
4179 	buf[1] = '\0';
4180 	irflag = vn_irflag_read(vp);
4181 	if (irflag & VIRF_DOOMED)
4182 		strlcat(buf, "|VIRF_DOOMED", sizeof(buf));
4183 	if (irflag & VIRF_PGREAD)
4184 		strlcat(buf, "|VIRF_PGREAD", sizeof(buf));
4185 	if (irflag & VIRF_MOUNTPOINT)
4186 		strlcat(buf, "|VIRF_MOUNTPOINT", sizeof(buf));
4187 	flags = irflag & ~(VIRF_DOOMED | VIRF_PGREAD | VIRF_MOUNTPOINT);
4188 	if (flags != 0) {
4189 		snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
4190 		strlcat(buf, buf2, sizeof(buf));
4191 	}
4192 	if (vp->v_vflag & VV_ROOT)
4193 		strlcat(buf, "|VV_ROOT", sizeof(buf));
4194 	if (vp->v_vflag & VV_ISTTY)
4195 		strlcat(buf, "|VV_ISTTY", sizeof(buf));
4196 	if (vp->v_vflag & VV_NOSYNC)
4197 		strlcat(buf, "|VV_NOSYNC", sizeof(buf));
4198 	if (vp->v_vflag & VV_ETERNALDEV)
4199 		strlcat(buf, "|VV_ETERNALDEV", sizeof(buf));
4200 	if (vp->v_vflag & VV_CACHEDLABEL)
4201 		strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
4202 	if (vp->v_vflag & VV_VMSIZEVNLOCK)
4203 		strlcat(buf, "|VV_VMSIZEVNLOCK", sizeof(buf));
4204 	if (vp->v_vflag & VV_COPYONWRITE)
4205 		strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
4206 	if (vp->v_vflag & VV_SYSTEM)
4207 		strlcat(buf, "|VV_SYSTEM", sizeof(buf));
4208 	if (vp->v_vflag & VV_PROCDEP)
4209 		strlcat(buf, "|VV_PROCDEP", sizeof(buf));
4210 	if (vp->v_vflag & VV_NOKNOTE)
4211 		strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
4212 	if (vp->v_vflag & VV_DELETED)
4213 		strlcat(buf, "|VV_DELETED", sizeof(buf));
4214 	if (vp->v_vflag & VV_MD)
4215 		strlcat(buf, "|VV_MD", sizeof(buf));
4216 	if (vp->v_vflag & VV_FORCEINSMQ)
4217 		strlcat(buf, "|VV_FORCEINSMQ", sizeof(buf));
4218 	if (vp->v_vflag & VV_READLINK)
4219 		strlcat(buf, "|VV_READLINK", sizeof(buf));
4220 	flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV |
4221 	    VV_CACHEDLABEL | VV_VMSIZEVNLOCK | VV_COPYONWRITE | VV_SYSTEM |
4222 	    VV_PROCDEP | VV_NOKNOTE | VV_DELETED | VV_MD | VV_FORCEINSMQ |
4223 	    VV_READLINK);
4224 	if (flags != 0) {
4225 		snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
4226 		strlcat(buf, buf2, sizeof(buf));
4227 	}
4228 	if (vp->v_iflag & VI_TEXT_REF)
4229 		strlcat(buf, "|VI_TEXT_REF", sizeof(buf));
4230 	if (vp->v_iflag & VI_MOUNT)
4231 		strlcat(buf, "|VI_MOUNT", sizeof(buf));
4232 	if (vp->v_iflag & VI_DOINGINACT)
4233 		strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
4234 	if (vp->v_iflag & VI_OWEINACT)
4235 		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
4236 	if (vp->v_iflag & VI_DEFINACT)
4237 		strlcat(buf, "|VI_DEFINACT", sizeof(buf));
4238 	if (vp->v_iflag & VI_FOPENING)
4239 		strlcat(buf, "|VI_FOPENING", sizeof(buf));
4240 	flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOINGINACT |
4241 	    VI_OWEINACT | VI_DEFINACT | VI_FOPENING);
4242 	if (flags != 0) {
4243 		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
4244 		strlcat(buf, buf2, sizeof(buf));
4245 	}
4246 	if (vp->v_mflag & VMP_LAZYLIST)
4247 		strlcat(buf, "|VMP_LAZYLIST", sizeof(buf));
4248 	flags = vp->v_mflag & ~(VMP_LAZYLIST);
4249 	if (flags != 0) {
4250 		snprintf(buf2, sizeof(buf2), "|VMP(0x%lx)", flags);
4251 		strlcat(buf, buf2, sizeof(buf));
4252 	}
4253 	printf("    flags (%s)", buf + 1);
4254 	if (mtx_owned(VI_MTX(vp)))
4255 		printf(" VI_LOCKed");
4256 	printf("\n");
4257 	if (vp->v_object != NULL)
4258 		printf("    v_object %p ref %d pages %d "
4259 		    "cleanbuf %d dirtybuf %d\n",
4260 		    vp->v_object, vp->v_object->ref_count,
4261 		    vp->v_object->resident_page_count,
4262 		    vp->v_bufobj.bo_clean.bv_cnt,
4263 		    vp->v_bufobj.bo_dirty.bv_cnt);
4264 	printf("    ");
4265 	lockmgr_printinfo(vp->v_vnlock);
4266 	if (vp->v_data != NULL)
4267 		VOP_PRINT(vp);
4268 }
4269 
4270 #ifdef DDB
4271 /*
4272  * List all of the locked vnodes in the system.
4273  * Called when debugging the kernel.
4274  */
4275 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
4276 {
4277 	struct mount *mp;
4278 	struct vnode *vp;
4279 
4280 	/*
4281 	 * Note: because this is DDB, we can't obey the locking semantics
4282 	 * for these structures, which means we could catch an inconsistent
4283 	 * state and dereference a nasty pointer.  Not much to be done
4284 	 * about that.
4285 	 */
4286 	db_printf("Locked vnodes\n");
4287 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4288 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4289 			if (vp->v_type != VMARKER && VOP_ISLOCKED(vp))
4290 				vn_printf(vp, "vnode ");
4291 		}
4292 	}
4293 }
4294 
4295 /*
4296  * Show details about the given vnode.
4297  */
4298 DB_SHOW_COMMAND(vnode, db_show_vnode)
4299 {
4300 	struct vnode *vp;
4301 
4302 	if (!have_addr)
4303 		return;
4304 	vp = (struct vnode *)addr;
4305 	vn_printf(vp, "vnode ");
4306 }
4307 
4308 /*
4309  * Show details about the given mount point.
4310  */
4311 DB_SHOW_COMMAND(mount, db_show_mount)
4312 {
4313 	struct mount *mp;
4314 	struct vfsopt *opt;
4315 	struct statfs *sp;
4316 	struct vnode *vp;
4317 	char buf[512];
4318 	uint64_t mflags;
4319 	u_int flags;
4320 
4321 	if (!have_addr) {
4322 		/* No address given, print short info about all mount points. */
4323 		TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4324 			db_printf("%p %s on %s (%s)\n", mp,
4325 			    mp->mnt_stat.f_mntfromname,
4326 			    mp->mnt_stat.f_mntonname,
4327 			    mp->mnt_stat.f_fstypename);
4328 			if (db_pager_quit)
4329 				break;
4330 		}
4331 		db_printf("\nMore info: show mount <addr>\n");
4332 		return;
4333 	}
4334 
4335 	mp = (struct mount *)addr;
4336 	db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
4337 	    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
4338 
4339 	buf[0] = '\0';
4340 	mflags = mp->mnt_flag;
4341 #define	MNT_FLAG(flag)	do {						\
4342 	if (mflags & (flag)) {						\
4343 		if (buf[0] != '\0')					\
4344 			strlcat(buf, ", ", sizeof(buf));		\
4345 		strlcat(buf, (#flag) + 4, sizeof(buf));			\
4346 		mflags &= ~(flag);					\
4347 	}								\
4348 } while (0)
4349 	MNT_FLAG(MNT_RDONLY);
4350 	MNT_FLAG(MNT_SYNCHRONOUS);
4351 	MNT_FLAG(MNT_NOEXEC);
4352 	MNT_FLAG(MNT_NOSUID);
4353 	MNT_FLAG(MNT_NFS4ACLS);
4354 	MNT_FLAG(MNT_UNION);
4355 	MNT_FLAG(MNT_ASYNC);
4356 	MNT_FLAG(MNT_SUIDDIR);
4357 	MNT_FLAG(MNT_SOFTDEP);
4358 	MNT_FLAG(MNT_NOSYMFOLLOW);
4359 	MNT_FLAG(MNT_GJOURNAL);
4360 	MNT_FLAG(MNT_MULTILABEL);
4361 	MNT_FLAG(MNT_ACLS);
4362 	MNT_FLAG(MNT_NOATIME);
4363 	MNT_FLAG(MNT_NOCLUSTERR);
4364 	MNT_FLAG(MNT_NOCLUSTERW);
4365 	MNT_FLAG(MNT_SUJ);
4366 	MNT_FLAG(MNT_EXRDONLY);
4367 	MNT_FLAG(MNT_EXPORTED);
4368 	MNT_FLAG(MNT_DEFEXPORTED);
4369 	MNT_FLAG(MNT_EXPORTANON);
4370 	MNT_FLAG(MNT_EXKERB);
4371 	MNT_FLAG(MNT_EXPUBLIC);
4372 	MNT_FLAG(MNT_LOCAL);
4373 	MNT_FLAG(MNT_QUOTA);
4374 	MNT_FLAG(MNT_ROOTFS);
4375 	MNT_FLAG(MNT_USER);
4376 	MNT_FLAG(MNT_IGNORE);
4377 	MNT_FLAG(MNT_UPDATE);
4378 	MNT_FLAG(MNT_DELEXPORT);
4379 	MNT_FLAG(MNT_RELOAD);
4380 	MNT_FLAG(MNT_FORCE);
4381 	MNT_FLAG(MNT_SNAPSHOT);
4382 	MNT_FLAG(MNT_BYFSID);
4383 #undef MNT_FLAG
4384 	if (mflags != 0) {
4385 		if (buf[0] != '\0')
4386 			strlcat(buf, ", ", sizeof(buf));
4387 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
4388 		    "0x%016jx", mflags);
4389 	}
4390 	db_printf("    mnt_flag = %s\n", buf);
4391 
4392 	buf[0] = '\0';
4393 	flags = mp->mnt_kern_flag;
4394 #define	MNT_KERN_FLAG(flag)	do {					\
4395 	if (flags & (flag)) {						\
4396 		if (buf[0] != '\0')					\
4397 			strlcat(buf, ", ", sizeof(buf));		\
4398 		strlcat(buf, (#flag) + 5, sizeof(buf));			\
4399 		flags &= ~(flag);					\
4400 	}								\
4401 } while (0)
4402 	MNT_KERN_FLAG(MNTK_UNMOUNTF);
4403 	MNT_KERN_FLAG(MNTK_ASYNC);
4404 	MNT_KERN_FLAG(MNTK_SOFTDEP);
4405 	MNT_KERN_FLAG(MNTK_DRAINING);
4406 	MNT_KERN_FLAG(MNTK_REFEXPIRE);
4407 	MNT_KERN_FLAG(MNTK_EXTENDED_SHARED);
4408 	MNT_KERN_FLAG(MNTK_SHARED_WRITES);
4409 	MNT_KERN_FLAG(MNTK_NO_IOPF);
4410 	MNT_KERN_FLAG(MNTK_VGONE_UPPER);
4411 	MNT_KERN_FLAG(MNTK_VGONE_WAITER);
4412 	MNT_KERN_FLAG(MNTK_LOOKUP_EXCL_DOTDOT);
4413 	MNT_KERN_FLAG(MNTK_MARKER);
4414 	MNT_KERN_FLAG(MNTK_USES_BCACHE);
4415 	MNT_KERN_FLAG(MNTK_FPLOOKUP);
4416 	MNT_KERN_FLAG(MNTK_NOASYNC);
4417 	MNT_KERN_FLAG(MNTK_UNMOUNT);
4418 	MNT_KERN_FLAG(MNTK_MWAIT);
4419 	MNT_KERN_FLAG(MNTK_SUSPEND);
4420 	MNT_KERN_FLAG(MNTK_SUSPEND2);
4421 	MNT_KERN_FLAG(MNTK_SUSPENDED);
4422 	MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
4423 	MNT_KERN_FLAG(MNTK_NOKNOTE);
4424 #undef MNT_KERN_FLAG
4425 	if (flags != 0) {
4426 		if (buf[0] != '\0')
4427 			strlcat(buf, ", ", sizeof(buf));
4428 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
4429 		    "0x%08x", flags);
4430 	}
4431 	db_printf("    mnt_kern_flag = %s\n", buf);
4432 
4433 	db_printf("    mnt_opt = ");
4434 	opt = TAILQ_FIRST(mp->mnt_opt);
4435 	if (opt != NULL) {
4436 		db_printf("%s", opt->name);
4437 		opt = TAILQ_NEXT(opt, link);
4438 		while (opt != NULL) {
4439 			db_printf(", %s", opt->name);
4440 			opt = TAILQ_NEXT(opt, link);
4441 		}
4442 	}
4443 	db_printf("\n");
4444 
4445 	sp = &mp->mnt_stat;
4446 	db_printf("    mnt_stat = { version=%u type=%u flags=0x%016jx "
4447 	    "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
4448 	    "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
4449 	    "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
4450 	    (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
4451 	    (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
4452 	    (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
4453 	    (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
4454 	    (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
4455 	    (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
4456 	    (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
4457 	    (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
4458 
4459 	db_printf("    mnt_cred = { uid=%u ruid=%u",
4460 	    (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
4461 	if (jailed(mp->mnt_cred))
4462 		db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
4463 	db_printf(" }\n");
4464 	db_printf("    mnt_ref = %d (with %d in the struct)\n",
4465 	    vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref);
4466 	db_printf("    mnt_gen = %d\n", mp->mnt_gen);
4467 	db_printf("    mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
4468 	db_printf("    mnt_lazyvnodelistsize = %d\n",
4469 	    mp->mnt_lazyvnodelistsize);
4470 	db_printf("    mnt_writeopcount = %d (with %d in the struct)\n",
4471 	    vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount);
4472 	db_printf("    mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
4473 	db_printf("    mnt_iosize_max = %d\n", mp->mnt_iosize_max);
4474 	db_printf("    mnt_hashseed = %u\n", mp->mnt_hashseed);
4475 	db_printf("    mnt_lockref = %d (with %d in the struct)\n",
4476 	    vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref);
4477 	db_printf("    mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
4478 	db_printf("    mnt_secondary_accwrites = %d\n",
4479 	    mp->mnt_secondary_accwrites);
4480 	db_printf("    mnt_gjprovider = %s\n",
4481 	    mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
4482 	db_printf("    mnt_vfs_ops = %d\n", mp->mnt_vfs_ops);
4483 
4484 	db_printf("\n\nList of active vnodes\n");
4485 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4486 		if (vp->v_type != VMARKER && vp->v_holdcnt > 0) {
4487 			vn_printf(vp, "vnode ");
4488 			if (db_pager_quit)
4489 				break;
4490 		}
4491 	}
4492 	db_printf("\n\nList of inactive vnodes\n");
4493 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4494 		if (vp->v_type != VMARKER && vp->v_holdcnt == 0) {
4495 			vn_printf(vp, "vnode ");
4496 			if (db_pager_quit)
4497 				break;
4498 		}
4499 	}
4500 }
4501 #endif	/* DDB */
4502 
4503 /*
4504  * Fill in a struct xvfsconf based on a struct vfsconf.
4505  */
4506 static int
4507 vfsconf2x(struct sysctl_req *req, struct vfsconf *vfsp)
4508 {
4509 	struct xvfsconf xvfsp;
4510 
4511 	bzero(&xvfsp, sizeof(xvfsp));
4512 	strcpy(xvfsp.vfc_name, vfsp->vfc_name);
4513 	xvfsp.vfc_typenum = vfsp->vfc_typenum;
4514 	xvfsp.vfc_refcount = vfsp->vfc_refcount;
4515 	xvfsp.vfc_flags = vfsp->vfc_flags;
4516 	/*
4517 	 * These are unused in userland; we keep them
4518 	 * to avoid breaking binary compatibility.
4519 	 */
4520 	xvfsp.vfc_vfsops = NULL;
4521 	xvfsp.vfc_next = NULL;
4522 	return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
4523 }
4524 
4525 #ifdef COMPAT_FREEBSD32
4526 struct xvfsconf32 {
4527 	uint32_t	vfc_vfsops;
4528 	char		vfc_name[MFSNAMELEN];
4529 	int32_t		vfc_typenum;
4530 	int32_t		vfc_refcount;
4531 	int32_t		vfc_flags;
4532 	uint32_t	vfc_next;
4533 };
4534 
4535 static int
4536 vfsconf2x32(struct sysctl_req *req, struct vfsconf *vfsp)
4537 {
4538 	struct xvfsconf32 xvfsp;
4539 
4540 	bzero(&xvfsp, sizeof(xvfsp));
4541 	strcpy(xvfsp.vfc_name, vfsp->vfc_name);
4542 	xvfsp.vfc_typenum = vfsp->vfc_typenum;
4543 	xvfsp.vfc_refcount = vfsp->vfc_refcount;
4544 	xvfsp.vfc_flags = vfsp->vfc_flags;
4545 	return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
4546 }
4547 #endif
4548 
4549 /*
4550  * Top level filesystem related information gathering.
4551  */
4552 static int
4553 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
4554 {
4555 	struct vfsconf *vfsp;
4556 	int error;
4557 
4558 	error = 0;
4559 	vfsconf_slock();
4560 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
4561 #ifdef COMPAT_FREEBSD32
4562 		if (req->flags & SCTL_MASK32)
4563 			error = vfsconf2x32(req, vfsp);
4564 		else
4565 #endif
4566 			error = vfsconf2x(req, vfsp);
4567 		if (error)
4568 			break;
4569 	}
4570 	vfsconf_sunlock();
4571 	return (error);
4572 }
4573 
4574 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLTYPE_OPAQUE | CTLFLAG_RD |
4575     CTLFLAG_MPSAFE, NULL, 0, sysctl_vfs_conflist,
4576     "S,xvfsconf", "List of all configured filesystems");
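
/*
 * Illustrative sketch (not part of this file): userland can consume the
 * vfs.conflist node as an array of struct xvfsconf (error handling
 * elided):
 *
 *	size_t i, len;
 *	struct xvfsconf *xv;
 *
 *	sysctlbyname("vfs.conflist", NULL, &len, NULL, 0);
 *	xv = malloc(len);
 *	sysctlbyname("vfs.conflist", xv, &len, NULL, 0);
 *	for (i = 0; i < len / sizeof(*xv); i++)
 *		printf("%s\n", xv[i].vfc_name);
 */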
4577 
4578 #ifndef BURN_BRIDGES
4579 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
4580 
4581 static int
4582 vfs_sysctl(SYSCTL_HANDLER_ARGS)
4583 {
4584 	int *name = (int *)arg1 - 1;	/* XXX */
4585 	u_int namelen = arg2 + 1;	/* XXX */
4586 	struct vfsconf *vfsp;
4587 
4588 	log(LOG_WARNING, "userland calling deprecated sysctl, "
4589 	    "please rebuild world\n");
4590 
4591 #if 1 || defined(COMPAT_PRELITE2)
4592 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
4593 	if (namelen == 1)
4594 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
4595 #endif
4596 
4597 	switch (name[1]) {
4598 	case VFS_MAXTYPENUM:
4599 		if (namelen != 2)
4600 			return (ENOTDIR);
4601 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
4602 	case VFS_CONF:
4603 		if (namelen != 3)
4604 			return (ENOTDIR);	/* overloaded */
4605 		vfsconf_slock();
4606 		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
4607 			if (vfsp->vfc_typenum == name[2])
4608 				break;
4609 		}
4610 		vfsconf_sunlock();
4611 		if (vfsp == NULL)
4612 			return (EOPNOTSUPP);
4613 #ifdef COMPAT_FREEBSD32
4614 		if (req->flags & SCTL_MASK32)
4615 			return (vfsconf2x32(req, vfsp));
4616 		else
4617 #endif
4618 			return (vfsconf2x(req, vfsp));
4619 	}
4620 	return (EOPNOTSUPP);
4621 }
4622 
4623 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP |
4624     CTLFLAG_MPSAFE, vfs_sysctl,
4625     "Generic filesystem");
4626 
4627 #if 1 || defined(COMPAT_PRELITE2)
4628 
4629 static int
4630 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
4631 {
4632 	int error;
4633 	struct vfsconf *vfsp;
4634 	struct ovfsconf ovfs;
4635 
4636 	vfsconf_slock();
4637 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
4638 		bzero(&ovfs, sizeof(ovfs));
4639 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
4640 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
4641 		ovfs.vfc_index = vfsp->vfc_typenum;
4642 		ovfs.vfc_refcount = vfsp->vfc_refcount;
4643 		ovfs.vfc_flags = vfsp->vfc_flags;
4644 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
4645 		if (error != 0) {
4646 			vfsconf_sunlock();
4647 			return (error);
4648 		}
4649 	}
4650 	vfsconf_sunlock();
4651 	return (0);
4652 }
4653 
4654 #endif /* 1 || COMPAT_PRELITE2 */
4655 #endif /* !BURN_BRIDGES */
4656 
4657 #define KINFO_VNODESLOP		10
4658 #ifdef notyet
4659 /*
4660  * Dump vnode list (via sysctl).
4661  */
4662 /* ARGSUSED */
4663 static int
4664 sysctl_vnode(SYSCTL_HANDLER_ARGS)
4665 {
4666 	struct xvnode *xvn;
4667 	struct mount *mp;
4668 	struct vnode *vp;
4669 	int error, len, n;
4670 
4671 	/*
4672 	 * Stale numvnodes access is not fatal here.
4673 	 */
4674 	req->lock = 0;
4675 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
4676 	if (!req->oldptr)
4677 		/* Make an estimate */
4678 		return (SYSCTL_OUT(req, 0, len));
4679 
4680 	error = sysctl_wire_old_buffer(req, 0);
4681 	if (error != 0)
4682 		return (error);
4683 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
4684 	n = 0;
4685 	mtx_lock(&mountlist_mtx);
4686 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4687 		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
4688 			continue;
4689 		MNT_ILOCK(mp);
4690 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
4691 			if (n == len)
4692 				break;
4693 			vref(vp);
4694 			xvn[n].xv_size = sizeof *xvn;
4695 			xvn[n].xv_vnode = vp;
4696 			xvn[n].xv_id = 0;	/* XXX compat */
4697 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
4698 			XV_COPY(usecount);
4699 			XV_COPY(writecount);
4700 			XV_COPY(holdcnt);
4701 			XV_COPY(mount);
4702 			XV_COPY(numoutput);
4703 			XV_COPY(type);
4704 #undef XV_COPY
4705 			xvn[n].xv_flag = vp->v_vflag;
4706 
4707 			switch (vp->v_type) {
4708 			case VREG:
4709 			case VDIR:
4710 			case VLNK:
4711 				break;
4712 			case VBLK:
4713 			case VCHR:
4714 				if (vp->v_rdev == NULL) {
4715 					vrele(vp);
4716 					continue;
4717 				}
4718 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
4719 				break;
4720 			case VSOCK:
4721 				xvn[n].xv_socket = vp->v_socket;
4722 				break;
4723 			case VFIFO:
4724 				xvn[n].xv_fifo = vp->v_fifoinfo;
4725 				break;
4726 			case VNON:
4727 			case VBAD:
4728 			default:
4729 				/* shouldn't happen? */
4730 				vrele(vp);
4731 				continue;
4732 			}
4733 			vrele(vp);
4734 			++n;
4735 		}
4736 		MNT_IUNLOCK(mp);
4737 		mtx_lock(&mountlist_mtx);
4738 		vfs_unbusy(mp);
4739 		if (n == len)
4740 			break;
4741 	}
4742 	mtx_unlock(&mountlist_mtx);
4743 
4744 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
4745 	free(xvn, M_TEMP);
4746 	return (error);
4747 }
4748 
4749 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE | CTLFLAG_RD |
4750     CTLFLAG_MPSAFE, 0, 0, sysctl_vnode, "S,xvnode",
4751     "");
4752 #endif
4753 
4754 static void
4755 unmount_or_warn(struct mount *mp)
4756 {
4757 	int error;
4758 
4759 	error = dounmount(mp, MNT_FORCE, curthread);
4760 	if (error != 0) {
4761 		printf("unmount of %s failed (", mp->mnt_stat.f_mntonname);
4762 		if (error == EBUSY)
4763 			printf("BUSY)\n");
4764 		else
4765 			printf("%d)\n", error);
4766 	}
4767 }
4768 
4769 /*
4770  * Unmount all filesystems. The list is traversed in reverse order
4771  * of mounting to avoid dependencies.
4772  */
4773 void
4774 vfs_unmountall(void)
4775 {
4776 	struct mount *mp, *tmp;
4777 
4778 	CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
4779 
4780 	/*
4781 	 * Since this only runs when rebooting, it is not interlocked.
4782 	 */
4783 	TAILQ_FOREACH_REVERSE_SAFE(mp, &mountlist, mntlist, mnt_list, tmp) {
4784 		vfs_ref(mp);
4785 
4786 		/*
4787 		 * Forcibly unmounting "/dev" before "/" would prevent clean
4788 		 * unmount of the latter.
4789 		 */
4790 		if (mp == rootdevmp)
4791 			continue;
4792 
4793 		unmount_or_warn(mp);
4794 	}
4795 
4796 	if (rootdevmp != NULL)
4797 		unmount_or_warn(rootdevmp);
4798 }
4799 
4800 static void
4801 vfs_deferred_inactive(struct vnode *vp, int lkflags)
4802 {
4803 
4804 	ASSERT_VI_LOCKED(vp, __func__);
4805 	VNASSERT((vp->v_iflag & VI_DEFINACT) == 0, vp, ("VI_DEFINACT still set"));
4806 	if ((vp->v_iflag & VI_OWEINACT) == 0) {
4807 		vdropl(vp);
4808 		return;
4809 	}
4810 	if (vn_lock(vp, lkflags) == 0) {
4811 		VI_LOCK(vp);
4812 		vinactive(vp);
4813 		VOP_UNLOCK(vp);
4814 		vdropl(vp);
4815 		return;
4816 	}
4817 	vdefer_inactive_unlocked(vp);
4818 }
4819 
4820 static int
4821 vfs_periodic_inactive_filter(struct vnode *vp, void *arg)
4822 {
4823 
4824 	return (vp->v_iflag & VI_DEFINACT);
4825 }
4826 
4827 static void __noinline
4828 vfs_periodic_inactive(struct mount *mp, int flags)
4829 {
4830 	struct vnode *vp, *mvp;
4831 	int lkflags;
4832 
4833 	lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
4834 	if (flags != MNT_WAIT)
4835 		lkflags |= LK_NOWAIT;
4836 
4837 	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_inactive_filter, NULL) {
4838 		if ((vp->v_iflag & VI_DEFINACT) == 0) {
4839 			VI_UNLOCK(vp);
4840 			continue;
4841 		}
4842 		vp->v_iflag &= ~VI_DEFINACT;
4843 		vfs_deferred_inactive(vp, lkflags);
4844 	}
4845 }
4846 
4847 static inline bool
4848 vfs_want_msync(struct vnode *vp)
4849 {
4850 	struct vm_object *obj;
4851 
4852 	/*
4853 	 * This test may be performed without any locks held.
4854 	 * We rely on vm_object's type stability.
4855 	 */
4856 	if (vp->v_vflag & VV_NOSYNC)
4857 		return (false);
4858 	obj = vp->v_object;
4859 	return (obj != NULL && vm_object_mightbedirty(obj));
4860 }
4861 
4862 static int
4863 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused)
4864 {
4865 
4866 	if (vp->v_vflag & VV_NOSYNC)
4867 		return (false);
4868 	if (vp->v_iflag & VI_DEFINACT)
4869 		return (true);
4870 	return (vfs_want_msync(vp));
4871 }
4872 
4873 static void __noinline
4874 vfs_periodic_msync_inactive(struct mount *mp, int flags)
4875 {
4876 	struct vnode *vp, *mvp;
4877 	struct vm_object *obj;
4878 	int lkflags, objflags;
4879 	bool seen_defer;
4880 
4881 	lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
4882 	if (flags != MNT_WAIT) {
4883 		lkflags |= LK_NOWAIT;
4884 		objflags = OBJPC_NOSYNC;
4885 	} else {
4886 		objflags = OBJPC_SYNC;
4887 	}
4888 
4889 	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) {
4890 		seen_defer = false;
4891 		if (vp->v_iflag & VI_DEFINACT) {
4892 			vp->v_iflag &= ~VI_DEFINACT;
4893 			seen_defer = true;
4894 		}
4895 		if (!vfs_want_msync(vp)) {
4896 			if (seen_defer)
4897 				vfs_deferred_inactive(vp, lkflags);
4898 			else
4899 				VI_UNLOCK(vp);
4900 			continue;
4901 		}
4902 		if (vget(vp, lkflags) == 0) {
4903 			obj = vp->v_object;
4904 			if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) {
4905 				VM_OBJECT_WLOCK(obj);
4906 				vm_object_page_clean(obj, 0, 0, objflags);
4907 				VM_OBJECT_WUNLOCK(obj);
4908 			}
4909 			vput(vp);
4910 			if (seen_defer)
4911 				vdrop(vp);
4912 		} else {
4913 			if (seen_defer)
4914 				vdefer_inactive_unlocked(vp);
4915 		}
4916 	}
4917 }
4918 
4919 void
4920 vfs_periodic(struct mount *mp, int flags)
4921 {
4922 
4923 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
4924 
4925 	if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0)
4926 		vfs_periodic_inactive(mp, flags);
4927 	else
4928 		vfs_periodic_msync_inactive(mp, flags);
4929 }
4930 
4931 static void
4932 destroy_vpollinfo_free(struct vpollinfo *vi)
4933 {
4934 
4935 	knlist_destroy(&vi->vpi_selinfo.si_note);
4936 	mtx_destroy(&vi->vpi_lock);
4937 	free(vi, M_VNODEPOLL);
4938 }
4939 
4940 static void
4941 destroy_vpollinfo(struct vpollinfo *vi)
4942 {
4943 
4944 	knlist_clear(&vi->vpi_selinfo.si_note, 1);
4945 	seldrain(&vi->vpi_selinfo);
4946 	destroy_vpollinfo_free(vi);
4947 }
4948 
4949 /*
4950  * Initialize per-vnode helper structure to hold poll-related state.
4951  */
4952 void
4953 v_addpollinfo(struct vnode *vp)
4954 {
4955 	struct vpollinfo *vi;
4956 
4957 	if (vp->v_pollinfo != NULL)
4958 		return;
4959 	vi = malloc(sizeof(*vi), M_VNODEPOLL, M_WAITOK | M_ZERO);
4960 	mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
4961 	knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
4962 	    vfs_knlunlock, vfs_knl_assert_lock);
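	/*
	 * Recheck under the interlock: another thread may have installed
	 * its own vpollinfo while we were allocating ours.
	 */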
4963 	VI_LOCK(vp);
4964 	if (vp->v_pollinfo != NULL) {
4965 		VI_UNLOCK(vp);
4966 		destroy_vpollinfo_free(vi);
4967 		return;
4968 	}
4969 	vp->v_pollinfo = vi;
4970 	VI_UNLOCK(vp);
4971 }
4972 
4973 /*
4974  * Record a process's interest in events which might happen to
4975  * a vnode.  Because poll uses the historic select-style interface
4976  * internally, this routine serves as both the ``check for any
4977  * pending events'' and the ``record my interest in future events''
4978  * functions.  (These are done together, while the lock is held,
4979  * to avoid race conditions.)
4980  */
4981 int
4982 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
4983 {
4984 
4985 	v_addpollinfo(vp);
4986 	mtx_lock(&vp->v_pollinfo->vpi_lock);
4987 	if (vp->v_pollinfo->vpi_revents & events) {
4988 		/*
4989 		 * This leaves events we are not interested
4990 		 * in available for the other process which
4991 		 * presumably had requested them
4992 		 * (otherwise they would never have been
4993 		 * recorded).
4994 		 */
4995 		events &= vp->v_pollinfo->vpi_revents;
4996 		vp->v_pollinfo->vpi_revents &= ~events;
4997 
4998 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
4999 		return (events);
5000 	}
5001 	vp->v_pollinfo->vpi_events |= events;
5002 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
5003 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
5004 	return (0);
5005 }
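
/*
 * Illustrative sketch (not part of this file): a filesystem's VOP_POLL
 * implementation may defer entirely to this routine:
 *
 *	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 */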
5006 
5007 /*
5008  * Routine to create and manage a filesystem syncer vnode.
5009  */
5010 #define sync_close ((int (*)(struct vop_close_args *))nullop)
5011 static int	sync_fsync(struct vop_fsync_args *);
5012 static int	sync_inactive(struct vop_inactive_args *);
5013 static int	sync_reclaim(struct vop_reclaim_args *);
5014 
5015 static struct vop_vector sync_vnodeops = {
5016 	.vop_bypass =	VOP_EOPNOTSUPP,
5017 	.vop_close =	sync_close,		/* close */
5018 	.vop_fsync =	sync_fsync,		/* fsync */
5019 	.vop_inactive =	sync_inactive,	/* inactive */
5020 	.vop_need_inactive = vop_stdneed_inactive, /* need_inactive */
5021 	.vop_reclaim =	sync_reclaim,	/* reclaim */
5022 	.vop_lock1 =	vop_stdlock,	/* lock */
5023 	.vop_unlock =	vop_stdunlock,	/* unlock */
5024 	.vop_islocked =	vop_stdislocked,	/* islocked */
5025 };
5026 VFS_VOP_VECTOR_REGISTER(sync_vnodeops);
5027 
5028 /*
5029  * Create a new filesystem syncer vnode for the specified mount point.
5030  */
5031 void
5032 vfs_allocate_syncvnode(struct mount *mp)
5033 {
5034 	struct vnode *vp;
5035 	struct bufobj *bo;
5036 	static long start, incr, next;
5037 	int error;
5038 
5039 	/* Allocate a new vnode */
5040 	error = getnewvnode("syncer", mp, &sync_vnodeops, &vp);
5041 	if (error != 0)
5042 		panic("vfs_allocate_syncvnode: getnewvnode() failed");
5043 	vp->v_type = VNON;
5044 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5045 	vp->v_vflag |= VV_FORCEINSMQ;
5046 	error = insmntque(vp, mp);
5047 	if (error != 0)
5048 		panic("vfs_allocate_syncvnode: insmntque() failed");
5049 	vp->v_vflag &= ~VV_FORCEINSMQ;
5050 	VOP_UNLOCK(vp);
5051 	/*
5052 	 * Place the vnode onto the syncer worklist. We attempt to
5053 	 * scatter them about on the list so that they will go off
5054 	 * at evenly distributed times even if all the filesystems
5055 	 * are mounted at once.
5056 	 */
5057 	next += incr;
5058 	if (next == 0 || next > syncer_maxdelay) {
5059 		start /= 2;
5060 		incr /= 2;
5061 		if (start == 0) {
5062 			start = syncer_maxdelay / 2;
5063 			incr = syncer_maxdelay;
5064 		}
5065 		next = start;
5066 	}
5067 	bo = &vp->v_bufobj;
5068 	BO_LOCK(bo);
5069 	vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
5070 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
5071 	mtx_lock(&sync_mtx);
5072 	sync_vnode_count++;
5073 	if (mp->mnt_syncer == NULL) {
5074 		mp->mnt_syncer = vp;
5075 		vp = NULL;
5076 	}
5077 	mtx_unlock(&sync_mtx);
5078 	BO_UNLOCK(bo);
5079 	if (vp != NULL) {
5080 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
5081 		vgone(vp);
5082 		vput(vp);
5083 	}
5084 }
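
/*
 * Worked example (not part of this file): with syncer_maxdelay == 32,
 * the scattering logic above hands out the slots 16, 8, 24, 4, 12, 20,
 * 28, 2, and so on; each pass halves the stride, subdividing the delay
 * range so that syncer vnodes fire at evenly spaced times.
 */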
5085 
5086 void
5087 vfs_deallocate_syncvnode(struct mount *mp)
5088 {
5089 	struct vnode *vp;
5090 
5091 	mtx_lock(&sync_mtx);
5092 	vp = mp->mnt_syncer;
5093 	if (vp != NULL)
5094 		mp->mnt_syncer = NULL;
5095 	mtx_unlock(&sync_mtx);
5096 	if (vp != NULL)
5097 		vrele(vp);
5098 }
5099 
5100 /*
5101  * Do a lazy sync of the filesystem.
5102  */
5103 static int
5104 sync_fsync(struct vop_fsync_args *ap)
5105 {
5106 	struct vnode *syncvp = ap->a_vp;
5107 	struct mount *mp = syncvp->v_mount;
5108 	int error, save;
5109 	struct bufobj *bo;
5110 
5111 	/*
5112 	 * We only need to do something if this is a lazy evaluation.
5113 	 */
5114 	if (ap->a_waitfor != MNT_LAZY)
5115 		return (0);
5116 
5117 	/*
5118 	 * Move ourselves to the back of the sync list.
5119 	 */
5120 	bo = &syncvp->v_bufobj;
5121 	BO_LOCK(bo);
5122 	vn_syncer_add_to_worklist(bo, syncdelay);
5123 	BO_UNLOCK(bo);
5124 
5125 	/*
5126 	 * Walk the list of vnodes pushing all that are dirty and
5127 	 * not already on the sync list.
5128 	 */
5129 	if (vfs_busy(mp, MBF_NOWAIT) != 0)
5130 		return (0);
5131 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
5132 		vfs_unbusy(mp);
5133 		return (0);
5134 	}
5135 	save = curthread_pflags_set(TDP_SYNCIO);
5136 	/*
5137 	 * The filesystem at hand may be idle with free vnodes stored in the
5138 	 * batch.  Return them instead of letting them stay there indefinitely.
5139 	 */
5140 	vfs_periodic(mp, MNT_NOWAIT);
5141 	error = VFS_SYNC(mp, MNT_LAZY);
5142 	curthread_pflags_restore(save);
5143 	vn_finished_write(mp);
5144 	vfs_unbusy(mp);
5145 	return (error);
5146 }
5147 
5148 /*
5149  * The syncer vnode is no longer referenced.
5150  */
5151 static int
5152 sync_inactive(struct vop_inactive_args *ap)
5153 {
5154 
5155 	vgone(ap->a_vp);
5156 	return (0);
5157 }
5158 
5159 /*
5160  * The syncer vnode is no longer needed and is being decommissioned.
5161  *
5162  * Modifications to the worklist must be protected by sync_mtx.
5163  */
5164 static int
5165 sync_reclaim(struct vop_reclaim_args *ap)
5166 {
5167 	struct vnode *vp = ap->a_vp;
5168 	struct bufobj *bo;
5169 
5170 	bo = &vp->v_bufobj;
5171 	BO_LOCK(bo);
5172 	mtx_lock(&sync_mtx);
5173 	if (vp->v_mount->mnt_syncer == vp)
5174 		vp->v_mount->mnt_syncer = NULL;
5175 	if (bo->bo_flag & BO_ONWORKLST) {
5176 		LIST_REMOVE(bo, bo_synclist);
5177 		syncer_worklist_len--;
5178 		sync_vnode_count--;
5179 		bo->bo_flag &= ~BO_ONWORKLST;
5180 	}
5181 	mtx_unlock(&sync_mtx);
5182 	BO_UNLOCK(bo);
5183 
5184 	return (0);
5185 }
5186 
5187 int
5188 vn_need_pageq_flush(struct vnode *vp)
5189 {
5190 	struct vm_object *obj;
5191 	int need;
5192 
5193 	MPASS(mtx_owned(VI_MTX(vp)));
5194 	need = 0;
5195 	if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
5196 	    vm_object_mightbedirty(obj))
5197 		need = 1;
5198 	return (need);
5199 }
5200 
5201 /*
5202  * Check whether the vnode represents a disk device.
5203  */
5204 bool
5205 vn_isdisk_error(struct vnode *vp, int *errp)
5206 {
5207 	int error;
5208 
5209 	if (vp->v_type != VCHR) {
5210 		error = ENOTBLK;
5211 		goto out;
5212 	}
5213 	error = 0;
5214 	dev_lock();
5215 	if (vp->v_rdev == NULL)
5216 		error = ENXIO;
5217 	else if (vp->v_rdev->si_devsw == NULL)
5218 		error = ENXIO;
5219 	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
5220 		error = ENOTBLK;
5221 	dev_unlock();
5222 out:
5223 	*errp = error;
5224 	return (error == 0);
5225 }
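
/*
 * Illustrative sketch (not part of this file): callers that need the
 * reason for a failure use the _error variant directly:
 *
 *	if (!vn_isdisk_error(vp, &error))
 *		return (error);
 */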
5226 
5227 bool
5228 vn_isdisk(struct vnode *vp)
5229 {
5230 	int error;
5231 
5232 	return (vn_isdisk_error(vp, &error));
5233 }
5234 
5235 /*
5236  * VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
5237  * the comment above cache_fplookup for details.
5238  */
5239 int
5240 vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid, struct ucred *cred)
5241 {
5242 	int error;
5243 
5244 	VFS_SMR_ASSERT_ENTERED();
5245 
5246 	/* Check the owner. */
5247 	if (cred->cr_uid == file_uid) {
5248 		if (file_mode & S_IXUSR)
5249 			return (0);
5250 		goto out_error;
5251 	}
5252 
5253 	/* Otherwise, check the groups (first match) */
5254 	if (groupmember(file_gid, cred)) {
5255 		if (file_mode & S_IXGRP)
5256 			return (0);
5257 		goto out_error;
5258 	}
5259 
5260 	/* Otherwise, check everyone else. */
5261 	if (file_mode & S_IXOTH)
5262 		return (0);
5263 out_error:
5264 	/*
5265 	 * Permission check failed, but it is possible the denial will get
5266 	 * overturned (e.g., when root is traversing through a 700 directory
5267 	 * owned by someone else).
5268 	 *
5269 	 * vaccess() calls priv_check_cred which in turn can descend into MAC
5270 	 * modules overriding this result. It is unclear what semantics they
5271 	 * are allowed to operate with, thus for safety we don't call them
5272 	 * from within the SMR section. This also means that if any such
5273 	 * modules are present, we have to let the regular lookup decide.
5274 	 */
5275 	error = priv_check_cred_vfs_lookup_nomac(cred);
5276 	switch (error) {
5277 	case 0:
5278 		return (0);
5279 	case EAGAIN:
5280 		/*
5281 		 * MAC modules present.
5282 		 */
5283 		return (EAGAIN);
5284 	case EPERM:
5285 		return (EACCES);
5286 	default:
5287 		return (error);
5288 	}
5289 }
5290 
5291 /*
5292  * Common filesystem object access control check routine.  Accepts a
5293  * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5294  * Returns 0 on success, or an errno on failure.
5295  */
5296 int
5297 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
5298     accmode_t accmode, struct ucred *cred)
5299 {
5300 	accmode_t dac_granted;
5301 	accmode_t priv_granted;
5302 
5303 	KASSERT((accmode & ~(VEXEC | VWRITE | VREAD | VADMIN | VAPPEND)) == 0,
5304 	    ("invalid bit in accmode"));
5305 	KASSERT((accmode & VAPPEND) == 0 || (accmode & VWRITE),
5306 	    ("VAPPEND without VWRITE"));
5307 
5308 	/*
5309 	 * Look for a normal, non-privileged way to access the file/directory
5310 	 * as requested.  If it exists, go with that.
5311 	 */
5312 
5313 	dac_granted = 0;
5314 
5315 	/* Check the owner. */
5316 	if (cred->cr_uid == file_uid) {
5317 		dac_granted |= VADMIN;
5318 		if (file_mode & S_IXUSR)
5319 			dac_granted |= VEXEC;
5320 		if (file_mode & S_IRUSR)
5321 			dac_granted |= VREAD;
5322 		if (file_mode & S_IWUSR)
5323 			dac_granted |= (VWRITE | VAPPEND);
5324 
5325 		if ((accmode & dac_granted) == accmode)
5326 			return (0);
5327 
5328 		goto privcheck;
5329 	}
5330 
5331 	/* Otherwise, check the groups (first match) */
5332 	if (groupmember(file_gid, cred)) {
5333 		if (file_mode & S_IXGRP)
5334 			dac_granted |= VEXEC;
5335 		if (file_mode & S_IRGRP)
5336 			dac_granted |= VREAD;
5337 		if (file_mode & S_IWGRP)
5338 			dac_granted |= (VWRITE | VAPPEND);
5339 
5340 		if ((accmode & dac_granted) == accmode)
5341 			return (0);
5342 
5343 		goto privcheck;
5344 	}
5345 
5346 	/* Otherwise, check everyone else. */
5347 	if (file_mode & S_IXOTH)
5348 		dac_granted |= VEXEC;
5349 	if (file_mode & S_IROTH)
5350 		dac_granted |= VREAD;
5351 	if (file_mode & S_IWOTH)
5352 		dac_granted |= (VWRITE | VAPPEND);
5353 	if ((accmode & dac_granted) == accmode)
5354 		return (0);
5355 
5356 privcheck:
5357 	/*
5358 	 * Build a privilege mask to determine if the set of privileges
5359 	 * satisfies the requirements when combined with the granted mask
5360 	 * from above.  For each privilege, if the privilege is required,
5361 	 * bitwise or the request type onto the priv_granted mask.
5362 	 */
5363 	priv_granted = 0;
5364 
5365 	if (type == VDIR) {
5366 		/*
5367 		 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
5368 		 * requests, instead of PRIV_VFS_EXEC.
5369 		 */
5370 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
5371 		    !priv_check_cred(cred, PRIV_VFS_LOOKUP))
5372 			priv_granted |= VEXEC;
5373 	} else {
5374 		/*
5375 		 * Ensure that at least one execute bit is on. Otherwise,
5376 		 * a privileged user will always succeed, and we don't want
5377 		 * this to happen unless the file really is executable.
5378 		 */
5379 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
5380 		    (file_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0 &&
5381 		    !priv_check_cred(cred, PRIV_VFS_EXEC))
5382 			priv_granted |= VEXEC;
5383 	}
5384 
5385 	if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
5386 	    !priv_check_cred(cred, PRIV_VFS_READ))
5387 		priv_granted |= VREAD;
5388 
5389 	if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
5390 	    !priv_check_cred(cred, PRIV_VFS_WRITE))
5391 		priv_granted |= (VWRITE | VAPPEND);
5392 
5393 	if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
5394 	    !priv_check_cred(cred, PRIV_VFS_ADMIN))
5395 		priv_granted |= VADMIN;
5396 
5397 	if ((accmode & (priv_granted | dac_granted)) == accmode) {
5398 		return (0);
5399 	}
5400 
5401 	return ((accmode & VADMIN) ? EPERM : EACCES);
5402 }
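
/*
 * Illustrative sketch (not part of this file): a filesystem's VOP_ACCESS
 * typically ends by delegating to vaccess() with its on-disk ownership
 * data; "ip" is a hypothetical in-memory inode:
 *
 *	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
 *	    ap->a_accmode, ap->a_cred));
 */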
5403 
5404 /*
5405  * Credential check based on process requesting service, and per-attribute
5406  * permissions.
5407  */
5408 int
5409 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
5410     struct thread *td, accmode_t accmode)
5411 {
5412 
5413 	/*
5414 	 * Kernel-invoked requests always succeed.
5415 	 */
5416 	if (cred == NOCRED)
5417 		return (0);
5418 
5419 	/*
5420 	 * Do not allow privileged processes in jail to directly manipulate
5421 	 * system attributes.
5422 	 */
5423 	switch (attrnamespace) {
5424 	case EXTATTR_NAMESPACE_SYSTEM:
5425 		/* Potentially should be: return (EPERM); */
5426 		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM));
5427 	case EXTATTR_NAMESPACE_USER:
5428 		return (VOP_ACCESS(vp, accmode, cred, td));
5429 	default:
5430 		return (EPERM);
5431 	}
5432 }
5433 
5434 #ifdef DEBUG_VFS_LOCKS
5435 /*
5436  * This only exists to suppress warnings from unlocked specfs accesses.  It is
5437  * no longer ok to have an unlocked VFS.
5438  */
5439 #define	IGNORE_LOCK(vp) (KERNEL_PANICKED() || (vp) == NULL ||		\
5440 	(vp)->v_type == VCHR ||	(vp)->v_type == VBAD)
5441 
5442 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
5443 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0,
5444     "Drop into debugger on lock violation");
5445 
5446 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
5447 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex,
5448     0, "Check for interlock across VOPs");
5449 
5450 int vfs_badlock_print = 1;	/* Print lock violations. */
5451 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print,
5452     0, "Print lock violations");
5453 
5454 int vfs_badlock_vnode = 1;	/* Print vnode details on lock violations. */
5455 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_vnode, CTLFLAG_RW, &vfs_badlock_vnode,
5456     0, "Print vnode details on lock violations");
5457 
5458 #ifdef KDB
5459 int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
5460 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW,
5461     &vfs_badlock_backtrace, 0, "Print backtrace at lock violations");
5462 #endif
5463 
5464 static void
5465 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
5466 {
5467 
5468 #ifdef KDB
5469 	if (vfs_badlock_backtrace)
5470 		kdb_backtrace();
5471 #endif
5472 	if (vfs_badlock_vnode)
5473 		vn_printf(vp, "vnode ");
5474 	if (vfs_badlock_print)
5475 		printf("%s: %p %s\n", str, (void *)vp, msg);
5476 	if (vfs_badlock_ddb)
5477 		kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
5478 }
5479 
5480 void
5481 assert_vi_locked(struct vnode *vp, const char *str)
5482 {
5483 
5484 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
5485 		vfs_badlock("interlock is not locked but should be", str, vp);
5486 }
5487 
5488 void
5489 assert_vi_unlocked(struct vnode *vp, const char *str)
5490 {
5491 
5492 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
5493 		vfs_badlock("interlock is locked but should not be", str, vp);
5494 }
5495 
5496 void
5497 assert_vop_locked(struct vnode *vp, const char *str)
5498 {
5499 	int locked;
5500 
5501 	if (!IGNORE_LOCK(vp)) {
5502 		locked = VOP_ISLOCKED(vp);
5503 		if (locked == 0 || locked == LK_EXCLOTHER)
5504 			vfs_badlock("is not locked but should be", str, vp);
5505 	}
5506 }
5507 
5508 void
5509 assert_vop_unlocked(struct vnode *vp, const char *str)
5510 {
5511 
5512 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
5513 		vfs_badlock("is locked but should not be", str, vp);
5514 }
5515 
5516 void
5517 assert_vop_elocked(struct vnode *vp, const char *str)
5518 {
5519 
5520 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
5521 		vfs_badlock("is not exclusive locked but should be", str, vp);
5522 }
5523 #endif /* DEBUG_VFS_LOCKS */
5524 
5525 void
5526 vop_rename_fail(struct vop_rename_args *ap)
5527 {
5528 
5529 	if (ap->a_tvp != NULL)
5530 		vput(ap->a_tvp);
5531 	if (ap->a_tdvp == ap->a_tvp)
5532 		vrele(ap->a_tdvp);
5533 	else
5534 		vput(ap->a_tdvp);
5535 	vrele(ap->a_fdvp);
5536 	vrele(ap->a_fvp);
5537 }
5538 
5539 void
5540 vop_rename_pre(void *ap)
5541 {
5542 	struct vop_rename_args *a = ap;
5543 
5544 #ifdef DEBUG_VFS_LOCKS
5545 	if (a->a_tvp)
5546 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
5547 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
5548 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
5549 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
5550 
5551 	/* Check the source (from). */
5552 	if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock &&
5553 	    (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock))
5554 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
5555 	if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock)
5556 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
5557 
5558 	/* Check the target. */
5559 	if (a->a_tvp)
5560 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
5561 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
5562 #endif
5563 	/*
5564 	 * It may be tempting to add vn_seqc_write_begin/end calls here and
5565 	 * in vop_rename_post, but that will not work since some filesystems
5566 	 * relookup vnodes mid-rename.  That behavior is probably a bug.
5567 	 *
5568 	 * For now filesystems are expected to do the relevant calls after they
5569 	 * decide what vnodes to operate on.
5570 	 */
5571 	if (a->a_tdvp != a->a_fdvp)
5572 		vhold(a->a_fdvp);
5573 	if (a->a_tvp != a->a_fvp)
5574 		vhold(a->a_fvp);
5575 	vhold(a->a_tdvp);
5576 	if (a->a_tvp)
5577 		vhold(a->a_tvp);
5578 }
5579 
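/*
 * Sketch of the expectation stated in the comment above (illustrative,
 * not lifted from any particular filesystem): once the implementation
 * has settled on the vnodes it will modify, it brackets the change
 * itself:
 *
 *	vn_seqc_write_begin(fdvp);
 *	... perform the directory modification ...
 *	vn_seqc_write_end(fdvp);
 */
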
5580 #ifdef DEBUG_VFS_LOCKS
5581 void
5582 vop_fplookup_vexec_debugpre(void *ap __unused)
5583 {
5584 
5585 	VFS_SMR_ASSERT_ENTERED();
5586 }
5587 
5588 void
5589 vop_fplookup_vexec_debugpost(void *ap __unused, int rc __unused)
5590 {
5591 
5592 	VFS_SMR_ASSERT_ENTERED();
5593 }
5594 
5595 void
5596 vop_fplookup_symlink_debugpre(void *ap __unused)
5597 {
5598 
5599 	VFS_SMR_ASSERT_ENTERED();
5600 }
5601 
5602 void
5603 vop_fplookup_symlink_debugpost(void *ap __unused, int rc __unused)
5604 {
5605 
5606 	VFS_SMR_ASSERT_ENTERED();
5607 }

5608 void
5609 vop_strategy_debugpre(void *ap)
5610 {
5611 	struct vop_strategy_args *a;
5612 	struct buf *bp;
5613 
5614 	a = ap;
5615 	bp = a->a_bp;
5616 
5617 	/*
5618 	 * Cluster ops lock their component buffers but not the IO container.
5619 	 */
5620 	if ((bp->b_flags & B_CLUSTER) != 0)
5621 		return;
5622 
5623 	if (!KERNEL_PANICKED() && !BUF_ISLOCKED(bp)) {
5624 		if (vfs_badlock_print)
5625 			printf(
5626 			    "VOP_STRATEGY: bp is not locked but should be\n");
5627 		if (vfs_badlock_ddb)
5628 			kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
5629 	}
5630 }
5631 
5632 void
5633 vop_lock_debugpre(void *ap)
5634 {
5635 	struct vop_lock1_args *a = ap;
5636 
5637 	if ((a->a_flags & LK_INTERLOCK) == 0)
5638 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
5639 	else
5640 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
5641 }
5642 
5643 void
5644 vop_lock_debugpost(void *ap, int rc)
5645 {
5646 	struct vop_lock1_args *a = ap;
5647 
5648 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
5649 	if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0)
5650 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
5651 }
5652 
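/*
 * Illustrative caller patterns (a sketch; both forms are checked by
 * vop_lock_debugpre above).  Without LK_INTERLOCK the interlock must not
 * be held; with it, the caller hands interlock ownership to the lock op:
 *
 *	error = VOP_LOCK(vp, LK_EXCLUSIVE);
 *
 *	VI_LOCK(vp);
 *	error = VOP_LOCK(vp, LK_EXCLUSIVE | LK_INTERLOCK);
 */
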
5653 void
5654 vop_unlock_debugpre(void *ap)
5655 {
5656 	struct vop_unlock_args *a = ap;
5657 
5658 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
5659 }
5660 
5661 void
5662 vop_need_inactive_debugpre(void *ap)
5663 {
5664 	struct vop_need_inactive_args *a = ap;
5665 
5666 	ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE");
5667 }
5668 
5669 void
5670 vop_need_inactive_debugpost(void *ap, int rc)
5671 {
5672 	struct vop_need_inactive_args *a = ap;
5673 
5674 	ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE");
5675 }
5676 #endif
5677 
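/*
 * The vop_*_pre/post pairs below are not called by filesystems directly;
 * the wrappers generated from vnode_if.src invoke them around each VOP
 * dispatch.  Roughly (simplified sketch of the generated code):
 *
 *	vop_create_pre(&a);
 *	rc = (the filesystem's create method)(&a);
 *	vop_create_post(&a, rc);
 */
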
5678 void
5679 vop_create_pre(void *ap)
5680 {
5681 	struct vop_create_args *a;
5682 	struct vnode *dvp;
5683 
5684 	a = ap;
5685 	dvp = a->a_dvp;
5686 	vn_seqc_write_begin(dvp);
5687 }
5688 
5689 void
5690 vop_create_post(void *ap, int rc)
5691 {
5692 	struct vop_create_args *a;
5693 	struct vnode *dvp;
5694 
5695 	a = ap;
5696 	dvp = a->a_dvp;
5697 	vn_seqc_write_end(dvp);
5698 	if (!rc)
5699 		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
5700 }
5701 
5702 void
5703 vop_whiteout_pre(void *ap)
5704 {
5705 	struct vop_whiteout_args *a;
5706 	struct vnode *dvp;
5707 
5708 	a = ap;
5709 	dvp = a->a_dvp;
5710 	vn_seqc_write_begin(dvp);
5711 }
5712 
5713 void
5714 vop_whiteout_post(void *ap, int rc)
5715 {
5716 	struct vop_whiteout_args *a;
5717 	struct vnode *dvp;
5718 
5719 	a = ap;
5720 	dvp = a->a_dvp;
5721 	vn_seqc_write_end(dvp);
5722 }
5723 
5724 void
5725 vop_deleteextattr_pre(void *ap)
5726 {
5727 	struct vop_deleteextattr_args *a;
5728 	struct vnode *vp;
5729 
5730 	a = ap;
5731 	vp = a->a_vp;
5732 	vn_seqc_write_begin(vp);
5733 }
5734 
5735 void
5736 vop_deleteextattr_post(void *ap, int rc)
5737 {
5738 	struct vop_deleteextattr_args *a;
5739 	struct vnode *vp;
5740 
5741 	a = ap;
5742 	vp = a->a_vp;
5743 	vn_seqc_write_end(vp);
5744 	if (!rc)
5745 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
5746 }
5747 
5748 void
5749 vop_link_pre(void *ap)
5750 {
5751 	struct vop_link_args *a;
5752 	struct vnode *vp, *tdvp;
5753 
5754 	a = ap;
5755 	vp = a->a_vp;
5756 	tdvp = a->a_tdvp;
5757 	vn_seqc_write_begin(vp);
5758 	vn_seqc_write_begin(tdvp);
5759 }
5760 
5761 void
5762 vop_link_post(void *ap, int rc)
5763 {
5764 	struct vop_link_args *a;
5765 	struct vnode *vp, *tdvp;
5766 
5767 	a = ap;
5768 	vp = a->a_vp;
5769 	tdvp = a->a_tdvp;
5770 	vn_seqc_write_end(vp);
5771 	vn_seqc_write_end(tdvp);
5772 	if (!rc) {
5773 		VFS_KNOTE_LOCKED(vp, NOTE_LINK);
5774 		VFS_KNOTE_LOCKED(tdvp, NOTE_WRITE);
5775 	}
5776 }
5777 
5778 void
5779 vop_mkdir_pre(void *ap)
5780 {
5781 	struct vop_mkdir_args *a;
5782 	struct vnode *dvp;
5783 
5784 	a = ap;
5785 	dvp = a->a_dvp;
5786 	vn_seqc_write_begin(dvp);
5787 }
5788 
5789 void
5790 vop_mkdir_post(void *ap, int rc)
5791 {
5792 	struct vop_mkdir_args *a;
5793 	struct vnode *dvp;
5794 
5795 	a = ap;
5796 	dvp = a->a_dvp;
5797 	vn_seqc_write_end(dvp);
5798 	if (!rc)
5799 		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK);
5800 }
5801 
5802 #ifdef DEBUG_VFS_LOCKS
5803 void
5804 vop_mkdir_debugpost(void *ap, int rc)
5805 {
5806 	struct vop_mkdir_args *a;
5807 
5808 	a = ap;
5809 	if (!rc)
5810 		cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp);
5811 }
5812 #endif
5813 
5814 void
5815 vop_mknod_pre(void *ap)
5816 {
5817 	struct vop_mknod_args *a;
5818 	struct vnode *dvp;
5819 
5820 	a = ap;
5821 	dvp = a->a_dvp;
5822 	vn_seqc_write_begin(dvp);
5823 }
5824 
5825 void
5826 vop_mknod_post(void *ap, int rc)
5827 {
5828 	struct vop_mknod_args *a;
5829 	struct vnode *dvp;
5830 
5831 	a = ap;
5832 	dvp = a->a_dvp;
5833 	vn_seqc_write_end(dvp);
5834 	if (!rc)
5835 		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
5836 }
5837 
5838 void
5839 vop_reclaim_post(void *ap, int rc)
5840 {
5841 	struct vop_reclaim_args *a;
5842 	struct vnode *vp;
5843 
5844 	a = ap;
5845 	vp = a->a_vp;
5846 	ASSERT_VOP_IN_SEQC(vp);
5847 	if (!rc)
5848 		VFS_KNOTE_LOCKED(vp, NOTE_REVOKE);
5849 }
5850 
5851 void
5852 vop_remove_pre(void *ap)
5853 {
5854 	struct vop_remove_args *a;
5855 	struct vnode *dvp, *vp;
5856 
5857 	a = ap;
5858 	dvp = a->a_dvp;
5859 	vp = a->a_vp;
5860 	vn_seqc_write_begin(dvp);
5861 	vn_seqc_write_begin(vp);
5862 }
5863 
5864 void
5865 vop_remove_post(void *ap, int rc)
5866 {
5867 	struct vop_remove_args *a;
5868 	struct vnode *dvp, *vp;
5869 
5870 	a = ap;
5871 	dvp = a->a_dvp;
5872 	vp = a->a_vp;
5873 	vn_seqc_write_end(dvp);
5874 	vn_seqc_write_end(vp);
5875 	if (!rc) {
5876 		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
5877 		VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
5878 	}
5879 }
5880 
5881 void
5882 vop_rename_post(void *ap, int rc)
5883 {
5884 	struct vop_rename_args *a = ap;
5885 	long hint;
5886 
5887 	if (!rc) {
5888 		hint = NOTE_WRITE;
5889 		if (a->a_fdvp == a->a_tdvp) {
5890 			if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR)
5891 				hint |= NOTE_LINK;
5892 			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
5893 			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
5894 		} else {
5895 			hint |= NOTE_EXTEND;
5896 			if (a->a_fvp->v_type == VDIR)
5897 				hint |= NOTE_LINK;
5898 			VFS_KNOTE_UNLOCKED(a->a_fdvp, hint);
5899 
5900 			if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL &&
5901 			    a->a_tvp->v_type == VDIR)
5902 				hint &= ~NOTE_LINK;
5903 			VFS_KNOTE_UNLOCKED(a->a_tdvp, hint);
5904 		}
5905 
5906 		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
5907 		if (a->a_tvp)
5908 			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
5909 	}
5910 	if (a->a_tdvp != a->a_fdvp)
5911 		vdrop(a->a_fdvp);
5912 	if (a->a_tvp != a->a_fvp)
5913 		vdrop(a->a_fvp);
5914 	vdrop(a->a_tdvp);
5915 	if (a->a_tvp)
5916 		vdrop(a->a_tvp);
5917 }
5918 
5919 void
5920 vop_rmdir_pre(void *ap)
5921 {
5922 	struct vop_rmdir_args *a;
5923 	struct vnode *dvp, *vp;
5924 
5925 	a = ap;
5926 	dvp = a->a_dvp;
5927 	vp = a->a_vp;
5928 	vn_seqc_write_begin(dvp);
5929 	vn_seqc_write_begin(vp);
5930 }
5931 
5932 void
5933 vop_rmdir_post(void *ap, int rc)
5934 {
5935 	struct vop_rmdir_args *a;
5936 	struct vnode *dvp, *vp;
5937 
5938 	a = ap;
5939 	dvp = a->a_dvp;
5940 	vp = a->a_vp;
5941 	vn_seqc_write_end(dvp);
5942 	vn_seqc_write_end(vp);
5943 	if (!rc) {
5944 		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE | NOTE_LINK);
5945 		VFS_KNOTE_LOCKED(vp, NOTE_DELETE);
5946 	}
5947 }
5948 
5949 void
5950 vop_setattr_pre(void *ap)
5951 {
5952 	struct vop_setattr_args *a;
5953 	struct vnode *vp;
5954 
5955 	a = ap;
5956 	vp = a->a_vp;
5957 	vn_seqc_write_begin(vp);
5958 }
5959 
5960 void
5961 vop_setattr_post(void *ap, int rc)
5962 {
5963 	struct vop_setattr_args *a;
5964 	struct vnode *vp;
5965 
5966 	a = ap;
5967 	vp = a->a_vp;
5968 	vn_seqc_write_end(vp);
5969 	if (!rc)
5970 		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
5971 }
5972 
5973 void
5974 vop_setacl_pre(void *ap)
5975 {
5976 	struct vop_setacl_args *a;
5977 	struct vnode *vp;
5978 
5979 	a = ap;
5980 	vp = a->a_vp;
5981 	vn_seqc_write_begin(vp);
5982 }
5983 
5984 void
5985 vop_setacl_post(void *ap, int rc __unused)
5986 {
5987 	struct vop_setacl_args *a;
5988 	struct vnode *vp;
5989 
5990 	a = ap;
5991 	vp = a->a_vp;
5992 	vn_seqc_write_end(vp);
5993 }
5994 
5995 void
5996 vop_setextattr_pre(void *ap)
5997 {
5998 	struct vop_setextattr_args *a;
5999 	struct vnode *vp;
6000 
6001 	a = ap;
6002 	vp = a->a_vp;
6003 	vn_seqc_write_begin(vp);
6004 }
6005 
6006 void
6007 vop_setextattr_post(void *ap, int rc)
6008 {
6009 	struct vop_setextattr_args *a;
6010 	struct vnode *vp;
6011 
6012 	a = ap;
6013 	vp = a->a_vp;
6014 	vn_seqc_write_end(vp);
6015 	if (!rc)
6016 		VFS_KNOTE_LOCKED(vp, NOTE_ATTRIB);
6017 }
6018 
6019 void
6020 vop_symlink_pre(void *ap)
6021 {
6022 	struct vop_symlink_args *a;
6023 	struct vnode *dvp;
6024 
6025 	a = ap;
6026 	dvp = a->a_dvp;
6027 	vn_seqc_write_begin(dvp);
6028 }
6029 
6030 void
6031 vop_symlink_post(void *ap, int rc)
6032 {
6033 	struct vop_symlink_args *a;
6034 	struct vnode *dvp;
6035 
6036 	a = ap;
6037 	dvp = a->a_dvp;
6038 	vn_seqc_write_end(dvp);
6039 	if (!rc)
6040 		VFS_KNOTE_LOCKED(dvp, NOTE_WRITE);
6041 }
6042 
6043 void
6044 vop_open_post(void *ap, int rc)
6045 {
6046 	struct vop_open_args *a = ap;
6047 
6048 	if (!rc)
6049 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN);
6050 }
6051 
6052 void
6053 vop_close_post(void *ap, int rc)
6054 {
6055 	struct vop_close_args *a = ap;
6056 
6057 	if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
6058 	    !VN_IS_DOOMED(a->a_vp))) {
6059 		VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
6060 		    NOTE_CLOSE_WRITE : NOTE_CLOSE);
6061 	}
6062 }
6063 
6064 void
6065 vop_read_post(void *ap, int rc)
6066 {
6067 	struct vop_read_args *a = ap;
6068 
6069 	if (!rc)
6070 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
6071 }
6072 
6073 void
6074 vop_read_pgcache_post(void *ap, int rc)
6075 {
6076 	struct vop_read_pgcache_args *a = ap;
6077 
6078 	if (!rc)
6079 		VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ);
6080 }
6081 
6082 void
6083 vop_readdir_post(void *ap, int rc)
6084 {
6085 	struct vop_readdir_args *a = ap;
6086 
6087 	if (!rc)
6088 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ);
6089 }
6090 
6091 static struct knlist fs_knlist;
6092 
6093 static void
6094 vfs_event_init(void *arg)
6095 {
6096 	knlist_init_mtx(&fs_knlist, NULL);
6097 }
6098 /* XXX - correct order? */
6099 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
6100 
6101 void
6102 vfs_event_signal(fsid_t *fsid, uint32_t event, intptr_t data __unused)
6103 {
6104 
6105 	KNOTE_UNLOCKED(&fs_knlist, event);
6106 }
6107 
6108 static int	filt_fsattach(struct knote *kn);
6109 static void	filt_fsdetach(struct knote *kn);
6110 static int	filt_fsevent(struct knote *kn, long hint);
6111 
6112 struct filterops fs_filtops = {
6113 	.f_isfd = 0,
6114 	.f_attach = filt_fsattach,
6115 	.f_detach = filt_fsdetach,
6116 	.f_event = filt_fsevent
6117 };
6118 
6119 static int
6120 filt_fsattach(struct knote *kn)
6121 {
6122 
6123 	kn->kn_flags |= EV_CLEAR;
6124 	knlist_add(&fs_knlist, kn, 0);
6125 	return (0);
6126 }
6127 
6128 static void
6129 filt_fsdetach(struct knote *kn)
6130 {
6131 
6132 	knlist_remove(&fs_knlist, kn, 0);
6133 }
6134 
6135 static int
6136 filt_fsevent(struct knote *kn, long hint)
6137 {
6138 
6139 	kn->kn_fflags |= kn->kn_sfflags & hint;
6140 
6141 	return (kn->kn_fflags != 0);
6142 }
6143 
6144 static int
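/*
 * Userland sketch (assumed, for illustration): a monitor subscribes to
 * these filesystem-wide events with EVFILT_FS.  The ident is ignored
 * since f_isfd is 0, and filt_fsattach() forces EV_CLEAR:
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 0, EVFILT_FS, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */
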
6145 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
6146 {
6147 	struct vfsidctl vc;
6148 	int error;
6149 	struct mount *mp;
6150 
6151 	error = SYSCTL_IN(req, &vc, sizeof(vc));
6152 	if (error)
6153 		return (error);
6154 	if (vc.vc_vers != VFS_CTL_VERS1)
6155 		return (EINVAL);
6156 	mp = vfs_getvfs(&vc.vc_fsid);
6157 	if (mp == NULL)
6158 		return (ENOENT);
6159 	/* Ensure that a specific sysctl goes to the right filesystem. */
6160 	if (strcmp(vc.vc_fstypename, "*") != 0 &&
6161 	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
6162 		vfs_rel(mp);
6163 		return (EINVAL);
6164 	}
6165 	VCTLTOREQ(&vc, req);
6166 	error = VFS_SYSCTL(mp, vc.vc_op, req);
6167 	vfs_rel(mp);
6168 	return (error);
6169 }
6170 
6171 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLTYPE_OPAQUE | CTLFLAG_MPSAFE | CTLFLAG_WR,
6172     NULL, 0, sysctl_vfs_ctl, "",
6173     "Sysctl by fsid");
6174 
6175 /*
6176  * Function to initialize a va_filerev field sensibly.
6177  * XXX: Wouldn't a random number make a lot more sense ??
6178  */
6179 u_quad_t
6180 init_va_filerev(void)
6181 {
6182 	struct bintime bt;
6183 
6184 	getbinuptime(&bt);
6185 	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
6186 }
6187 
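/*
 * Assumed consumer sketch: a filesystem seeds its in-memory file
 * revision with this when an inode is first brought in, then bumps it on
 * each modification; UFS-style code would look like:
 *
 *	ip->i_modrev = init_va_filerev();
 */
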
6188 static int	filt_vfsread(struct knote *kn, long hint);
6189 static int	filt_vfswrite(struct knote *kn, long hint);
6190 static int	filt_vfsvnode(struct knote *kn, long hint);
6191 static void	filt_vfsdetach(struct knote *kn);
6192 static struct filterops vfsread_filtops = {
6193 	.f_isfd = 1,
6194 	.f_detach = filt_vfsdetach,
6195 	.f_event = filt_vfsread
6196 };
6197 static struct filterops vfswrite_filtops = {
6198 	.f_isfd = 1,
6199 	.f_detach = filt_vfsdetach,
6200 	.f_event = filt_vfswrite
6201 };
6202 static struct filterops vfsvnode_filtops = {
6203 	.f_isfd = 1,
6204 	.f_detach = filt_vfsdetach,
6205 	.f_event = filt_vfsvnode
6206 };
6207 
6208 static void
6209 vfs_knllock(void *arg)
6210 {
6211 	struct vnode *vp = arg;
6212 
6213 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
6214 }
6215 
6216 static void
6217 vfs_knlunlock(void *arg)
6218 {
6219 	struct vnode *vp = arg;
6220 
6221 	VOP_UNLOCK(vp);
6222 }
6223 
6224 static void
6225 vfs_knl_assert_lock(void *arg, int what)
6226 {
6227 #ifdef DEBUG_VFS_LOCKS
6228 	struct vnode *vp = arg;
6229 
6230 	if (what == LA_LOCKED)
6231 		ASSERT_VOP_LOCKED(vp, "vfs_knl_assert_locked");
6232 	else
6233 		ASSERT_VOP_UNLOCKED(vp, "vfs_knl_assert_unlocked");
6234 #endif
6235 }
6236 
6237 int
6238 vfs_kqfilter(struct vop_kqfilter_args *ap)
6239 {
6240 	struct vnode *vp = ap->a_vp;
6241 	struct knote *kn = ap->a_kn;
6242 	struct knlist *knl;
6243 
6244 	switch (kn->kn_filter) {
6245 	case EVFILT_READ:
6246 		kn->kn_fop = &vfsread_filtops;
6247 		break;
6248 	case EVFILT_WRITE:
6249 		kn->kn_fop = &vfswrite_filtops;
6250 		break;
6251 	case EVFILT_VNODE:
6252 		kn->kn_fop = &vfsvnode_filtops;
6253 		break;
6254 	default:
6255 		return (EINVAL);
6256 	}
6257 
6258 	kn->kn_hook = (caddr_t)vp;
6259 
6260 	v_addpollinfo(vp);
6261 	if (vp->v_pollinfo == NULL)
6262 		return (ENOMEM);
6263 	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
6264 	vhold(vp);
6265 	knlist_add(knl, kn, 0);
6266 
6267 	return (0);
6268 }
6269 
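/*
 * Userland sketch (illustrative only): watching a file for deletion or
 * rename through this filter.  The ident is an open file descriptor,
 * since f_isfd is 1 for all three filterops above:
 *
 *	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_DELETE | NOTE_RENAME, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */
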
6270 /*
6271  * Detach knote from vnode
6272  */
6273 static void
6274 filt_vfsdetach(struct knote *kn)
6275 {
6276 	struct vnode *vp = (struct vnode *)kn->kn_hook;
6277 
6278 	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
6279 	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
6280 	vdrop(vp);
6281 }
6282 
6283 /*ARGSUSED*/
6284 static int
6285 filt_vfsread(struct knote *kn, long hint)
6286 {
6287 	struct vnode *vp = (struct vnode *)kn->kn_hook;
6288 	struct vattr va;
6289 	int res;
6290 
6291 	/*
6292 	 * filesystem is gone, so set the EOF flag and schedule
6293 	 * the knote for deletion.
6294 	 */
6295 	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
6296 		VI_LOCK(vp);
6297 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
6298 		VI_UNLOCK(vp);
6299 		return (1);
6300 	}
6301 
6302 	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
6303 		return (0);
6304 
6305 	VI_LOCK(vp);
6306 	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
6307 	res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0;
6308 	VI_UNLOCK(vp);
6309 	return (res);
6310 }
6311 
6312 /*ARGSUSED*/
6313 static int
6314 filt_vfswrite(struct knote *kn, long hint)
6315 {
6316 	struct vnode *vp = (struct vnode *)kn->kn_hook;
6317 
6318 	VI_LOCK(vp);
6319 
6320 	/*
6321 	 * filesystem is gone, so set the EOF flag and schedule
6322 	 * the knote for deletion.
6323 	 */
6324 	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD))
6325 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
6326 
6327 	kn->kn_data = 0;
6328 	VI_UNLOCK(vp);
6329 	return (1);
6330 }
6331 
6332 static int
6333 filt_vfsvnode(struct knote *kn, long hint)
6334 {
6335 	struct vnode *vp = (struct vnode *)kn->kn_hook;
6336 	int res;
6337 
6338 	VI_LOCK(vp);
6339 	if (kn->kn_sfflags & hint)
6340 		kn->kn_fflags |= hint;
6341 	if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) {
6342 		kn->kn_flags |= EV_EOF;
6343 		VI_UNLOCK(vp);
6344 		return (1);
6345 	}
6346 	res = (kn->kn_fflags != 0);
6347 	VI_UNLOCK(vp);
6348 	return (res);
6349 }
6350 
6351 /*
6352  * Check whether the directory is empty.  Returns 0 if the
6353  * directory is empty; otherwise returns an error value,
6354  * typically ENOTEMPTY.
6356  */
6357 int
6358 vfs_emptydir(struct vnode *vp)
6359 {
6360 	struct uio uio;
6361 	struct iovec iov;
6362 	struct dirent *dirent, *dp, *endp;
6363 	int error, eof;
6364 
6365 	error = 0;
6366 	eof = 0;
6367 
6368 	ASSERT_VOP_LOCKED(vp, "vfs_emptydir");
6369 
6370 	dirent = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK);
6371 	iov.iov_base = dirent;
6372 	iov.iov_len = sizeof(struct dirent);
6373 
6374 	uio.uio_iov = &iov;
6375 	uio.uio_iovcnt = 1;
6376 	uio.uio_offset = 0;
6377 	uio.uio_resid = sizeof(struct dirent);
6378 	uio.uio_segflg = UIO_SYSSPACE;
6379 	uio.uio_rw = UIO_READ;
6380 	uio.uio_td = curthread;
6381 
6382 	while (eof == 0 && error == 0) {
6383 		error = VOP_READDIR(vp, &uio, curthread->td_ucred, &eof,
6384 		    NULL, NULL);
6385 		if (error != 0)
6386 			break;
6387 		endp = (void *)((uint8_t *)dirent +
6388 		    sizeof(struct dirent) - uio.uio_resid);
6389 		for (dp = dirent; dp < endp;
6390 		     dp = (void *)((uint8_t *)dp + GENERIC_DIRSIZ(dp))) {
6391 			if (dp->d_type == DT_WHT)
6392 				continue;
6393 			if (dp->d_namlen == 0)
6394 				continue;
6395 			if (dp->d_type != DT_DIR &&
6396 			    dp->d_type != DT_UNKNOWN) {
6397 				error = ENOTEMPTY;
6398 				break;
6399 			}
6400 			if (dp->d_namlen > 2) {
6401 				error = ENOTEMPTY;
6402 				break;
6403 			}
6404 			if (dp->d_namlen == 1 &&
6405 			    dp->d_name[0] != '.') {
6406 				error = ENOTEMPTY;
6407 				break;
6408 			}
6409 			if (dp->d_namlen == 2 &&
6410 			    dp->d_name[1] != '.') {
6411 				error = ENOTEMPTY;
6412 				break;
6413 			}
6414 			uio.uio_resid = sizeof(struct dirent);
6415 		}
6416 	}
6417 	free(dirent, M_TEMP);
6418 	return (error);
6419 }
6420 
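/*
 * Caller sketch (assumed): per the assertion above, the directory vnode
 * must be locked across the call, e.g.:
 *
 *	vn_lock(vp, LK_SHARED | LK_RETRY);
 *	error = vfs_emptydir(vp);
 *	VOP_UNLOCK(vp);
 */
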
6421 int
6422 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
6423 {
6424 	int error;
6425 
6426 	if (dp->d_reclen > ap->a_uio->uio_resid)
6427 		return (ENAMETOOLONG);
6428 	error = uiomove(dp, dp->d_reclen, ap->a_uio);
6429 	if (error) {
6430 		if (ap->a_ncookies != NULL) {
6431 			if (ap->a_cookies != NULL)
6432 				free(ap->a_cookies, M_TEMP);
6433 			ap->a_cookies = NULL;
6434 			*ap->a_ncookies = 0;
6435 		}
6436 		return (error);
6437 	}
6438 	if (ap->a_ncookies == NULL)
6439 		return (0);
6440 
6441 	KASSERT(ap->a_cookies,
6442 	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
6443 
6444 	*ap->a_cookies = realloc(*ap->a_cookies,
6445 	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
6446 	(*ap->a_cookies)[*ap->a_ncookies] = off;
6447 	*ap->a_ncookies += 1;
6448 	return (0);
6449 }
6450 
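/*
 * Sketch of the conventional caller loop (modeled on existing users such
 * as devfs; details assumed).  A nonzero return, ENAMETOOLONG in
 * particular, means the uio buffer is exhausted rather than a hard
 * error:
 *
 *	if (vfs_read_dirent(ap, dp, off))
 *		break;
 */
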
6451 /*
6452  * The purpose of this routine is to remove granularity from accmode_t,
6453  * reducing it to the standard UNIX access bits - VEXEC, VREAD, VWRITE,
6454  * VADMIN and VAPPEND.
6455  *
6456  * If it returns 0, the caller is supposed to continue with the usual
6457  * access checks using 'accmode' as modified by this routine.  If it
6458  * returns a nonzero value, the caller is supposed to return that value
6459  * as errno.
6460  *
6461  * Note that after this routine runs, accmode may be zero.
6462  */
6463 int
6464 vfs_unixify_accmode(accmode_t *accmode)
6465 {
6466 	/*
6467 	 * There is no way to specify explicit "deny" rule using
6468 	 * file mode or POSIX.1e ACLs.
6469 	 */
6470 	if (*accmode & VEXPLICIT_DENY) {
6471 		*accmode = 0;
6472 		return (0);
6473 	}
6474 
6475 	/*
6476 	 * None of these can be translated into usual access bits.
6477 	 * Also, the common case for NFSv4 ACLs is to not contain
6478 	 * either of these bits. Caller should check for VWRITE
6479 	 * on the containing directory instead.
6480 	 */
6481 	if (*accmode & (VDELETE_CHILD | VDELETE))
6482 		return (EPERM);
6483 
6484 	if (*accmode & VADMIN_PERMS) {
6485 		*accmode &= ~VADMIN_PERMS;
6486 		*accmode |= VADMIN;
6487 	}
6488 
6489 	/*
6490 	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
6491 	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
6492 	 */
6493 	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
6494 
6495 	return (0);
6496 }
6497 
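/*
 * Conventional caller pattern, per the contract described in the comment
 * above: run the reduction first, fail hard on a nonzero return, and
 * skip further checks if nothing is left to check (a sketch):
 *
 *	error = vfs_unixify_accmode(&accmode);
 *	if (error != 0)
 *		return (error);
 *	if (accmode == 0)
 *		return (0);
 */
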
6498 /*
6499  * Clear out a doomed vnode (if any) and replace it with a new one as long
6500  * as the fs is not being unmounted. Return the root vnode to the caller.
6501  */
6502 static int __noinline
6503 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
6504 {
6505 	struct vnode *vp;
6506 	int error;
6507 
6508 restart:
6509 	if (mp->mnt_rootvnode != NULL) {
6510 		MNT_ILOCK(mp);
6511 		vp = mp->mnt_rootvnode;
6512 		if (vp != NULL) {
6513 			if (!VN_IS_DOOMED(vp)) {
6514 				vrefact(vp);
6515 				MNT_IUNLOCK(mp);
6516 				error = vn_lock(vp, flags);
6517 				if (error == 0) {
6518 					*vpp = vp;
6519 					return (0);
6520 				}
6521 				vrele(vp);
6522 				goto restart;
6523 			}
6524 			/*
6525 			 * Clear the old one.
6526 			 */
6527 			mp->mnt_rootvnode = NULL;
6528 		}
6529 		MNT_IUNLOCK(mp);
6530 		if (vp != NULL) {
6531 			vfs_op_barrier_wait(mp);
6532 			vrele(vp);
6533 		}
6534 	}
6535 	error = VFS_CACHEDROOT(mp, flags, vpp);
6536 	if (error != 0)
6537 		return (error);
6538 	if (mp->mnt_vfs_ops == 0) {
6539 		MNT_ILOCK(mp);
6540 		if (mp->mnt_vfs_ops != 0) {
6541 			MNT_IUNLOCK(mp);
6542 			return (0);
6543 		}
6544 		if (mp->mnt_rootvnode == NULL) {
6545 			vrefact(*vpp);
6546 			mp->mnt_rootvnode = *vpp;
6547 		} else {
6548 			if (mp->mnt_rootvnode != *vpp) {
6549 				if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
6550 					panic("%s: mismatch between vnode returned "
6551 					    "by VFS_CACHEDROOT and the one cached "
6552 					    "(%p != %p)",
6553 					    __func__, *vpp, mp->mnt_rootvnode);
6554 				}
6555 			}
6556 		}
6557 		MNT_IUNLOCK(mp);
6558 	}
6559 	return (0);
6560 }
6561 
6562 int
6563 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
6564 {
6565 	struct mount_pcpu *mpcpu;
6566 	struct vnode *vp;
6567 	int error;
6568 
6569 	if (!vfs_op_thread_enter(mp, mpcpu))
6570 		return (vfs_cache_root_fallback(mp, flags, vpp));
6571 	vp = atomic_load_ptr(&mp->mnt_rootvnode);
6572 	if (vp == NULL || VN_IS_DOOMED(vp)) {
6573 		vfs_op_thread_exit(mp, mpcpu);
6574 		return (vfs_cache_root_fallback(mp, flags, vpp));
6575 	}
6576 	vrefact(vp);
6577 	vfs_op_thread_exit(mp, mpcpu);
6578 	error = vn_lock(vp, flags);
6579 	if (error != 0) {
6580 		vrele(vp);
6581 		return (vfs_cache_root_fallback(mp, flags, vpp));
6582 	}
6583 	*vpp = vp;
6584 	return (0);
6585 }
6586 
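/*
 * Illustrative use (a sketch): lookup-style code obtains the root this
 * way and is expected to vput() it when done:
 *
 *	error = vfs_cache_root(mp, LK_SHARED, &vp);
 *	if (error != 0)
 *		return (error);
 *	...
 *	vput(vp);
 */
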
6587 struct vnode *
6588 vfs_cache_root_clear(struct mount *mp)
6589 {
6590 	struct vnode *vp;
6591 
6592 	/*
6593 	 * ops > 0 guarantees there is nobody who can see this vnode
6594 	 * ops > 0 guarantees there is nobody who can see this vnode.
6595 	MPASS(mp->mnt_vfs_ops > 0);
6596 	vp = mp->mnt_rootvnode;
6597 	if (vp != NULL)
6598 		vn_seqc_write_begin(vp);
6599 	mp->mnt_rootvnode = NULL;
6600 	return (vp);
6601 }
6602 
6603 void
6604 vfs_cache_root_set(struct mount *mp, struct vnode *vp)
6605 {
6606 
6607 	MPASS(mp->mnt_vfs_ops > 0);
6608 	vrefact(vp);
6609 	mp->mnt_rootvnode = vp;
6610 }
6611 
6612 /*
6613  * These are helper functions for filesystems to traverse all
6614  * their vnodes.  See MNT_VNODE_FOREACH_ALL() in sys/mount.h.
6615  *
6616  * This interface replaces MNT_VNODE_FOREACH.
6617  */
6618 
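/*
 * Sketch of the intended iteration pattern (see sys/mount.h): each vnode
 * is handed back with its interlock held, which the loop body must
 * release; MNT_VNODE_FOREACH_ALL_ABORT() is used to bail out early:
 *
 *	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
 *		if (vp->v_type != VREG) {
 *			VI_UNLOCK(vp);
 *			continue;
 *		}
 *		...
 *	}
 */
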
6619 struct vnode *
6620 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
6621 {
6622 	struct vnode *vp;
6623 
6624 	if (should_yield())
6625 		kern_yield(PRI_USER);
6626 	MNT_ILOCK(mp);
6627 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
6628 	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
6629 	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
6630 		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
6631 		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
6632 			continue;
6633 		VI_LOCK(vp);
6634 		if (VN_IS_DOOMED(vp)) {
6635 			VI_UNLOCK(vp);
6636 			continue;
6637 		}
6638 		break;
6639 	}
6640 	if (vp == NULL) {
6641 		__mnt_vnode_markerfree_all(mvp, mp);
6642 		/* MNT_IUNLOCK(mp); -- done in above function */
6643 		mtx_assert(MNT_MTX(mp), MA_NOTOWNED);
6644 		return (NULL);
6645 	}
6646 	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
6647 	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
6648 	MNT_IUNLOCK(mp);
6649 	return (vp);
6650 }
6651 
6652 struct vnode *
6653 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
6654 {
6655 	struct vnode *vp;
6656 
6657 	*mvp = vn_alloc_marker(mp);
6658 	MNT_ILOCK(mp);
6659 	MNT_REF(mp);
6660 
6661 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
6662 		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
6663 		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
6664 			continue;
6665 		VI_LOCK(vp);
6666 		if (VN_IS_DOOMED(vp)) {
6667 			VI_UNLOCK(vp);
6668 			continue;
6669 		}
6670 		break;
6671 	}
6672 	if (vp == NULL) {
6673 		MNT_REL(mp);
6674 		MNT_IUNLOCK(mp);
6675 		vn_free_marker(*mvp);
6676 		*mvp = NULL;
6677 		return (NULL);
6678 	}
6679 	TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes);
6680 	MNT_IUNLOCK(mp);
6681 	return (vp);
6682 }
6683 
6684 void
6685 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp)
6686 {
6687 
6688 	if (*mvp == NULL) {
6689 		MNT_IUNLOCK(mp);
6690 		return;
6691 	}
6692 
6693 	mtx_assert(MNT_MTX(mp), MA_OWNED);
6694 
6695 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
6696 	TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes);
6697 	MNT_REL(mp);
6698 	MNT_IUNLOCK(mp);
6699 	vn_free_marker(*mvp);
6700 	*mvp = NULL;
6701 }
6702 
6703 /*
6704  * These are helper functions for filesystems to traverse their
6705  * lazy vnodes.  See MNT_VNODE_FOREACH_LAZY() in sys/mount.h
6706  */
6707 static void
6708 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
6709 {
6710 
6711 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
6712 
6713 	MNT_ILOCK(mp);
6714 	MNT_REL(mp);
6715 	MNT_IUNLOCK(mp);
6716 	vn_free_marker(*mvp);
6717 	*mvp = NULL;
6718 }
6719 
6720 /*
6721  * Relock the mp mount vnode list lock with the vp vnode interlock in the
6722  * conventional lock order during mnt_vnode_next_lazy iteration.
6723  *
6724  * On entry, the mount vnode list lock is held and the vnode interlock is not.
6725  * The list lock is dropped and reacquired.  On success, both locks are held.
6726  * On failure, the mount vnode list lock is held but the vnode interlock is
6727  * not, and the procedure may have yielded.
6728  */
6729 static bool
6730 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp,
6731     struct vnode *vp)
6732 {
6733 
6734 	VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER &&
6735 	    TAILQ_NEXT(mvp, v_lazylist) != NULL, mvp,
6736 	    ("%s: bad marker", __func__));
6737 	VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp,
6738 	    ("%s: inappropriate vnode", __func__));
6739 	ASSERT_VI_UNLOCKED(vp, __func__);
6740 	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
6741 
6742 	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist);
6743 	TAILQ_INSERT_BEFORE(vp, mvp, v_lazylist);
6744 
6745 	/*
6746 	 * Note we may be racing against vdrop which transitioned the hold
6747 	 * count to 0 and now waits for the ->mnt_listmtx lock.  This is fine;
6748 	 * if we turn out to be the only user once we get the interlock, we
6749 	 * will simply vdrop.
6750 	 */
6751 	vhold(vp);
6752 	mtx_unlock(&mp->mnt_listmtx);
6753 	VI_LOCK(vp);
6754 	if (VN_IS_DOOMED(vp)) {
6755 		VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp);
6756 		goto out_lost;
6757 	}
6758 	VNPASS(vp->v_mflag & VMP_LAZYLIST, vp);
6759 	/*
6760 	 * There is nothing to do if we are the last user.
6761 	 */
6762 	if (!refcount_release_if_not_last(&vp->v_holdcnt))
6763 		goto out_lost;
6764 	mtx_lock(&mp->mnt_listmtx);
6765 	return (true);
6766 out_lost:
6767 	vdropl(vp);
6768 	maybe_yield();
6769 	mtx_lock(&mp->mnt_listmtx);
6770 	return (false);
6771 }
6772 
6773 static struct vnode *
6774 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
6775     void *cbarg)
6776 {
6777 	struct vnode *vp;
6778 
6779 	mtx_assert(&mp->mnt_listmtx, MA_OWNED);
6780 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
6781 restart:
6782 	vp = TAILQ_NEXT(*mvp, v_lazylist);
6783 	while (vp != NULL) {
6784 		if (vp->v_type == VMARKER) {
6785 			vp = TAILQ_NEXT(vp, v_lazylist);
6786 			continue;
6787 		}
6788 		/*
6789 		 * See if we want to process the vnode. Note we may encounter a
6790 		 * long string of vnodes we don't care about and hog the list
6791 		 * as a result. Check for it and requeue the marker.
6792 		 */
6793 		VNPASS(!VN_IS_DOOMED(vp), vp);
6794 		if (!cb(vp, cbarg)) {
6795 			if (!should_yield()) {
6796 				vp = TAILQ_NEXT(vp, v_lazylist);
6797 				continue;
6798 			}
6799 			TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp,
6800 			    v_lazylist);
6801 			TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp,
6802 			    v_lazylist);
6803 			mtx_unlock(&mp->mnt_listmtx);
6804 			kern_yield(PRI_USER);
6805 			mtx_lock(&mp->mnt_listmtx);
6806 			goto restart;
6807 		}
6808 		/*
6809 		 * Try-lock because this is the wrong lock order.
6810 		 */
6811 		if (!VI_TRYLOCK(vp) &&
6812 		    !mnt_vnode_next_lazy_relock(*mvp, mp, vp))
6813 			goto restart;
6814 		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
6815 		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
6816 		    ("alien vnode on the lazy list %p %p", vp, mp));
6817 		VNPASS(vp->v_mount == mp, vp);
6818 		VNPASS(!VN_IS_DOOMED(vp), vp);
6819 		break;
6820 	}
6821 	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
6822 
6823 	/* Check if we are done */
6824 	if (vp == NULL) {
6825 		mtx_unlock(&mp->mnt_listmtx);
6826 		mnt_vnode_markerfree_lazy(mvp, mp);
6827 		return (NULL);
6828 	}
6829 	TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist);
6830 	mtx_unlock(&mp->mnt_listmtx);
6831 	ASSERT_VI_LOCKED(vp, "lazy iter");
6832 	return (vp);
6833 }
6834 
6835 struct vnode *
6836 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
6837     void *cbarg)
6838 {
6839 
6840 	if (should_yield())
6841 		kern_yield(PRI_USER);
6842 	mtx_lock(&mp->mnt_listmtx);
6843 	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
6844 }
6845 
6846 struct vnode *
6847 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb,
6848     void *cbarg)
6849 {
6850 	struct vnode *vp;
6851 
6852 	if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist))
6853 		return (NULL);
6854 
6855 	*mvp = vn_alloc_marker(mp);
6856 	MNT_ILOCK(mp);
6857 	MNT_REF(mp);
6858 	MNT_IUNLOCK(mp);
6859 
6860 	mtx_lock(&mp->mnt_listmtx);
6861 	vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist);
6862 	if (vp == NULL) {
6863 		mtx_unlock(&mp->mnt_listmtx);
6864 		mnt_vnode_markerfree_lazy(mvp, mp);
6865 		return (NULL);
6866 	}
6867 	TAILQ_INSERT_BEFORE(vp, *mvp, v_lazylist);
6868 	return (mnt_vnode_next_lazy(mvp, mp, cb, cbarg));
6869 }
6870 
6871 void
6872 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp)
6873 {
6874 
6875 	if (*mvp == NULL)
6876 		return;
6877 
6878 	mtx_lock(&mp->mnt_listmtx);
6879 	TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist);
6880 	mtx_unlock(&mp->mnt_listmtx);
6881 	mnt_vnode_markerfree_lazy(mvp, mp);
6882 }
6883 
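/*
 * Sketch of the lazy-list analogue (see sys/mount.h): the cb callback
 * filters vnodes without taking the interlock, and each match is
 * returned with the interlock held, as vfs_msync()-style consumers
 * expect:
 *
 *	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, cb, cbarg) {
 *		...
 *		VI_UNLOCK(vp);
 *	}
 */
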
6884 int
6885 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp)
6886 {
6887 
6888 	if ((cnp->cn_flags & NOEXECCHECK) != 0) {
6889 		cnp->cn_flags &= ~NOEXECCHECK;
6890 		return (0);
6891 	}
6892 
6893 	return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, cnp->cn_thread));
6894 }
6895 
6896 /*
6897  * Do not use this variant unless you have means other than the hold count
6898  * to prevent the vnode from getting freed.
6899  */
6900 void
6901 vn_seqc_write_begin_unheld_locked(struct vnode *vp)
6902 {
6903 
6904 	ASSERT_VI_LOCKED(vp, __func__);
6905 	VNPASS(vp->v_seqc_users >= 0, vp);
6906 	vp->v_seqc_users++;
6907 	if (vp->v_seqc_users == 1)
6908 		seqc_sleepable_write_begin(&vp->v_seqc);
6909 }
6910 
6911 void
6912 vn_seqc_write_begin_locked(struct vnode *vp)
6913 {
6914 
6915 	ASSERT_VI_LOCKED(vp, __func__);
6916 	VNPASS(vp->v_holdcnt > 0, vp);
6917 	vn_seqc_write_begin_unheld_locked(vp);
6918 }
6919 
6920 void
6921 vn_seqc_write_begin(struct vnode *vp)
6922 {
6923 
6924 	VI_LOCK(vp);
6925 	vn_seqc_write_begin_locked(vp);
6926 	VI_UNLOCK(vp);
6927 }
6928 
6929 void
6930 vn_seqc_write_begin_unheld(struct vnode *vp)
6931 {
6932 
6933 	VI_LOCK(vp);
6934 	vn_seqc_write_begin_unheld_locked(vp);
6935 	VI_UNLOCK(vp);
6936 }
6937 
6938 void
6939 vn_seqc_write_end_locked(struct vnode *vp)
6940 {
6941 
6942 	ASSERT_VI_LOCKED(vp, __func__);
6943 	VNPASS(vp->v_seqc_users > 0, vp);
6944 	vp->v_seqc_users--;
6945 	if (vp->v_seqc_users == 0)
6946 		seqc_sleepable_write_end(&vp->v_seqc);
6947 }
6948 
6949 void
6950 vn_seqc_write_end(struct vnode *vp)
6951 {
6952 
6953 	VI_LOCK(vp);
6954 	vn_seqc_write_end_locked(vp);
6955 	VI_UNLOCK(vp);
6956 }
6957 
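/*
 * Read-side sketch for the seqc machinery above (macro names per
 * sys/vnode.h and sys/seqc.h; lockless consumers such as the fast path
 * lookup work this way): snapshot the counter, read the state of
 * interest, then verify nothing changed in the meantime:
 *
 *	seqc = vn_seqc_read_any(vp);
 *	if (seqc_in_modify(seqc))
 *		return (EAGAIN);
 *	...
 *	if (!vn_seqc_consistent(vp, seqc))
 *		return (EAGAIN);
 */
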
6958 /*
6959  * Special case handling for allocating and freeing vnodes.
6960  *
6961  * The counter remains unchanged on free so that a doomed vnode keeps
6962  * testing as being in modify for as long as it is accessible via SMR.
6963  */
6964 static void
6965 vn_seqc_init(struct vnode *vp)
6966 {
6967 
6968 	vp->v_seqc = 0;
6969 	vp->v_seqc_users = 0;
6970 }
6971 
6972 static void
6973 vn_seqc_write_end_free(struct vnode *vp)
6974 {
6975 
6976 	VNPASS(seqc_in_modify(vp->v_seqc), vp);
6977 	VNPASS(vp->v_seqc_users == 1, vp);
6978 }
6979 
6980 void
6981 vn_irflag_set_locked(struct vnode *vp, short toset)
6982 {
6983 	short flags;
6984 
6985 	ASSERT_VI_LOCKED(vp, __func__);
6986 	flags = vn_irflag_read(vp);
6987 	VNASSERT((flags & toset) == 0, vp,
6988 	    ("%s: some of the passed flags already set (have %d, passed %d)\n",
6989 	    __func__, flags, toset));
6990 	atomic_store_short(&vp->v_irflag, flags | toset);
6991 }
6992 
6993 void
6994 vn_irflag_set(struct vnode *vp, short toset)
6995 {
6996 
6997 	VI_LOCK(vp);
6998 	vn_irflag_set_locked(vp, toset);
6999 	VI_UNLOCK(vp);
7000 }
7001 
7002 void
7003 vn_irflag_set_cond_locked(struct vnode *vp, short toset)
7004 {
7005 	short flags;
7006 
7007 	ASSERT_VI_LOCKED(vp, __func__);
7008 	flags = vn_irflag_read(vp);
7009 	atomic_store_short(&vp->v_irflag, flags | toset);
7010 }
7011 
7012 void
7013 vn_irflag_set_cond(struct vnode *vp, short toset)
7014 {
7015 
7016 	VI_LOCK(vp);
7017 	vn_irflag_set_cond_locked(vp, toset);
7018 	VI_UNLOCK(vp);
7019 }
7020 
7021 void
7022 vn_irflag_unset_locked(struct vnode *vp, short tounset)
7023 {
7024 	short flags;
7025 
7026 	ASSERT_VI_LOCKED(vp, __func__);
7027 	flags = vn_irflag_read(vp);
7028 	VNASSERT((flags & tounset) == tounset, vp,
7029 	    ("%s: some of the passed flags not set (have %d, passed %d)\n",
7030 	    __func__, flags, tounset));
7031 	atomic_store_short(&vp->v_irflag, flags & ~tounset);
7032 }
7033 
7034 void
7035 vn_irflag_unset(struct vnode *vp, short tounset)
7036 {
7037 
7038 	VI_LOCK(vp);
7039 	vn_irflag_unset_locked(vp, tounset);
7040 	VI_UNLOCK(vp);
7041 }
7042
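/*
 * Read-side note (a sketch): v_irflag is read locklessly elsewhere; for
 * instance, VN_IS_DOOMED() in sys/vnode.h boils down to:
 *
 *	(vn_irflag_read(vp) & VIRF_DOOMED) != 0
 */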