xref: /freebsd/sys/kern/vfs_subr.c (revision c0020399a650364d0134f79f3fa319f84064372d)
1 /*-
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  */
36 
37 /*
38  * External virtual filesystem routines
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/condvar.h>
52 #include <sys/conf.h>
53 #include <sys/dirent.h>
54 #include <sys/event.h>
55 #include <sys/eventhandler.h>
56 #include <sys/extattr.h>
57 #include <sys/file.h>
58 #include <sys/fcntl.h>
59 #include <sys/jail.h>
60 #include <sys/kdb.h>
61 #include <sys/kernel.h>
62 #include <sys/kthread.h>
63 #include <sys/lockf.h>
64 #include <sys/malloc.h>
65 #include <sys/mount.h>
66 #include <sys/namei.h>
67 #include <sys/priv.h>
68 #include <sys/reboot.h>
69 #include <sys/sleepqueue.h>
70 #include <sys/stat.h>
71 #include <sys/sysctl.h>
72 #include <sys/syslog.h>
73 #include <sys/vmmeter.h>
74 #include <sys/vnode.h>
75 
76 #include <machine/stdarg.h>
77 
78 #include <security/mac/mac_framework.h>
79 
80 #include <vm/vm.h>
81 #include <vm/vm_object.h>
82 #include <vm/vm_extern.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_kern.h>
87 #include <vm/uma.h>
88 
89 #ifdef DDB
90 #include <ddb/ddb.h>
91 #endif
92 
93 #define	WI_MPSAFEQ	0
94 #define	WI_GIANTQ	1
95 
96 static MALLOC_DEFINE(M_NETADDR, "subr_export_host", "Export host address structure");
97 
98 static void	delmntque(struct vnode *vp);
99 static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
100 		    int slpflag, int slptimeo);
101 static void	syncer_shutdown(void *arg, int howto);
102 static int	vtryrecycle(struct vnode *vp);
103 static void	vbusy(struct vnode *vp);
104 static void	vinactive(struct vnode *, struct thread *);
105 static void	v_incr_usecount(struct vnode *);
106 static void	v_decr_usecount(struct vnode *);
107 static void	v_decr_useonly(struct vnode *);
108 static void	v_upgrade_usecount(struct vnode *);
109 static void	vfree(struct vnode *);
110 static void	vnlru_free(int);
111 static void	vgonel(struct vnode *);
112 static void	vfs_knllock(void *arg);
113 static void	vfs_knlunlock(void *arg);
114 static int	vfs_knllocked(void *arg);
115 static void	destroy_vpollinfo(struct vpollinfo *vi);
116 
117 /*
118  * Enable Giant pushdown based on whether the vm is mpsafe in this
119  * build.  Without mpsafevm the buffer cache cannot run Giant free.
120  */
121 int mpsafe_vfs = 1;
122 TUNABLE_INT("debug.mpsafevfs", &mpsafe_vfs);
123 SYSCTL_INT(_debug, OID_AUTO, mpsafevfs, CTLFLAG_RD, &mpsafe_vfs, 0,
124     "MPSAFE VFS");
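/*
 * Since the sysctl above is read-only, mpsafe_vfs can only be changed at
 * boot time via the loader tunable, e.g. with a loader.conf(5) entry such
 * as:
 *
 *	debug.mpsafevfs="0"
 */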
125 
126 /*
127  * Number of vnodes in existence.  Increased whenever getnewvnode()
128  * allocates a new vnode, decreased on vdestroy() called on VI_DOOMed
129  * vnode.
130  */
131 static unsigned long	numvnodes;
132 
133 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
134 
135 /*
136  * Conversion tables for conversion from vnode types to inode formats
137  * and back.
138  */
139 enum vtype iftovt_tab[16] = {
140 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
141 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
142 };
143 int vttoif_tab[10] = {
144 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
145 	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
146 };
147 
148 /*
149  * List of vnodes that are ready for recycling.
150  */
151 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
152 
153 /*
154  * Free vnode target.  Free vnodes may simply be files which have been stat'd
155  * but not read.  This is somewhat common, and a small cache of such files
156  * should be kept to avoid recreation costs.
157  */
158 static u_long wantfreevnodes;
159 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
160 /* Number of vnodes in the free list. */
161 static u_long freevnodes;
162 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
163 
164 /*
165  * Various variables used for debugging the new implementation of
166  * reassignbuf().
167  * XXX these are probably of (very) limited utility now.
168  */
169 static int reassignbufcalls;
170 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
171 
172 /*
173  * Cache for the mount type id assigned to NFS.  This is used for
174  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
175  */
176 int	nfs_mount_type = -1;
177 
178 /* To keep more than one thread at a time from running vfs_getnewfsid */
179 static struct mtx mntid_mtx;
180 
181 /*
182  * Lock for any access to the following:
183  *	vnode_free_list
184  *	numvnodes
185  *	freevnodes
186  */
187 static struct mtx vnode_free_list_mtx;
188 
189 /* Publicly exported FS */
190 struct nfs_public nfs_pub;
191 
192 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
193 static uma_zone_t vnode_zone;
194 static uma_zone_t vnodepoll_zone;
195 
196 /* Set to 1 to print out reclaim of active vnodes */
197 int	prtactive;
198 
199 /*
200  * The workitem queue.
201  *
202  * It is useful to delay writes of file data and filesystem metadata
203  * for tens of seconds so that quickly created and deleted files need
204  * not waste disk bandwidth being created and removed. To realize this,
205  * we append vnodes to a "workitem" queue. When running with a soft
206  * updates implementation, most pending metadata dependencies should
207  * not wait for more than a few seconds. Thus, filesystem metadata is
208  * delayed only about half the time that file data is delayed.
209  * Similarly, directory updates are more critical, so they are delayed
210  * only about a third of the time that file data is delayed. Thus, there are
211  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
212  * one each second (driven off the filesystem syncer process). The
213  * syncer_delayno variable indicates the next queue that is to be processed.
214  * Items that need to be processed soon are placed in this queue:
215  *
216  *	syncer_workitem_pending[syncer_delayno]
217  *
218  * A delay of fifteen seconds is done by placing the request fifteen
219  * entries later in the queue:
220  *
221  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
222  *
223  */
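/*
 * For example, with the default SYNCER_MAXDELAY of 32 the hashinit() calls
 * below leave syncer_mask at 31; if syncer_delayno currently points at
 * slot 20, a fifteen second delay files the bufobj in slot
 * (20 + 15) & 31 == 3, which the syncer will not visit again for another
 * fifteen seconds.
 */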
224 static int syncer_delayno;
225 static long syncer_mask;
226 LIST_HEAD(synclist, bufobj);
227 static struct synclist *syncer_workitem_pending[2];
228 /*
229  * The sync_mtx protects:
230  *	bo->bo_synclist
231  *	sync_vnode_count
232  *	syncer_delayno
233  *	syncer_state
234  *	syncer_workitem_pending
235  *	syncer_worklist_len
236  *	rushjob
237  */
238 static struct mtx sync_mtx;
239 static struct cv sync_wakeup;
240 
241 #define SYNCER_MAXDELAY		32
242 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
243 static int syncdelay = 30;		/* max time to delay syncing data */
244 static int filedelay = 30;		/* time to delay syncing files */
245 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
246 static int dirdelay = 29;		/* time to delay syncing directories */
247 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
248 static int metadelay = 28;		/* time to delay syncing metadata */
249 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
250 static int rushjob;		/* number of slots to run ASAP */
251 static int stat_rush_requests;	/* number of times I/O speeded up */
252 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
253 
254 /*
255  * When shutting down the syncer, run it at four times normal speed.
256  */
257 #define SYNCER_SHUTDOWN_SPEEDUP		4
258 static int sync_vnode_count;
259 static int syncer_worklist_len;
260 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
261     syncer_state;
262 
263 /*
264  * Number of vnodes we want to exist at any one time.  This is mostly used
265  * to size hash tables in vnode-related code.  It is normally not used in
266  * getnewvnode(), as wantfreevnodes is normally nonzero.
267  *
268  * XXX desiredvnodes is historical cruft and should not exist.
269  */
270 int desiredvnodes;
271 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
272     &desiredvnodes, 0, "Maximum number of vnodes");
273 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
274     &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
275 static int vnlru_nowhere;
276 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
277     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
278 
279 /*
280  * Macros to control when a vnode is freed and recycled.  All require
281  * the vnode interlock.
282  */
283 #define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
284 #define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
285 #define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)
286 
287 
288 /*
289  * Initialize the vnode management data structures.
290  */
291 #ifndef	MAXVNODES_MAX
292 #define	MAXVNODES_MAX	100000
293 #endif
294 static void
295 vntblinit(void *dummy __unused)
296 {
297 
298 	/*
299 	 * Desiredvnodes is a function of the physical memory size and
300 	 * the kernel's heap size.  Specifically, desiredvnodes scales
301 	 * in proportion to the physical memory size until two fifths
302 	 * of the kernel's heap size is consumed by vnodes and vm
303 	 * objects.
304 	 */
305 	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
306 	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
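	/*
	 * As a rough illustration (the numbers are hypothetical): with
	 * maxproc of 6000 and about 250000 physical pages (roughly 1GB of
	 * 4KB pages) the first term is 6000 + 62500 = 68500; if the two
	 * structures sum to about 600 bytes the second term is
	 * 2 * vm_kmem_size / 3000, and the smaller of the two wins.
	 */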
307 	if (desiredvnodes > MAXVNODES_MAX) {
308 		if (bootverbose)
309 			printf("Reducing kern.maxvnodes %d -> %d\n",
310 			    desiredvnodes, MAXVNODES_MAX);
311 		desiredvnodes = MAXVNODES_MAX;
312 	}
313 	wantfreevnodes = desiredvnodes / 4;
314 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
315 	TAILQ_INIT(&vnode_free_list);
316 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
317 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
318 	    NULL, NULL, UMA_ALIGN_PTR, 0);
319 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
320 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
321 	/*
322 	 * Initialize the filesystem syncer.
323 	 */
324 	syncer_workitem_pending[WI_MPSAFEQ] = hashinit(syncer_maxdelay, M_VNODE,
325 	    &syncer_mask);
326 	syncer_workitem_pending[WI_GIANTQ] = hashinit(syncer_maxdelay, M_VNODE,
327 	    &syncer_mask);
328 	syncer_maxdelay = syncer_mask + 1;
329 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
330 	cv_init(&sync_wakeup, "syncer");
331 }
332 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
333 
334 
335 /*
336  * Mark a mount point as busy. Used to synchronize access and to delay
337  * unmounting. Note that mountlist_mtx is not released on failure.
338  */
339 int
340 vfs_busy(struct mount *mp, int flags)
341 {
342 
343 	MPASS((flags & ~MBF_MASK) == 0);
344 	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
345 
346 	MNT_ILOCK(mp);
347 	MNT_REF(mp);
348 	/*
349 	 * If the mount point is currently being unmounted, sleep until the
350 	 * mount point's fate is decided.  If the thread doing the unmounting
351 	 * fails, it will clear the MNTK_UNMOUNT flag before waking us up,
352 	 * indicating that this mount point has survived the unmount attempt
353 	 * and vfs_busy should retry.  Otherwise the unmounting thread will
354 	 * set the MNTK_REFEXPIRE flag in addition to MNTK_UNMOUNT, indicating
355 	 * that the mount point is about to be really destroyed.  vfs_busy
356 	 * needs to release its reference on the mount point in this case and
357 	 * return with ENOENT, telling the caller that the mount it tried to
358 	 * busy is no longer valid.
359 	 */
360 	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
361 		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
362 			MNT_REL(mp);
363 			MNT_IUNLOCK(mp);
364 			CTR1(KTR_VFS, "%s: failed busying before sleeping",
365 			    __func__);
366 			return (ENOENT);
367 		}
368 		if (flags & MBF_MNTLSTLOCK)
369 			mtx_unlock(&mountlist_mtx);
370 		mp->mnt_kern_flag |= MNTK_MWAIT;
371 		msleep(mp, MNT_MTX(mp), PVFS, "vfs_busy", 0);
372 		if (flags & MBF_MNTLSTLOCK)
373 			mtx_lock(&mountlist_mtx);
374 	}
375 	if (flags & MBF_MNTLSTLOCK)
376 		mtx_unlock(&mountlist_mtx);
377 	mp->mnt_lockref++;
378 	MNT_IUNLOCK(mp);
379 	return (0);
380 }
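/*
 * A typical caller brackets work that must not race with an unmount,
 * roughly:
 *
 *	if ((error = vfs_busy(mp, 0)) != 0)
 *		return (error);
 *	... operate on the mount ...
 *	vfs_unbusy(mp);
 */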
381 
382 /*
383  * Free a busy filesystem.
384  */
385 void
386 vfs_unbusy(struct mount *mp)
387 {
388 
389 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
390 	MNT_ILOCK(mp);
391 	MNT_REL(mp);
392 	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
393 	mp->mnt_lockref--;
394 	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
395 		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
396 		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
397 		mp->mnt_kern_flag &= ~MNTK_DRAINING;
398 		wakeup(&mp->mnt_lockref);
399 	}
400 	MNT_IUNLOCK(mp);
401 }
402 
403 /*
404  * Lookup a mount point by filesystem identifier.
405  */
406 struct mount *
407 vfs_getvfs(fsid_t *fsid)
408 {
409 	struct mount *mp;
410 
411 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
412 	mtx_lock(&mountlist_mtx);
413 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
414 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
415 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
416 			vfs_ref(mp);
417 			mtx_unlock(&mountlist_mtx);
418 			return (mp);
419 		}
420 	}
421 	mtx_unlock(&mountlist_mtx);
422 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
423 	return ((struct mount *) 0);
424 }
425 
426 /*
427  * Lookup a mount point by filesystem identifier, busying it before
428  * returning.
429  */
430 struct mount *
431 vfs_busyfs(fsid_t *fsid)
432 {
433 	struct mount *mp;
434 	int error;
435 
436 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
437 	mtx_lock(&mountlist_mtx);
438 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
439 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
440 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
441 			error = vfs_busy(mp, MBF_MNTLSTLOCK);
442 			if (error) {
443 				mtx_unlock(&mountlist_mtx);
444 				return (NULL);
445 			}
446 			return (mp);
447 		}
448 	}
449 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
450 	mtx_unlock(&mountlist_mtx);
451 	return ((struct mount *) 0);
452 }
453 
454 /*
455  * Check if a user can access privileged mount options.
456  */
457 int
458 vfs_suser(struct mount *mp, struct thread *td)
459 {
460 	int error;
461 
462 	/*
463 	 * If the thread is jailed, but this is not a jail-friendly file
464 	 * system, deny immediately.
465 	 */
466 	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
467 		return (EPERM);
468 
469 	/*
470 	 * If the file system was mounted outside a jail and a jailed thread
471 	 * tries to access it, deny immediately.
472 	 */
473 	if (!jailed(mp->mnt_cred) && jailed(td->td_ucred))
474 		return (EPERM);
475 
476 	/*
477 	 * If the file system was mounted inside a different jail than that
478 	 * of the calling thread, deny immediately.
479 	 */
480 	if (jailed(mp->mnt_cred) && jailed(td->td_ucred) &&
481 	    mp->mnt_cred->cr_prison != td->td_ucred->cr_prison) {
482 		return (EPERM);
483 	}
484 
485 	/*
486 	 * If file system supports delegated administration, we don't check
487 	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
488 	 * by the file system itself.
489 	 * If this is not the user that did the original mount, we check for
490 	 * the PRIV_VFS_MOUNT_OWNER privilege.
491 	 */
492 	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
493 	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
494 		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
495 			return (error);
496 	}
497 	return (0);
498 }
499 
500 /*
501  * Get a new unique fsid.  Try to make its val[0] unique, since this value
502  * will be used to create fake device numbers for stat().  Also try (but
503  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
504  * support 16-bit device numbers.  We end up with unique val[0]'s for the
505  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
506  *
507  * Keep in mind that several mounts may be running in parallel.  Starting
508  * the search one past where the previous search terminated is both a
509  * micro-optimization and a defense against returning the same fsid to
510  * different mounts.
511  */
512 void
513 vfs_getnewfsid(struct mount *mp)
514 {
515 	static u_int16_t mntid_base;
516 	struct mount *nmp;
517 	fsid_t tfsid;
518 	int mtype;
519 
520 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
521 	mtx_lock(&mntid_mtx);
522 	mtype = mp->mnt_vfc->vfc_typenum;
523 	tfsid.val[1] = mtype;
524 	mtype = (mtype & 0xFF) << 24;
525 	for (;;) {
526 		tfsid.val[0] = makedev(255,
527 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
528 		mntid_base++;
529 		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
530 			break;
531 		vfs_rel(nmp);
532 	}
533 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
534 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
535 	mtx_unlock(&mntid_mtx);
536 }
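/*
 * To illustrate the packing above with hypothetical values: for a
 * vfc_typenum of 0x35 and mntid_base of 0x1234, the minor passed to
 * makedev() is 0x35000000 | 0x00120000 | 0x34 == 0x35120034.  Only the
 * low byte of mntid_base lands in the low 16 bits, which is why val[0]
 * stays unique mod 2^16 only for the first 2^8 calls.
 */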
537 
538 /*
539  * Knob to control the precision of file timestamps:
540  *
541  *   0 = seconds only; nanoseconds zeroed.
542  *   1 = seconds and nanoseconds, accurate within 1/HZ.
543  *   2 = seconds and nanoseconds, truncated to microseconds.
544  * >=3 = seconds and nanoseconds, maximum precision.
545  */
546 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
547 
548 static int timestamp_precision = TSP_SEC;
549 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
550     &timestamp_precision, 0, "");
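/*
 * The precision can be changed at run time, e.g. from userland with:
 *
 *	sysctl vfs.timestamp_precision=3
 *
 * which selects the full nanosecond resolution handled below.
 */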
551 
552 /*
553  * Get a current timestamp.
554  */
555 void
556 vfs_timestamp(struct timespec *tsp)
557 {
558 	struct timeval tv;
559 
560 	switch (timestamp_precision) {
561 	case TSP_SEC:
562 		tsp->tv_sec = time_second;
563 		tsp->tv_nsec = 0;
564 		break;
565 	case TSP_HZ:
566 		getnanotime(tsp);
567 		break;
568 	case TSP_USEC:
569 		microtime(&tv);
570 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
571 		break;
572 	case TSP_NSEC:
573 	default:
574 		nanotime(tsp);
575 		break;
576 	}
577 }
578 
579 /*
580  * Set vnode attributes to VNOVAL
581  */
582 void
583 vattr_null(struct vattr *vap)
584 {
585 
586 	vap->va_type = VNON;
587 	vap->va_size = VNOVAL;
588 	vap->va_bytes = VNOVAL;
589 	vap->va_mode = VNOVAL;
590 	vap->va_nlink = VNOVAL;
591 	vap->va_uid = VNOVAL;
592 	vap->va_gid = VNOVAL;
593 	vap->va_fsid = VNOVAL;
594 	vap->va_fileid = VNOVAL;
595 	vap->va_blocksize = VNOVAL;
596 	vap->va_rdev = VNOVAL;
597 	vap->va_atime.tv_sec = VNOVAL;
598 	vap->va_atime.tv_nsec = VNOVAL;
599 	vap->va_mtime.tv_sec = VNOVAL;
600 	vap->va_mtime.tv_nsec = VNOVAL;
601 	vap->va_ctime.tv_sec = VNOVAL;
602 	vap->va_ctime.tv_nsec = VNOVAL;
603 	vap->va_birthtime.tv_sec = VNOVAL;
604 	vap->va_birthtime.tv_nsec = VNOVAL;
605 	vap->va_flags = VNOVAL;
606 	vap->va_gen = VNOVAL;
607 	vap->va_vaflags = 0;
608 }
609 
610 /*
611  * This routine is called when we have too many vnodes.  It attempts
612  * to recycle a portion of a mount point's vnodes and will potentially
613  * free vnodes that still have VM backing store (VM backing store is
614  * typically the cause of a vnode blowout so we want to do this).
615  * Therefore, this operation is not considered cheap.
616  *
617  * A number of conditions may prevent a vnode from being reclaimed:
618  * the buffer cache may have references on the vnode, a directory
619  * vnode may still have references due to the namei cache representing
620  * underlying files, or the vnode may be in active use.  It is not
621  * desirable to reuse such vnodes.  These conditions may cause the
622  * number of vnodes to reach some minimum value regardless of what
623  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
624  */
625 static int
626 vlrureclaim(struct mount *mp)
627 {
628 	struct vnode *vp;
629 	int done;
630 	int trigger;
631 	int usevnodes;
632 	int count;
633 
634 	/*
635 	 * Calculate the trigger point; don't allow user
636 	 * screwups to blow us up.  This prevents us from
637 	 * recycling vnodes with lots of resident pages.  We
638 	 * aren't trying to free memory, we are trying to
639 	 * free vnodes.
640 	 */
641 	usevnodes = desiredvnodes;
642 	if (usevnodes <= 0)
643 		usevnodes = 1;
644 	trigger = cnt.v_page_count * 2 / usevnodes;
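	/*
	 * For example, with (illustrative numbers) 250000 pages and a
	 * desiredvnodes of 100000, trigger is 5, so vnodes caching more
	 * than five resident pages are skipped here.
	 */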
645 	done = 0;
646 	vn_start_write(NULL, &mp, V_WAIT);
647 	MNT_ILOCK(mp);
648 	count = mp->mnt_nvnodelistsize / 10 + 1;
649 	while (count != 0) {
650 		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
651 		while (vp != NULL && vp->v_type == VMARKER)
652 			vp = TAILQ_NEXT(vp, v_nmntvnodes);
653 		if (vp == NULL)
654 			break;
655 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
656 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
657 		--count;
658 		if (!VI_TRYLOCK(vp))
659 			goto next_iter;
660 		/*
661 		 * If it's been deconstructed already, it's still
662 		 * referenced, or it exceeds the trigger, skip it.
663 		 */
664 		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
665 		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
666 		    vp->v_object->resident_page_count > trigger)) {
667 			VI_UNLOCK(vp);
668 			goto next_iter;
669 		}
670 		MNT_IUNLOCK(mp);
671 		vholdl(vp);
672 		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
673 			vdrop(vp);
674 			goto next_iter_mntunlocked;
675 		}
676 		VI_LOCK(vp);
677 		/*
678 		 * v_usecount may have been bumped after VOP_LOCK() dropped
679 		 * the vnode interlock and before it was locked again.
680 		 *
681 		 * It is not necessary to recheck VI_DOOMED because it can
682 		 * only be set by another thread that holds both the vnode
683 		 * lock and vnode interlock.  If another thread has the
684 		 * vnode lock before we get to VOP_LOCK() and obtains the
685 		 * vnode interlock after VOP_LOCK() drops the vnode
686 		 * interlock, the other thread will be unable to drop the
687 		 * vnode lock before our VOP_LOCK() call fails.
688 		 */
689 		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
690 		    (vp->v_object != NULL &&
691 		    vp->v_object->resident_page_count > trigger)) {
692 			VOP_UNLOCK(vp, LK_INTERLOCK);
693 			goto next_iter_mntunlocked;
694 		}
695 		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
696 		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
697 		vgonel(vp);
698 		VOP_UNLOCK(vp, 0);
699 		vdropl(vp);
700 		done++;
701 next_iter_mntunlocked:
702 		if ((count % 256) != 0)
703 			goto relock_mnt;
704 		goto yield;
705 next_iter:
706 		if ((count % 256) != 0)
707 			continue;
708 		MNT_IUNLOCK(mp);
709 yield:
710 		uio_yield();
711 relock_mnt:
712 		MNT_ILOCK(mp);
713 	}
714 	MNT_IUNLOCK(mp);
715 	vn_finished_write(mp);
716 	return done;
717 }
718 
719 /*
720  * Attempt to keep the free list at wantfreevnodes length.
721  */
722 static void
723 vnlru_free(int count)
724 {
725 	struct vnode *vp;
726 	int vfslocked;
727 
728 	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
729 	for (; count > 0; count--) {
730 		vp = TAILQ_FIRST(&vnode_free_list);
731 		/*
732 		 * The list can be modified while the free_list_mtx
733 		 * has been dropped and vp could be NULL here.
734 		 */
735 		if (!vp)
736 			break;
737 		VNASSERT(vp->v_op != NULL, vp,
738 		    ("vnlru_free: vnode already reclaimed."));
739 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
740 		/*
741 		 * Don't recycle if we can't get the interlock.
742 		 */
743 		if (!VI_TRYLOCK(vp)) {
744 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
745 			continue;
746 		}
747 		VNASSERT(VCANRECYCLE(vp), vp,
748 		    ("vp inconsistent on freelist"));
749 		freevnodes--;
750 		vp->v_iflag &= ~VI_FREE;
751 		vholdl(vp);
752 		mtx_unlock(&vnode_free_list_mtx);
753 		VI_UNLOCK(vp);
754 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
755 		vtryrecycle(vp);
756 		VFS_UNLOCK_GIANT(vfslocked);
757 		/*
758 		 * If the recycled succeeded this vdrop will actually free
759 		 * the vnode.  If not it will simply place it back on
760 		 * the free list.
761 		 */
762 		vdrop(vp);
763 		mtx_lock(&vnode_free_list_mtx);
764 	}
765 }
766 /*
767  * Attempt to recycle vnodes in a context that is always safe to block.
768  * Calling vlrureclaim() from the bowels of filesystem code has some
769  * interesting deadlock problems.
770  */
771 static struct proc *vnlruproc;
772 static int vnlruproc_sig;
773 
774 static void
775 vnlru_proc(void)
776 {
777 	struct mount *mp, *nmp;
778 	int done, vfslocked;
779 	struct proc *p = vnlruproc;
780 
781 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
782 	    SHUTDOWN_PRI_FIRST);
783 
784 	for (;;) {
785 		kproc_suspend_check(p);
786 		mtx_lock(&vnode_free_list_mtx);
787 		if (freevnodes > wantfreevnodes)
788 			vnlru_free(freevnodes - wantfreevnodes);
789 		if (numvnodes <= desiredvnodes * 9 / 10) {
790 			vnlruproc_sig = 0;
791 			wakeup(&vnlruproc_sig);
792 			msleep(vnlruproc, &vnode_free_list_mtx,
793 			    PVFS|PDROP, "vlruwt", hz);
794 			continue;
795 		}
796 		mtx_unlock(&vnode_free_list_mtx);
797 		done = 0;
798 		mtx_lock(&mountlist_mtx);
799 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
800 			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
801 				nmp = TAILQ_NEXT(mp, mnt_list);
802 				continue;
803 			}
804 			vfslocked = VFS_LOCK_GIANT(mp);
805 			done += vlrureclaim(mp);
806 			VFS_UNLOCK_GIANT(vfslocked);
807 			mtx_lock(&mountlist_mtx);
808 			nmp = TAILQ_NEXT(mp, mnt_list);
809 			vfs_unbusy(mp);
810 		}
811 		mtx_unlock(&mountlist_mtx);
812 		if (done == 0) {
813 			EVENTHANDLER_INVOKE(vfs_lowvnodes, desiredvnodes / 10);
814 #if 0
815 			/* These messages are temporary debugging aids */
816 			if (vnlru_nowhere < 5)
817 				printf("vnlru process getting nowhere..\n");
818 			else if (vnlru_nowhere == 5)
819 				printf("vnlru process messages stopped.\n");
820 #endif
821 			vnlru_nowhere++;
822 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
823 		} else
824 			uio_yield();
825 	}
826 }
827 
828 static struct kproc_desc vnlru_kp = {
829 	"vnlru",
830 	vnlru_proc,
831 	&vnlruproc
832 };
833 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
834     &vnlru_kp);
835 
836 /*
837  * Routines having to do with the management of the vnode table.
838  */
839 
840 void
841 vdestroy(struct vnode *vp)
842 {
843 	struct bufobj *bo;
844 
845 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
846 	mtx_lock(&vnode_free_list_mtx);
847 	numvnodes--;
848 	mtx_unlock(&vnode_free_list_mtx);
849 	bo = &vp->v_bufobj;
850 	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
851 	    ("cleaned vnode still on the free list."));
852 	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
853 	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
854 	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
855 	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
856 	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
857 	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
858 	VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
859 	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
860 	VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
861 	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
862 	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
863 	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
864 	VI_UNLOCK(vp);
865 #ifdef MAC
866 	mac_vnode_destroy(vp);
867 #endif
868 	if (vp->v_pollinfo != NULL)
869 		destroy_vpollinfo(vp->v_pollinfo);
870 #ifdef INVARIANTS
871 	/* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
872 	vp->v_op = NULL;
873 #endif
874 	lockdestroy(vp->v_vnlock);
875 	mtx_destroy(&vp->v_interlock);
876 	mtx_destroy(BO_MTX(bo));
877 	uma_zfree(vnode_zone, vp);
878 }
879 
880 /*
881  * Try to recycle a freed vnode.  We abort if anyone picks up a reference
882  * before we actually vgone().  This function must be called with the vnode
883  * held to prevent the vnode from being returned to the free list midway
884  * through vgone().
885  */
886 static int
887 vtryrecycle(struct vnode *vp)
888 {
889 	struct mount *vnmp;
890 
891 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
892 	VNASSERT(vp->v_holdcnt, vp,
893 	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
894 	/*
895 	 * This vnode may be found and locked via some other list; if so, we
896 	 * can't recycle it yet.
897 	 */
898 	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
899 		CTR2(KTR_VFS,
900 		    "%s: impossible to recycle, vp %p lock is already held",
901 		    __func__, vp);
902 		return (EWOULDBLOCK);
903 	}
904 	/*
905 	 * Don't recycle if its filesystem is being suspended.
906 	 */
907 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
908 		VOP_UNLOCK(vp, 0);
909 		CTR2(KTR_VFS,
910 		    "%s: impossible to recycle, cannot start the write for %p",
911 		    __func__, vp);
912 		return (EBUSY);
913 	}
914 	/*
915 	 * If we got this far, we need to acquire the interlock and see if
916 	 * anyone picked up this vnode from another list.  If not, we will
917 	 * mark it with DOOMED via vgonel() so that anyone who does find it
918 	 * will skip over it.
919 	 */
920 	VI_LOCK(vp);
921 	if (vp->v_usecount) {
922 		VOP_UNLOCK(vp, LK_INTERLOCK);
923 		vn_finished_write(vnmp);
924 		CTR2(KTR_VFS,
925 		    "%s: impossible to recycle, %p is already referenced",
926 		    __func__, vp);
927 		return (EBUSY);
928 	}
929 	if ((vp->v_iflag & VI_DOOMED) == 0)
930 		vgonel(vp);
931 	VOP_UNLOCK(vp, LK_INTERLOCK);
932 	vn_finished_write(vnmp);
933 	return (0);
934 }
935 
936 /*
937  * Allocate and return a new vnode.
938  */
939 int
940 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
941     struct vnode **vpp)
942 {
943 	struct vnode *vp = NULL;
944 	struct bufobj *bo;
945 
946 	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
947 	mtx_lock(&vnode_free_list_mtx);
948 	/*
949 	 * Lend our context to reclaim vnodes if they've exceeded the max.
950 	 */
951 	if (freevnodes > wantfreevnodes)
952 		vnlru_free(1);
953 	/*
954 	 * Wait for available vnodes.
955 	 */
956 	if (numvnodes > desiredvnodes) {
957 		if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
958 			/*
959 			 * The file system is being suspended; we cannot risk a
960 			 * deadlock here, so allocate a new vnode anyway.
961 			 */
962 			if (freevnodes > wantfreevnodes)
963 				vnlru_free(freevnodes - wantfreevnodes);
964 			goto alloc;
965 		}
966 		if (vnlruproc_sig == 0) {
967 			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
968 			wakeup(vnlruproc);
969 		}
970 		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
971 		    "vlruwk", hz);
972 #if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
973 		if (numvnodes > desiredvnodes) {
974 			mtx_unlock(&vnode_free_list_mtx);
975 			return (ENFILE);
976 		}
977 #endif
978 	}
979 alloc:
980 	numvnodes++;
981 	mtx_unlock(&vnode_free_list_mtx);
982 	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
983 	/*
984 	 * Setup locks.
985 	 */
986 	vp->v_vnlock = &vp->v_lock;
987 	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
988 	/*
989 	 * By default, don't allow shared locks unless filesystems
990 	 * opt-in.
991 	 */
992 	lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
993 	/*
994 	 * Initialize bufobj.
995 	 */
996 	bo = &vp->v_bufobj;
997 	bo->__bo_vnode = vp;
998 	mtx_init(BO_MTX(bo), "bufobj interlock", NULL, MTX_DEF);
999 	bo->bo_ops = &buf_ops_bio;
1000 	bo->bo_private = vp;
1001 	TAILQ_INIT(&bo->bo_clean.bv_hd);
1002 	TAILQ_INIT(&bo->bo_dirty.bv_hd);
1003 	/*
1004 	 * Initialize namecache.
1005 	 */
1006 	LIST_INIT(&vp->v_cache_src);
1007 	TAILQ_INIT(&vp->v_cache_dst);
1008 	/*
1009 	 * Finalize various vnode identity bits.
1010 	 */
1011 	vp->v_type = VNON;
1012 	vp->v_tag = tag;
1013 	vp->v_op = vops;
1014 	v_incr_usecount(vp);
1015 	vp->v_data = 0;
1016 #ifdef MAC
1017 	mac_vnode_init(vp);
1018 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1019 		mac_vnode_associate_singlelabel(mp, vp);
1020 	else if (mp == NULL && vops != &dead_vnodeops)
1021 		printf("NULL mp in getnewvnode()\n");
1022 #endif
1023 	if (mp != NULL) {
1024 		bo->bo_bsize = mp->mnt_stat.f_iosize;
1025 		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
1026 			vp->v_vflag |= VV_NOKNOTE;
1027 	}
1028 
1029 	*vpp = vp;
1030 	return (0);
1031 }
1032 
1033 /*
1034  * Delete from old mount point vnode list, if on one.
1035  */
1036 static void
1037 delmntque(struct vnode *vp)
1038 {
1039 	struct mount *mp;
1040 
1041 	mp = vp->v_mount;
1042 	if (mp == NULL)
1043 		return;
1044 	MNT_ILOCK(mp);
1045 	vp->v_mount = NULL;
1046 	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
1047 		("bad mount point vnode list size"));
1048 	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1049 	mp->mnt_nvnodelistsize--;
1050 	MNT_REL(mp);
1051 	MNT_IUNLOCK(mp);
1052 }
1053 
1054 static void
1055 insmntque_stddtr(struct vnode *vp, void *dtr_arg)
1056 {
1057 
1058 	vp->v_data = NULL;
1059 	vp->v_op = &dead_vnodeops;
1060 	/* XXX non mp-safe fs may still call insmntque with vnode
1061 	   unlocked */
1062 	if (!VOP_ISLOCKED(vp))
1063 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1064 	vgone(vp);
1065 	vput(vp);
1066 }
1067 
1068 /*
1069  * Insert into list of vnodes for the new mount point, if available.
1070  */
1071 int
1072 insmntque1(struct vnode *vp, struct mount *mp,
1073 	void (*dtr)(struct vnode *, void *), void *dtr_arg)
1074 {
1075 	int locked;
1076 
1077 	KASSERT(vp->v_mount == NULL,
1078 		("insmntque: vnode already on per mount vnode list"));
1079 	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
1080 #ifdef DEBUG_VFS_LOCKS
1081 	if (!VFS_NEEDSGIANT(mp))
1082 		ASSERT_VOP_ELOCKED(vp,
1083 		    "insmntque: mp-safe fs and non-locked vp");
1084 #endif
1085 	MNT_ILOCK(mp);
1086 	if ((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
1087 	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
1088 	     mp->mnt_nvnodelistsize == 0)) {
1089 		locked = VOP_ISLOCKED(vp);
1090 		if (!locked || (locked == LK_EXCLUSIVE &&
1091 		     (vp->v_vflag & VV_FORCEINSMQ) == 0)) {
1092 			MNT_IUNLOCK(mp);
1093 			if (dtr != NULL)
1094 				dtr(vp, dtr_arg);
1095 			return (EBUSY);
1096 		}
1097 	}
1098 	vp->v_mount = mp;
1099 	MNT_REF(mp);
1100 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1101 	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
1102 		("neg mount point vnode list size"));
1103 	mp->mnt_nvnodelistsize++;
1104 	MNT_IUNLOCK(mp);
1105 	return (0);
1106 }
1107 
1108 int
1109 insmntque(struct vnode *vp, struct mount *mp)
1110 {
1111 
1112 	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
1113 }
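/*
 * A filesystem's vget path typically pairs getnewvnode() with insmntque();
 * a rough sketch, with error handling and fs-specific setup omitted (the
 * "somefs" names are hypothetical):
 *
 *	error = getnewvnode("somefs", mp, &somefs_vnodeops, &vp);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 *
 * On failure insmntque() runs the standard destructor, which marks the
 * vnode dead, vgone()s and vput()s it, so the caller must not touch vp
 * after an error.
 */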
1114 
1115 /*
1116  * Flush out and invalidate all buffers associated with a bufobj
1117  * Called with the underlying object locked.
1118  */
1119 int
1120 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
1121 {
1122 	int error;
1123 
1124 	BO_LOCK(bo);
1125 	if (flags & V_SAVE) {
1126 		error = bufobj_wwait(bo, slpflag, slptimeo);
1127 		if (error) {
1128 			BO_UNLOCK(bo);
1129 			return (error);
1130 		}
1131 		if (bo->bo_dirty.bv_cnt > 0) {
1132 			BO_UNLOCK(bo);
1133 			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
1134 				return (error);
1135 			/*
1136 			 * XXX We could save a lock/unlock if this was only
1137 			 * enabled under INVARIANTS
1138 			 */
1139 			BO_LOCK(bo);
1140 			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
1141 				panic("vinvalbuf: dirty bufs");
1142 		}
1143 	}
1144 	/*
1145 	 * If you alter this loop please notice that interlock is dropped and
1146 	 * reacquired in flushbuflist.  Special care is needed to ensure that
1147 	 * no race conditions occur from this.
1148 	 */
1149 	do {
1150 		error = flushbuflist(&bo->bo_clean,
1151 		    flags, bo, slpflag, slptimeo);
1152 		if (error == 0)
1153 			error = flushbuflist(&bo->bo_dirty,
1154 			    flags, bo, slpflag, slptimeo);
1155 		if (error != 0 && error != EAGAIN) {
1156 			BO_UNLOCK(bo);
1157 			return (error);
1158 		}
1159 	} while (error != 0);
1160 
1161 	/*
1162 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
1163 	 * have write I/O in-progress but if there is a VM object then the
1164 	 * VM object can also have read-I/O in-progress.
1165 	 */
1166 	do {
1167 		bufobj_wwait(bo, 0, 0);
1168 		BO_UNLOCK(bo);
1169 		if (bo->bo_object != NULL) {
1170 			VM_OBJECT_LOCK(bo->bo_object);
1171 			vm_object_pip_wait(bo->bo_object, "bovlbx");
1172 			VM_OBJECT_UNLOCK(bo->bo_object);
1173 		}
1174 		BO_LOCK(bo);
1175 	} while (bo->bo_numoutput > 0);
1176 	BO_UNLOCK(bo);
1177 
1178 	/*
1179 	 * Destroy the copy in the VM cache, too.
1180 	 */
1181 	if (bo->bo_object != NULL && (flags & (V_ALT | V_NORMAL)) == 0) {
1182 		VM_OBJECT_LOCK(bo->bo_object);
1183 		vm_object_page_remove(bo->bo_object, 0, 0,
1184 			(flags & V_SAVE) ? TRUE : FALSE);
1185 		VM_OBJECT_UNLOCK(bo->bo_object);
1186 	}
1187 
1188 #ifdef INVARIANTS
1189 	BO_LOCK(bo);
1190 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1191 	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
1192 		panic("vinvalbuf: flush failed");
1193 	BO_UNLOCK(bo);
1194 #endif
1195 	return (0);
1196 }
1197 
1198 /*
1199  * Flush out and invalidate all buffers associated with a vnode.
1200  * Called with the underlying object locked.
1201  */
1202 int
1203 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
1204 {
1205 
1206 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
1207 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1208 	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
1209 }
1210 
1211 /*
1212  * Flush out buffers on the specified list.
1213  *
1214  */
1215 static int
1216 flushbuflist( struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
1217     int slptimeo)
1218 {
1219 	struct buf *bp, *nbp;
1220 	int retval, error;
1221 	daddr_t lblkno;
1222 	b_xflags_t xflags;
1223 
1224 	ASSERT_BO_LOCKED(bo);
1225 
1226 	retval = 0;
1227 	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1228 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1229 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1230 			continue;
1231 		}
1232 		lblkno = 0;
1233 		xflags = 0;
1234 		if (nbp != NULL) {
1235 			lblkno = nbp->b_lblkno;
1236 			xflags = nbp->b_xflags &
1237 				(BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN);
1238 		}
1239 		retval = EAGAIN;
1240 		error = BUF_TIMELOCK(bp,
1241 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
1242 		    "flushbuf", slpflag, slptimeo);
1243 		if (error) {
1244 			BO_LOCK(bo);
1245 			return (error != ENOLCK ? error : EAGAIN);
1246 		}
1247 		KASSERT(bp->b_bufobj == bo,
1248 		    ("bp %p wrong b_bufobj %p should be %p",
1249 		    bp, bp->b_bufobj, bo));
1250 		if (bp->b_bufobj != bo) {	/* XXX: necessary ? */
1251 			BUF_UNLOCK(bp);
1252 			BO_LOCK(bo);
1253 			return (EAGAIN);
1254 		}
1255 		/*
1256 		 * XXX Since there are no node locks for NFS, I
1257 		 * believe there is a slight chance that a delayed
1258 		 * write will occur while sleeping just above, so
1259 		 * check for it.
1260 		 */
1261 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1262 		    (flags & V_SAVE)) {
1263 			bremfree(bp);
1264 			bp->b_flags |= B_ASYNC;
1265 			bwrite(bp);
1266 			BO_LOCK(bo);
1267 			return (EAGAIN);	/* XXX: why not loop ? */
1268 		}
1269 		bremfree(bp);
1270 		bp->b_flags |= (B_INVAL | B_RELBUF);
1271 		bp->b_flags &= ~B_ASYNC;
1272 		brelse(bp);
1273 		BO_LOCK(bo);
1274 		if (nbp != NULL &&
1275 		    (nbp->b_bufobj != bo ||
1276 		     nbp->b_lblkno != lblkno ||
1277 		     (nbp->b_xflags &
1278 		      (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags))
1279 			break;			/* nbp invalid */
1280 	}
1281 	return (retval);
1282 }
1283 
1284 /*
1285  * Truncate a file's buffer and pages to a specified length.  This
1286  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1287  * sync activity.
1288  */
1289 int
1290 vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
1291     off_t length, int blksize)
1292 {
1293 	struct buf *bp, *nbp;
1294 	int anyfreed;
1295 	int trunclbn;
1296 	struct bufobj *bo;
1297 
1298 	CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
1299 	    vp, cred, blksize, (uintmax_t)length);
1300 
1301 	/*
1302 	 * Round up to the *next* lbn.
1303 	 */
1304 	trunclbn = (length + blksize - 1) / blksize;
1305 
1306 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1307 restart:
1308 	bo = &vp->v_bufobj;
1309 	BO_LOCK(bo);
1310 	anyfreed = 1;
1311 	for (;anyfreed;) {
1312 		anyfreed = 0;
1313 		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1314 			if (bp->b_lblkno < trunclbn)
1315 				continue;
1316 			if (BUF_LOCK(bp,
1317 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1318 			    BO_MTX(bo)) == ENOLCK)
1319 				goto restart;
1320 
1321 			bremfree(bp);
1322 			bp->b_flags |= (B_INVAL | B_RELBUF);
1323 			bp->b_flags &= ~B_ASYNC;
1324 			brelse(bp);
1325 			anyfreed = 1;
1326 
1327 			if (nbp != NULL &&
1328 			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1329 			    (nbp->b_vp != vp) ||
1330 			    (nbp->b_flags & B_DELWRI))) {
1331 				goto restart;
1332 			}
1333 			BO_LOCK(bo);
1334 		}
1335 
1336 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1337 			if (bp->b_lblkno < trunclbn)
1338 				continue;
1339 			if (BUF_LOCK(bp,
1340 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1341 			    BO_MTX(bo)) == ENOLCK)
1342 				goto restart;
1343 			bremfree(bp);
1344 			bp->b_flags |= (B_INVAL | B_RELBUF);
1345 			bp->b_flags &= ~B_ASYNC;
1346 			brelse(bp);
1347 			anyfreed = 1;
1348 			if (nbp != NULL &&
1349 			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1350 			    (nbp->b_vp != vp) ||
1351 			    (nbp->b_flags & B_DELWRI) == 0)) {
1352 				goto restart;
1353 			}
1354 			BO_LOCK(bo);
1355 		}
1356 	}
1357 
1358 	if (length > 0) {
1359 restartsync:
1360 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1361 			if (bp->b_lblkno > 0)
1362 				continue;
1363 			/*
1364 			 * Since we hold the vnode lock this should only
1365 			 * fail if we're racing with the buf daemon.
1366 			 */
1367 			if (BUF_LOCK(bp,
1368 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1369 			    BO_MTX(bo)) == ENOLCK) {
1370 				goto restart;
1371 			}
1372 			VNASSERT((bp->b_flags & B_DELWRI), vp,
1373 			    ("buf(%p) on dirty queue without DELWRI", bp));
1374 
1375 			bremfree(bp);
1376 			bawrite(bp);
1377 			BO_LOCK(bo);
1378 			goto restartsync;
1379 		}
1380 	}
1381 
1382 	bufobj_wwait(bo, 0, 0);
1383 	BO_UNLOCK(bo);
1384 	vnode_pager_setsize(vp, length);
1385 
1386 	return (0);
1387 }
1388 
1389 /*
1390  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1391  * 		 a vnode.
1392  *
1393  *	NOTE: We have to deal with the special case of a background bitmap
1394  *	buffer, a situation where two buffers will have the same logical
1395  *	block offset.  We want (1) only the foreground buffer to be accessed
1396  *	in a lookup and (2) to differentiate between the foreground and
1397  *	background buffer in the splay tree algorithm because the splay
1398  *	tree cannot normally handle multiple entities with the same 'index'.
1399  *	We accomplish this by adding differentiating flags to the splay tree's
1400  *	numerical domain.
1401  */
1402 static
1403 struct buf *
1404 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1405 {
1406 	struct buf dummy;
1407 	struct buf *lefttreemax, *righttreemin, *y;
1408 
1409 	if (root == NULL)
1410 		return (NULL);
1411 	lefttreemax = righttreemin = &dummy;
1412 	for (;;) {
1413 		if (lblkno < root->b_lblkno ||
1414 		    (lblkno == root->b_lblkno &&
1415 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1416 			if ((y = root->b_left) == NULL)
1417 				break;
1418 			if (lblkno < y->b_lblkno) {
1419 				/* Rotate right. */
1420 				root->b_left = y->b_right;
1421 				y->b_right = root;
1422 				root = y;
1423 				if ((y = root->b_left) == NULL)
1424 					break;
1425 			}
1426 			/* Link into the new root's right tree. */
1427 			righttreemin->b_left = root;
1428 			righttreemin = root;
1429 		} else if (lblkno > root->b_lblkno ||
1430 		    (lblkno == root->b_lblkno &&
1431 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1432 			if ((y = root->b_right) == NULL)
1433 				break;
1434 			if (lblkno > y->b_lblkno) {
1435 				/* Rotate left. */
1436 				root->b_right = y->b_left;
1437 				y->b_left = root;
1438 				root = y;
1439 				if ((y = root->b_right) == NULL)
1440 					break;
1441 			}
1442 			/* Link into the new root's left tree. */
1443 			lefttreemax->b_right = root;
1444 			lefttreemax = root;
1445 		} else {
1446 			break;
1447 		}
1448 		root = y;
1449 	}
1450 	/* Assemble the new root. */
1451 	lefttreemax->b_right = root->b_left;
1452 	righttreemin->b_left = root->b_right;
1453 	root->b_left = dummy.b_right;
1454 	root->b_right = dummy.b_left;
1455 	return (root);
1456 }
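/*
 * In effect (lblkno, BX_BKGRDMARKER) forms a composite key: of two buffers
 * sharing a logical block number, the background bitmap copy (with
 * BX_BKGRDMARKER set) sorts strictly after the foreground one, so a lookup
 * that passes xflags == 0, as gbincore() does, can only match the
 * foreground buffer.
 */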
1457 
1458 static void
1459 buf_vlist_remove(struct buf *bp)
1460 {
1461 	struct buf *root;
1462 	struct bufv *bv;
1463 
1464 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1465 	ASSERT_BO_LOCKED(bp->b_bufobj);
1466 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
1467 	    (BX_VNDIRTY|BX_VNCLEAN),
1468 	    ("buf_vlist_remove: Buf %p is on two lists", bp));
1469 	if (bp->b_xflags & BX_VNDIRTY)
1470 		bv = &bp->b_bufobj->bo_dirty;
1471 	else
1472 		bv = &bp->b_bufobj->bo_clean;
1473 	if (bp != bv->bv_root) {
1474 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1475 		KASSERT(root == bp, ("splay lookup failed in remove"));
1476 	}
1477 	if (bp->b_left == NULL) {
1478 		root = bp->b_right;
1479 	} else {
1480 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1481 		root->b_right = bp->b_right;
1482 	}
1483 	bv->bv_root = root;
1484 	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1485 	bv->bv_cnt--;
1486 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1487 }
1488 
1489 /*
1490  * Add the buffer to the sorted clean or dirty block list using a
1491  * splay tree algorithm.
1492  *
1493  * NOTE: xflags is passed as a constant, optimizing this inline function!
1494  */
1495 static void
1496 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1497 {
1498 	struct buf *root;
1499 	struct bufv *bv;
1500 
1501 	ASSERT_BO_LOCKED(bo);
1502 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1503 	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
1504 	bp->b_xflags |= xflags;
1505 	if (xflags & BX_VNDIRTY)
1506 		bv = &bo->bo_dirty;
1507 	else
1508 		bv = &bo->bo_clean;
1509 
1510 	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1511 	if (root == NULL) {
1512 		bp->b_left = NULL;
1513 		bp->b_right = NULL;
1514 		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1515 	} else if (bp->b_lblkno < root->b_lblkno ||
1516 	    (bp->b_lblkno == root->b_lblkno &&
1517 	    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1518 		bp->b_left = root->b_left;
1519 		bp->b_right = root;
1520 		root->b_left = NULL;
1521 		TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
1522 	} else {
1523 		bp->b_right = root->b_right;
1524 		bp->b_left = root;
1525 		root->b_right = NULL;
1526 		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
1527 	}
1528 	bv->bv_cnt++;
1529 	bv->bv_root = bp;
1530 }
1531 
1532 /*
1533  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1534  * shadow buffers used in background bitmap writes.
1535  *
1536  * This code isn't quite as efficient as it could be because we are maintaining
1537  * two sorted lists and do not know which list the block resides in.
1538  *
1539  * During a "make buildworld" the desired buffer is found at one of
1540  * the roots more than 60% of the time.  Thus, checking both roots
1541  * before performing either splay eliminates unnecessary splays on the
1542  * first tree splayed.
1543  */
1544 struct buf *
1545 gbincore(struct bufobj *bo, daddr_t lblkno)
1546 {
1547 	struct buf *bp;
1548 
1549 	ASSERT_BO_LOCKED(bo);
1550 	if ((bp = bo->bo_clean.bv_root) != NULL &&
1551 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1552 		return (bp);
1553 	if ((bp = bo->bo_dirty.bv_root) != NULL &&
1554 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1555 		return (bp);
1556 	if ((bp = bo->bo_clean.bv_root) != NULL) {
1557 		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
1558 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1559 			return (bp);
1560 	}
1561 	if ((bp = bo->bo_dirty.bv_root) != NULL) {
1562 		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
1563 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1564 			return (bp);
1565 	}
1566 	return (NULL);
1567 }
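/*
 * Callers are expected to hold the bufobj lock across the lookup; a
 * minimal sketch:
 *
 *	BO_LOCK(bo);
 *	bp = gbincore(bo, lblkno);
 *	if (bp != NULL)
 *		... the block is already in core ...
 *	BO_UNLOCK(bo);
 */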
1568 
1569 /*
1570  * Associate a buffer with a vnode.
1571  */
1572 void
1573 bgetvp(struct vnode *vp, struct buf *bp)
1574 {
1575 	struct bufobj *bo;
1576 
1577 	bo = &vp->v_bufobj;
1578 	ASSERT_BO_LOCKED(bo);
1579 	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
1580 
1581 	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
1582 	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
1583 	    ("bgetvp: bp already attached! %p", bp));
1584 
1585 	vhold(vp);
1586 	if (VFS_NEEDSGIANT(vp->v_mount) || bo->bo_flag & BO_NEEDSGIANT)
1587 		bp->b_flags |= B_NEEDSGIANT;
1588 	bp->b_vp = vp;
1589 	bp->b_bufobj = bo;
1590 	/*
1591 	 * Insert onto list for new vnode.
1592 	 */
1593 	buf_vlist_add(bp, bo, BX_VNCLEAN);
1594 }
1595 
1596 /*
1597  * Disassociate a buffer from a vnode.
1598  */
1599 void
1600 brelvp(struct buf *bp)
1601 {
1602 	struct bufobj *bo;
1603 	struct vnode *vp;
1604 
1605 	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1606 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1607 
1608 	/*
1609 	 * Delete from old vnode list, if on one.
1610 	 */
1611 	vp = bp->b_vp;		/* XXX */
1612 	bo = bp->b_bufobj;
1613 	BO_LOCK(bo);
1614 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1615 		buf_vlist_remove(bp);
1616 	else
1617 		panic("brelvp: Buffer %p not on queue.", bp);
1618 	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1619 		bo->bo_flag &= ~BO_ONWORKLST;
1620 		mtx_lock(&sync_mtx);
1621 		LIST_REMOVE(bo, bo_synclist);
1622 		syncer_worklist_len--;
1623 		mtx_unlock(&sync_mtx);
1624 	}
1625 	bp->b_flags &= ~B_NEEDSGIANT;
1626 	bp->b_vp = NULL;
1627 	bp->b_bufobj = NULL;
1628 	BO_UNLOCK(bo);
1629 	vdrop(vp);
1630 }
1631 
1632 /*
1633  * Add an item to the syncer work queue.
1634  */
1635 static void
1636 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
1637 {
1638 	int queue, slot;
1639 
1640 	ASSERT_BO_LOCKED(bo);
1641 
1642 	mtx_lock(&sync_mtx);
1643 	if (bo->bo_flag & BO_ONWORKLST)
1644 		LIST_REMOVE(bo, bo_synclist);
1645 	else {
1646 		bo->bo_flag |= BO_ONWORKLST;
1647 		syncer_worklist_len++;
1648 	}
1649 
1650 	if (delay > syncer_maxdelay - 2)
1651 		delay = syncer_maxdelay - 2;
1652 	slot = (syncer_delayno + delay) & syncer_mask;
1653 
1654 	queue = VFS_NEEDSGIANT(bo->__bo_vnode->v_mount) ? WI_GIANTQ :
1655 	    WI_MPSAFEQ;
1656 	LIST_INSERT_HEAD(&syncer_workitem_pending[queue][slot], bo,
1657 	    bo_synclist);
1658 	mtx_unlock(&sync_mtx);
1659 }
1660 
1661 static int
1662 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1663 {
1664 	int error, len;
1665 
1666 	mtx_lock(&sync_mtx);
1667 	len = syncer_worklist_len - sync_vnode_count;
1668 	mtx_unlock(&sync_mtx);
1669 	error = SYSCTL_OUT(req, &len, sizeof(len));
1670 	return (error);
1671 }
1672 
1673 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1674     sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
1675 
1676 static struct proc *updateproc;
1677 static void sched_sync(void);
1678 static struct kproc_desc up_kp = {
1679 	"syncer",
1680 	sched_sync,
1681 	&updateproc
1682 };
1683 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
1684 
1685 static int
1686 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
1687 {
1688 	struct vnode *vp;
1689 	struct mount *mp;
1690 
1691 	*bo = LIST_FIRST(slp);
1692 	if (*bo == NULL)
1693 		return (0);
1694 	vp = (*bo)->__bo_vnode;	/* XXX */
1695 	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
1696 		return (1);
1697 	/*
1698 	 * We use vhold in case the vnode does not
1699 	 * successfully sync.  vhold prevents the vnode from
1700 	 * going away when we unlock the sync_mtx so that
1701 	 * we can acquire the vnode interlock.
1702 	 */
1703 	vholdl(vp);
1704 	mtx_unlock(&sync_mtx);
1705 	VI_UNLOCK(vp);
1706 	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1707 		vdrop(vp);
1708 		mtx_lock(&sync_mtx);
1709 		return (*bo == LIST_FIRST(slp));
1710 	}
1711 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1712 	(void) VOP_FSYNC(vp, MNT_LAZY, td);
1713 	VOP_UNLOCK(vp, 0);
1714 	vn_finished_write(mp);
1715 	BO_LOCK(*bo);
1716 	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
1717 		/*
1718 		 * Put us back on the worklist.  The worklist
1719 		 * routine will remove us from our current
1720 		 * position and then add us back in at a later
1721 		 * position.
1722 		 */
1723 		vn_syncer_add_to_worklist(*bo, syncdelay);
1724 	}
1725 	BO_UNLOCK(*bo);
1726 	vdrop(vp);
1727 	mtx_lock(&sync_mtx);
1728 	return (0);
1729 }
1730 
1731 /*
1732  * System filesystem synchronizer daemon.
1733  */
1734 static void
1735 sched_sync(void)
1736 {
1737 	struct synclist *gnext, *next;
1738 	struct synclist *gslp, *slp;
1739 	struct bufobj *bo;
1740 	long starttime;
1741 	struct thread *td = curthread;
1742 	int last_work_seen;
1743 	int net_worklist_len;
1744 	int syncer_final_iter;
1745 	int first_printf;
1746 	int error;
1747 
1748 	last_work_seen = 0;
1749 	syncer_final_iter = 0;
1750 	first_printf = 1;
1751 	syncer_state = SYNCER_RUNNING;
1752 	starttime = time_uptime;
1753 	td->td_pflags |= TDP_NORUNNINGBUF;
1754 
1755 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1756 	    SHUTDOWN_PRI_LAST);
1757 
1758 	mtx_lock(&sync_mtx);
1759 	for (;;) {
1760 		if (syncer_state == SYNCER_FINAL_DELAY &&
1761 		    syncer_final_iter == 0) {
1762 			mtx_unlock(&sync_mtx);
1763 			kproc_suspend_check(td->td_proc);
1764 			mtx_lock(&sync_mtx);
1765 		}
1766 		net_worklist_len = syncer_worklist_len - sync_vnode_count;
1767 		if (syncer_state != SYNCER_RUNNING &&
1768 		    starttime != time_uptime) {
1769 			if (first_printf) {
1770 				printf("\nSyncing disks, vnodes remaining...");
1771 				first_printf = 0;
1772 			}
1773 			printf("%d ", net_worklist_len);
1774 		}
1775 		starttime = time_uptime;
1776 
1777 		/*
1778 		 * Push files whose dirty time has expired.  Be careful
1779 		 * of interrupt race on slp queue.
1780 		 *
1781 		 * Skip over empty worklist slots when shutting down.
1782 		 */
1783 		do {
1784 			slp = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno];
1785 			gslp = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno];
1786 			syncer_delayno += 1;
1787 			if (syncer_delayno == syncer_maxdelay)
1788 				syncer_delayno = 0;
1789 			next = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno];
1790 			gnext = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno];
1791 			/*
1792 			 * If the worklist has wrapped since it
1793 			 * was emptied of all but syncer vnodes,
1794 			 * switch to the FINAL_DELAY state and run
1795 			 * for one more second.
1796 			 */
1797 			if (syncer_state == SYNCER_SHUTTING_DOWN &&
1798 			    net_worklist_len == 0 &&
1799 			    last_work_seen == syncer_delayno) {
1800 				syncer_state = SYNCER_FINAL_DELAY;
1801 				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1802 			}
1803 		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1804 		    LIST_EMPTY(gslp) && syncer_worklist_len > 0);
1805 
1806 		/*
1807 		 * Keep track of the last time there was anything
1808 		 * on the worklist other than syncer vnodes.
1809 		 * Return to the SHUTTING_DOWN state if any
1810 		 * new work appears.
1811 		 */
1812 		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1813 			last_work_seen = syncer_delayno;
1814 		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1815 			syncer_state = SYNCER_SHUTTING_DOWN;
1816 		while (!LIST_EMPTY(slp)) {
1817 			error = sync_vnode(slp, &bo, td);
1818 			if (error == 1) {
1819 				LIST_REMOVE(bo, bo_synclist);
1820 				LIST_INSERT_HEAD(next, bo, bo_synclist);
1821 				continue;
1822 			}
1823 		}
1824 		if (!LIST_EMPTY(gslp)) {
1825 			mtx_unlock(&sync_mtx);
1826 			mtx_lock(&Giant);
1827 			mtx_lock(&sync_mtx);
1828 			while (!LIST_EMPTY(gslp)) {
1829 				error = sync_vnode(gslp, &bo, td);
1830 				if (error == 1) {
1831 					LIST_REMOVE(bo, bo_synclist);
1832 					LIST_INSERT_HEAD(gnext, bo,
1833 					    bo_synclist);
1834 					continue;
1835 				}
1836 			}
1837 			mtx_unlock(&Giant);
1838 		}
1839 		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1840 			syncer_final_iter--;
1841 		/*
1842 		 * The variable rushjob allows the kernel to speed up the
1843 		 * processing of the filesystem syncer process. A rushjob
1844 		 * value of N tells the filesystem syncer to process the next
1845 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1846 		 * is used by the soft update code to speed up the filesystem
1847 		 * syncer process when the incore state is getting so far
1848 		 * ahead of the disk that the kernel memory pool is being
1849 		 * threatened with exhaustion.
1850 		 */
1851 		if (rushjob > 0) {
1852 			rushjob -= 1;
1853 			continue;
1854 		}
1855 		/*
1856 		 * Just sleep for a short period of time between
1857 		 * iterations when shutting down to allow some I/O
1858 		 * to happen.
1859 		 *
1860 		 * If it has taken us less than a second to process the
1861 		 * current work, then wait. Otherwise start right over
1862 		 * again. We can still lose time if any single round
1863 		 * takes more than two seconds, but it does not really
1864 		 * matter as we are just trying to generally pace the
1865 		 * filesystem activity.
1866 		 */
1867 		if (syncer_state != SYNCER_RUNNING)
1868 			cv_timedwait(&sync_wakeup, &sync_mtx,
1869 			    hz / SYNCER_SHUTDOWN_SPEEDUP);
1870 		else if (time_uptime == starttime)
1871 			cv_timedwait(&sync_wakeup, &sync_mtx, hz);
1872 	}
1873 }
1874 
1875 /*
1876  * Request the syncer daemon to speed up its work.
1877  * We never push it to speed up more than half of its
1878  * normal turn time, otherwise it could take over the cpu.
1879  */
1880 int
1881 speedup_syncer(void)
1882 {
1883 	int ret = 0;
1884 
1885 	mtx_lock(&sync_mtx);
1886 	if (rushjob < syncdelay / 2) {
1887 		rushjob += 1;
1888 		stat_rush_requests += 1;
1889 		ret = 1;
1890 	}
1891 	mtx_unlock(&sync_mtx);
1892 	cv_broadcast(&sync_wakeup);
1893 	return (ret);
1894 }
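/*
 * A hypothetical caller sketch (not code from this file): a subsystem that
 * sees dirty state accumulating faster than the syncer drains it could
 * request extra passes with
 *
 *	while (dirty_backlog_is_growing())	(hypothetical predicate)
 *		if (speedup_syncer() == 0)
 *			break;
 *
 * Each successful call asks the syncer to process one additional second of
 * its worklist; requests stop being granted once rushjob reaches
 * syncdelay / 2.
 */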
1895 
1896 /*
1897  * Tell the syncer to speed up its work and run through its work
1898  * list several times, then tell it to shut down.
1899  */
1900 static void
1901 syncer_shutdown(void *arg, int howto)
1902 {
1903 
1904 	if (howto & RB_NOSYNC)
1905 		return;
1906 	mtx_lock(&sync_mtx);
1907 	syncer_state = SYNCER_SHUTTING_DOWN;
1908 	rushjob = 0;
1909 	mtx_unlock(&sync_mtx);
1910 	cv_broadcast(&sync_wakeup);
1911 	kproc_shutdown(arg, howto);
1912 }
1913 
1914 /*
1915  * Reassign a buffer from one vnode to another.
1916  * Used to assign file specific control information
1917  * (indirect blocks) to the vnode to which they belong.
1918  */
1919 void
1920 reassignbuf(struct buf *bp)
1921 {
1922 	struct vnode *vp;
1923 	struct bufobj *bo;
1924 	int delay;
1925 #ifdef INVARIANTS
1926 	struct bufv *bv;
1927 #endif
1928 
1929 	vp = bp->b_vp;
1930 	bo = bp->b_bufobj;
1931 	++reassignbufcalls;
1932 
1933 	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
1934 	    bp, bp->b_vp, bp->b_flags);
1935 	/*
1936 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1937 	 * is not fully linked in.
1938 	 */
1939 	if (bp->b_flags & B_PAGING)
1940 		panic("cannot reassign paging buffer");
1941 
1942 	/*
1943 	 * Delete from old vnode list, if on one.
1944 	 */
1945 	BO_LOCK(bo);
1946 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1947 		buf_vlist_remove(bp);
1948 	else
1949 		panic("reassignbuf: Buffer %p not on queue.", bp);
1950 	/*
1951 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1952 	 * of clean buffers.
1953 	 */
1954 	if (bp->b_flags & B_DELWRI) {
1955 		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
1956 			switch (vp->v_type) {
1957 			case VDIR:
1958 				delay = dirdelay;
1959 				break;
1960 			case VCHR:
1961 				delay = metadelay;
1962 				break;
1963 			default:
1964 				delay = filedelay;
1965 			}
1966 			vn_syncer_add_to_worklist(bo, delay);
1967 		}
1968 		buf_vlist_add(bp, bo, BX_VNDIRTY);
1969 	} else {
1970 		buf_vlist_add(bp, bo, BX_VNCLEAN);
1971 
1972 		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1973 			mtx_lock(&sync_mtx);
1974 			LIST_REMOVE(bo, bo_synclist);
1975 			syncer_worklist_len--;
1976 			mtx_unlock(&sync_mtx);
1977 			bo->bo_flag &= ~BO_ONWORKLST;
1978 		}
1979 	}
1980 #ifdef INVARIANTS
1981 	bv = &bo->bo_clean;
1982 	bp = TAILQ_FIRST(&bv->bv_hd);
1983 	KASSERT(bp == NULL || bp->b_bufobj == bo,
1984 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1985 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
1986 	KASSERT(bp == NULL || bp->b_bufobj == bo,
1987 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1988 	bv = &bo->bo_dirty;
1989 	bp = TAILQ_FIRST(&bv->bv_hd);
1990 	KASSERT(bp == NULL || bp->b_bufobj == bo,
1991 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1992 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
1993 	KASSERT(bp == NULL || bp->b_bufobj == bo,
1994 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1995 #endif
1996 	BO_UNLOCK(bo);
1997 }
1998 
1999 /*
2000  * Increment the use and hold counts on the vnode, taking care to reference
2001  * the driver's usecount if this is a chardev.  The vholdl() will remove
2002  * the vnode from the free list if it is presently free.  Requires the
2003  * vnode interlock and returns with it held.
2004  */
2005 static void
2006 v_incr_usecount(struct vnode *vp)
2007 {
2008 
2009 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2010 	vp->v_usecount++;
2011 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2012 		dev_lock();
2013 		vp->v_rdev->si_usecount++;
2014 		dev_unlock();
2015 	}
2016 	vholdl(vp);
2017 }
2018 
2019 /*
2020  * Turn a holdcnt into a use+holdcnt such that only one call to
2021  * v_decr_usecount is needed.
2022  */
2023 static void
2024 v_upgrade_usecount(struct vnode *vp)
2025 {
2026 
2027 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2028 	vp->v_usecount++;
2029 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2030 		dev_lock();
2031 		vp->v_rdev->si_usecount++;
2032 		dev_unlock();
2033 	}
2034 }
2035 
2036 /*
2037  * Decrement the vnode use and hold count along with the driver's usecount
2038  * if this is a chardev.  The vdropl() below releases the vnode interlock
2039  * as it may free the vnode.
2040  */
2041 static void
2042 v_decr_usecount(struct vnode *vp)
2043 {
2044 
2045 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2046 	VNASSERT(vp->v_usecount > 0, vp,
2047 	    ("v_decr_usecount: negative usecount"));
2048 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2049 	vp->v_usecount--;
2050 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2051 		dev_lock();
2052 		vp->v_rdev->si_usecount--;
2053 		dev_unlock();
2054 	}
2055 	vdropl(vp);
2056 }
2057 
2058 /*
2059  * Decrement only the use count and driver use count.  This is intended to
2060  * be paired with a follow-on vdropl() to release the remaining hold count.
2061  * In this way we may vgone() a vnode with a 0 usecount without risk of
2062  * having it end up on a free list because the hold count is kept above 0.
2063  */
2064 static void
2065 v_decr_useonly(struct vnode *vp)
2066 {
2067 
2068 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2069 	VNASSERT(vp->v_usecount > 0, vp,
2070 	    ("v_decr_useonly: negative usecount"));
2071 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2072 	vp->v_usecount--;
2073 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2074 		dev_lock();
2075 		vp->v_rdev->si_usecount--;
2076 		dev_unlock();
2077 	}
2078 }
2079 
2080 /*
2081  * Grab a particular vnode from the free list, increment its
2082  * reference count and lock it.  VI_DOOMED is set if the vnode
2083  * is being destroyed.  Only callers who specify LK_RETRY will
2084  * see doomed vnodes.  If inactive processing was delayed in
2085  * vput, try to do it here.
2086  */
2087 int
2088 vget(struct vnode *vp, int flags, struct thread *td)
2089 {
2090 	int error;
2091 
2092 	error = 0;
2093 	VFS_ASSERT_GIANT(vp->v_mount);
2094 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2095 	    ("vget: invalid lock operation"));
2096 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2097 
2098 	if ((flags & LK_INTERLOCK) == 0)
2099 		VI_LOCK(vp);
2100 	vholdl(vp);
2101 	if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) {
2102 		vdrop(vp);
2103 		CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2104 		    vp);
2105 		return (error);
2106 	}
2107 	if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2108 		panic("vget: vn_lock failed to return ENOENT\n");
2109 	VI_LOCK(vp);
2110 	/* Upgrade our holdcnt to a usecount. */
2111 	v_upgrade_usecount(vp);
2112 	/*
2113  	 * We don't guarantee that any particular close will
2114 	 * trigger inactive processing so just make a best effort
2115 	 * here at preventing a reference to a removed file.  If
2116 	 * we don't succeed no harm is done.
2117 	 */
2118 	if (vp->v_iflag & VI_OWEINACT) {
2119 		if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
2120 		    (flags & LK_NOWAIT) == 0)
2121 			vinactive(vp, td);
2122 		vp->v_iflag &= ~VI_OWEINACT;
2123 	}
2124 	VI_UNLOCK(vp);
2125 	return (0);
2126 }
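/*
 * A minimal usage sketch (assuming the caller holds the vnode interlock,
 * which pins vp while the lock request is made):
 *
 *	VI_LOCK(vp);
 *	error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread);
 *	if (error == 0) {
 *		... use the referenced, exclusively locked vnode ...
 *		vput(vp);
 *	}
 */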
2127 
2128 /*
2129  * Increase the reference count of a vnode.
2130  */
2131 void
2132 vref(struct vnode *vp)
2133 {
2134 
2135 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2136 	VI_LOCK(vp);
2137 	v_incr_usecount(vp);
2138 	VI_UNLOCK(vp);
2139 }
2140 
2141 /*
2142  * Return reference count of a vnode.
2143  *
2144  * The results of this call are only guaranteed when some mechanism other
2145  * than the VI lock is used to stop other processes from gaining references
2146  * to the vnode.  This may be the case if the caller holds the only reference.
2147  * This is also useful when stale data is acceptable as race conditions may
2148  * be accounted for by some other means.
2149  */
2150 int
2151 vrefcnt(struct vnode *vp)
2152 {
2153 	int usecnt;
2154 
2155 	VI_LOCK(vp);
2156 	usecnt = vp->v_usecount;
2157 	VI_UNLOCK(vp);
2158 
2159 	return (usecnt);
2160 }
2161 
2162 
2163 /*
2164  * Vnode put/release.
2165  * If count drops to zero, call inactive routine and return to freelist.
2166  */
2167 void
2168 vrele(struct vnode *vp)
2169 {
2170 	struct thread *td = curthread;	/* XXX */
2171 
2172 	KASSERT(vp != NULL, ("vrele: null vp"));
2173 	VFS_ASSERT_GIANT(vp->v_mount);
2174 
2175 	VI_LOCK(vp);
2176 
2177 	/* Skip this v_writecount check if we're going to panic below. */
2178 	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2179 	    ("vrele: missed vn_close"));
2180 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2181 
2182 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2183 	    vp->v_usecount == 1)) {
2184 		v_decr_usecount(vp);
2185 		return;
2186 	}
2187 	if (vp->v_usecount != 1) {
2188 #ifdef DIAGNOSTIC
2189 		vprint("vrele: negative ref count", vp);
2190 #endif
2191 		VI_UNLOCK(vp);
2192 		panic("vrele: negative ref cnt");
2193 	}
2194 	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
2195 	/*
2196 	 * We want to hold the vnode until the inactive finishes to
2197 	 * prevent vgone() races.  We drop the use count here and the
2198 	 * hold count below when we're done.
2199 	 */
2200 	v_decr_useonly(vp);
2201 	/*
2202 	 * We must call VOP_INACTIVE with the node locked. Mark
2203 	 * as VI_DOINGINACT to avoid recursion.
2204 	 */
2205 	vp->v_iflag |= VI_OWEINACT;
2206 	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {
2207 		VI_LOCK(vp);
2208 		if (vp->v_usecount > 0)
2209 			vp->v_iflag &= ~VI_OWEINACT;
2210 		if (vp->v_iflag & VI_OWEINACT)
2211 			vinactive(vp, td);
2212 		VOP_UNLOCK(vp, 0);
2213 	} else {
2214 		VI_LOCK(vp);
2215 		if (vp->v_usecount > 0)
2216 			vp->v_iflag &= ~VI_OWEINACT;
2217 	}
2218 	vdropl(vp);
2219 }
2220 
2221 /*
2222  * Release an already locked vnode.  This gives the same effect as
2223  * unlock+vrele(), but takes less time and avoids releasing and
2224  * re-acquiring the lock (as vrele() acquires the lock internally).
2225  */
2226 void
2227 vput(struct vnode *vp)
2228 {
2229 	struct thread *td = curthread;	/* XXX */
2230 	int error;
2231 
2232 	KASSERT(vp != NULL, ("vput: null vp"));
2233 	ASSERT_VOP_LOCKED(vp, "vput");
2234 	VFS_ASSERT_GIANT(vp->v_mount);
2235 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2236 	VI_LOCK(vp);
2237 	/* Skip this v_writecount check if we're going to panic below. */
2238 	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2239 	    ("vput: missed vn_close"));
2240 	error = 0;
2241 
2242 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2243 	    vp->v_usecount == 1)) {
2244 		VOP_UNLOCK(vp, 0);
2245 		v_decr_usecount(vp);
2246 		return;
2247 	}
2248 
2249 	if (vp->v_usecount != 1) {
2250 #ifdef DIAGNOSTIC
2251 		vprint("vput: negative ref count", vp);
2252 #endif
2253 		panic("vput: negative ref cnt");
2254 	}
2255 	CTR2(KTR_VFS, "%s: return to freelist the vnode %p", __func__, vp);
2256 	/*
2257 	 * We want to hold the vnode until the inactive finishes to
2258 	 * prevent vgone() races.  We drop the use count here and the
2259 	 * hold count below when we're done.
2260 	 */
2261 	v_decr_useonly(vp);
2262 	vp->v_iflag |= VI_OWEINACT;
2263 	if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2264 		error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT);
2265 		VI_LOCK(vp);
2266 		if (error) {
2267 			if (vp->v_usecount > 0)
2268 				vp->v_iflag &= ~VI_OWEINACT;
2269 			goto done;
2270 		}
2271 	}
2272 	if (vp->v_usecount > 0)
2273 		vp->v_iflag &= ~VI_OWEINACT;
2274 	if (vp->v_iflag & VI_OWEINACT)
2275 		vinactive(vp, td);
2276 	VOP_UNLOCK(vp, 0);
2277 done:
2278 	vdropl(vp);
2279 }
2280 
2281 /*
2282  * Somebody doesn't want the vnode recycled.
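 * Bumping the hold count (v_holdcnt) pins the vnode structure: a held
 * vnode is taken off the free list and is neither recycled nor destroyed
 * until vdrop() releases the hold.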
2283  */
2284 void
2285 vhold(struct vnode *vp)
2286 {
2287 
2288 	VI_LOCK(vp);
2289 	vholdl(vp);
2290 	VI_UNLOCK(vp);
2291 }
2292 
2293 void
2294 vholdl(struct vnode *vp)
2295 {
2296 
2297 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2298 	vp->v_holdcnt++;
2299 	if (VSHOULDBUSY(vp))
2300 		vbusy(vp);
2301 }
2302 
2303 /*
2304  * Note that there is one less who cares about this vnode.  vdrop() is the
2305  * opposite of vhold().
2306  */
2307 void
2308 vdrop(struct vnode *vp)
2309 {
2310 
2311 	VI_LOCK(vp);
2312 	vdropl(vp);
2313 }
2314 
2315 /*
2316  * Drop the hold count of the vnode.  If this is the last reference to
2317  * the vnode, we will free it if it has been vgone'd; otherwise it is
2318  * placed on the free list.
2319  */
2320 void
2321 vdropl(struct vnode *vp)
2322 {
2323 
2324 	ASSERT_VI_LOCKED(vp, "vdropl");
2325 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2326 	if (vp->v_holdcnt <= 0)
2327 		panic("vdrop: holdcnt %d", vp->v_holdcnt);
2328 	vp->v_holdcnt--;
2329 	if (vp->v_holdcnt == 0) {
2330 		if (vp->v_iflag & VI_DOOMED) {
2331 			CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__,
2332 			    vp);
2333 			vdestroy(vp);
2334 			return;
2335 		} else
2336 			vfree(vp);
2337 	}
2338 	VI_UNLOCK(vp);
2339 }
2340 
2341 /*
2342  * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2343  * flags.  DOINGINACT prevents us from recursing in calls to vinactive.
2344  * OWEINACT tracks whether a vnode missed a call to inactive due to a
2345  * failed lock upgrade.
2346  */
2347 static void
2348 vinactive(struct vnode *vp, struct thread *td)
2349 {
2350 
2351 	ASSERT_VOP_ELOCKED(vp, "vinactive");
2352 	ASSERT_VI_LOCKED(vp, "vinactive");
2353 	VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
2354 	    ("vinactive: recursed on VI_DOINGINACT"));
2355 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2356 	vp->v_iflag |= VI_DOINGINACT;
2357 	vp->v_iflag &= ~VI_OWEINACT;
2358 	VI_UNLOCK(vp);
2359 	VOP_INACTIVE(vp, td);
2360 	VI_LOCK(vp);
2361 	VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
2362 	    ("vinactive: lost VI_DOINGINACT"));
2363 	vp->v_iflag &= ~VI_DOINGINACT;
2364 }
2365 
2366 /*
2367  * Remove any vnodes in the vnode table belonging to mount point mp.
2368  *
2369  * If FORCECLOSE is not specified, there should not be any active ones;
2370  * return an error if any are found (nb: this is a user error, not a
2371  * system error). If FORCECLOSE is specified, detach any active vnodes
2372  * that are found.
2373  *
2374  * If WRITECLOSE is set, only flush out regular file vnodes open for
2375  * writing.
2376  *
2377  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2378  *
2379  * `rootrefs' specifies the base reference count for the root vnode
2380  * of this filesystem. The root vnode is considered busy if its
2381  * v_usecount exceeds this value. On a successful return, vflush()
2382  * will call vrele() on the root vnode exactly rootrefs times.
2383  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2384  * be zero.
2385  */
2386 #ifdef DIAGNOSTIC
2387 static int busyprt = 0;		/* print out busy vnodes */
2388 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2389 #endif
2390 
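/*
 * A hedged example of a typical call (hypothetical unmount path, not code
 * from this file):
 *
 *	error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td);
 *
 * Without FORCECLOSE this fails with EBUSY while any vnode on the mount is
 * still in use.
 */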
2391 int
2392 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
2393 {
2394 	struct vnode *vp, *mvp, *rootvp = NULL;
2395 	struct vattr vattr;
2396 	int busy = 0, error;
2397 
2398 	CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
2399 	    rootrefs, flags);
2400 	if (rootrefs > 0) {
2401 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2402 		    ("vflush: bad args"));
2403 		/*
2404 		 * Get the filesystem root vnode. We can vput() it
2405 		 * immediately, since with rootrefs > 0, it won't go away.
2406 		 */
2407 		if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp, td)) != 0) {
2408 			CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
2409 			    __func__, error);
2410 			return (error);
2411 		}
2412 		vput(rootvp);
2413 
2414 	}
2415 	MNT_ILOCK(mp);
2416 loop:
2417 	MNT_VNODE_FOREACH(vp, mp, mvp) {
2418 
2419 		VI_LOCK(vp);
2420 		vholdl(vp);
2421 		MNT_IUNLOCK(mp);
2422 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
2423 		if (error) {
2424 			vdrop(vp);
2425 			MNT_ILOCK(mp);
2426 			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
2427 			goto loop;
2428 		}
2429 		/*
2430 		 * Skip over vnodes marked VV_SYSTEM.
2431 		 */
2432 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2433 			VOP_UNLOCK(vp, 0);
2434 			vdrop(vp);
2435 			MNT_ILOCK(mp);
2436 			continue;
2437 		}
2438 		/*
2439 		 * If WRITECLOSE is set, flush out unlinked but still open
2440 		 * files (even if open only for reading) and regular file
2441 		 * vnodes open for writing.
2442 		 */
2443 		if (flags & WRITECLOSE) {
2444 			error = VOP_GETATTR(vp, &vattr, td->td_ucred);
2445 			VI_LOCK(vp);
2446 
2447 			if ((vp->v_type == VNON ||
2448 			    (error == 0 && vattr.va_nlink > 0)) &&
2449 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2450 				VOP_UNLOCK(vp, 0);
2451 				vdropl(vp);
2452 				MNT_ILOCK(mp);
2453 				continue;
2454 			}
2455 		} else
2456 			VI_LOCK(vp);
2457 		/*
2458 		 * With v_usecount == 0, all we need to do is clear out the
2459 		 * vnode data structures and we are done.
2460 		 *
2461 		 * If FORCECLOSE is set, forcibly close the vnode.
2462 		 */
2463 		if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
2464 			VNASSERT(vp->v_usecount == 0 ||
2465 			    (vp->v_type != VCHR && vp->v_type != VBLK), vp,
2466 			    ("device VNODE %p is FORCECLOSED", vp));
2467 			vgonel(vp);
2468 		} else {
2469 			busy++;
2470 #ifdef DIAGNOSTIC
2471 			if (busyprt)
2472 				vprint("vflush: busy vnode", vp);
2473 #endif
2474 		}
2475 		VOP_UNLOCK(vp, 0);
2476 		vdropl(vp);
2477 		MNT_ILOCK(mp);
2478 	}
2479 	MNT_IUNLOCK(mp);
2480 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2481 		/*
2482 		 * If just the root vnode is busy, and if its refcount
2483 		 * is equal to `rootrefs', then go ahead and kill it.
2484 		 */
2485 		VI_LOCK(rootvp);
2486 		KASSERT(busy > 0, ("vflush: not busy"));
2487 		VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
2488 		    ("vflush: usecount %d < rootrefs %d",
2489 		     rootvp->v_usecount, rootrefs));
2490 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2491 			VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
2492 			vgone(rootvp);
2493 			VOP_UNLOCK(rootvp, 0);
2494 			busy = 0;
2495 		} else
2496 			VI_UNLOCK(rootvp);
2497 	}
2498 	if (busy) {
2499 		CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
2500 		    busy);
2501 		return (EBUSY);
2502 	}
2503 	for (; rootrefs > 0; rootrefs--)
2504 		vrele(rootvp);
2505 	return (0);
2506 }
2507 
2508 /*
2509  * Recycle an unused vnode to the front of the free list.
2510  */
2511 int
2512 vrecycle(struct vnode *vp, struct thread *td)
2513 {
2514 	int recycled;
2515 
2516 	ASSERT_VOP_ELOCKED(vp, "vrecycle");
2517 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2518 	recycled = 0;
2519 	VI_LOCK(vp);
2520 	if (vp->v_usecount == 0) {
2521 		recycled = 1;
2522 		vgonel(vp);
2523 	}
2524 	VI_UNLOCK(vp);
2525 	return (recycled);
2526 }
2527 
2528 /*
2529  * Eliminate all activity associated with a vnode
2530  * in preparation for reuse.
2531  */
2532 void
2533 vgone(struct vnode *vp)
2534 {
2535 	VI_LOCK(vp);
2536 	vgonel(vp);
2537 	VI_UNLOCK(vp);
2538 }
2539 
2540 /*
2541  * vgone, with the vp interlock held.
2542  */
2543 void
2544 vgonel(struct vnode *vp)
2545 {
2546 	struct thread *td;
2547 	int oweinact;
2548 	int active;
2549 	struct mount *mp;
2550 
2551 	ASSERT_VOP_ELOCKED(vp, "vgonel");
2552 	ASSERT_VI_LOCKED(vp, "vgonel");
2553 	VNASSERT(vp->v_holdcnt, vp,
2554 	    ("vgonel: vp %p has no reference.", vp));
2555 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2556 	td = curthread;
2557 
2558 	/*
2559 	 * Don't vgonel if we're already doomed.
2560 	 */
2561 	if (vp->v_iflag & VI_DOOMED)
2562 		return;
2563 	vp->v_iflag |= VI_DOOMED;
2564 	/*
2565 	 * Check to see if the vnode is in use.  If so, we have to call
2566 	 * VOP_CLOSE() and VOP_INACTIVE().
2567 	 */
2568 	active = vp->v_usecount;
2569 	oweinact = (vp->v_iflag & VI_OWEINACT);
2570 	VI_UNLOCK(vp);
2571 	/*
2572 	 * Clean out any buffers associated with the vnode.
2573 	 * If the flush fails, just toss the buffers.
2574 	 */
2575 	mp = NULL;
2576 	if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
2577 		(void) vn_start_secondary_write(vp, &mp, V_WAIT);
2578 	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
2579 		vinvalbuf(vp, 0, 0, 0);
2580 
2581 	/*
2582 	 * If purging an active vnode, it must be closed and
2583 	 * deactivated before being reclaimed.
2584 	 */
2585 	if (active)
2586 		VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2587 	if (oweinact || active) {
2588 		VI_LOCK(vp);
2589 		if ((vp->v_iflag & VI_DOINGINACT) == 0)
2590 			vinactive(vp, td);
2591 		VI_UNLOCK(vp);
2592 	}
2593 	/*
2594 	 * Reclaim the vnode.
2595 	 */
2596 	if (VOP_RECLAIM(vp, td))
2597 		panic("vgone: cannot reclaim");
2598 	if (mp != NULL)
2599 		vn_finished_secondary_write(mp);
2600 	VNASSERT(vp->v_object == NULL, vp,
2601 	    ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
2602 	/*
2603 	 * Clear the advisory locks and wake up waiting threads.
2604 	 */
2605 	lf_purgelocks(vp, &(vp->v_lockf));
2606 	/*
2607 	 * Delete from old mount point vnode list.
2608 	 */
2609 	delmntque(vp);
2610 	cache_purge(vp);
2611 	/*
2612 	 * Done with purge, reset to the standard lock and invalidate
2613 	 * the vnode.
2614 	 */
2615 	VI_LOCK(vp);
2616 	vp->v_vnlock = &vp->v_lock;
2617 	vp->v_op = &dead_vnodeops;
2618 	vp->v_tag = "none";
2619 	vp->v_type = VBAD;
2620 }
2621 
2622 /*
2623  * Calculate the total number of references to a special device.
2624  */
2625 int
2626 vcount(struct vnode *vp)
2627 {
2628 	int count;
2629 
2630 	dev_lock();
2631 	count = vp->v_rdev->si_usecount;
2632 	dev_unlock();
2633 	return (count);
2634 }
2635 
2636 /*
2637  * Same as above, but using the struct cdev * as the argument.
2638  */
2639 int
2640 count_dev(struct cdev *dev)
2641 {
2642 	int count;
2643 
2644 	dev_lock();
2645 	count = dev->si_usecount;
2646 	dev_unlock();
2647 	return(count);
2648 }
2649 
2650 /*
2651  * Print out a description of a vnode.
2652  */
2653 static char *typename[] =
2654 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
2655  "VMARKER"};
2656 
2657 void
2658 vn_printf(struct vnode *vp, const char *fmt, ...)
2659 {
2660 	va_list ap;
2661 	char buf[256], buf2[16];
2662 	u_long flags;
2663 
2664 	va_start(ap, fmt);
2665 	vprintf(fmt, ap);
2666 	va_end(ap);
2667 	printf("%p: ", (void *)vp);
2668 	printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
2669 	printf("    usecount %d, writecount %d, refcount %d mountedhere %p\n",
2670 	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
2671 	buf[0] = '\0';
2672 	buf[1] = '\0';
2673 	if (vp->v_vflag & VV_ROOT)
2674 		strlcat(buf, "|VV_ROOT", sizeof(buf));
2675 	if (vp->v_vflag & VV_ISTTY)
2676 		strlcat(buf, "|VV_ISTTY", sizeof(buf));
2677 	if (vp->v_vflag & VV_NOSYNC)
2678 		strlcat(buf, "|VV_NOSYNC", sizeof(buf));
2679 	if (vp->v_vflag & VV_CACHEDLABEL)
2680 		strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
2681 	if (vp->v_vflag & VV_TEXT)
2682 		strlcat(buf, "|VV_TEXT", sizeof(buf));
2683 	if (vp->v_vflag & VV_COPYONWRITE)
2684 		strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
2685 	if (vp->v_vflag & VV_SYSTEM)
2686 		strlcat(buf, "|VV_SYSTEM", sizeof(buf));
2687 	if (vp->v_vflag & VV_PROCDEP)
2688 		strlcat(buf, "|VV_PROCDEP", sizeof(buf));
2689 	if (vp->v_vflag & VV_NOKNOTE)
2690 		strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
2691 	if (vp->v_vflag & VV_DELETED)
2692 		strlcat(buf, "|VV_DELETED", sizeof(buf));
2693 	if (vp->v_vflag & VV_MD)
2694 		strlcat(buf, "|VV_MD", sizeof(buf));
2695 	flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC |
2696 	    VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP |
2697 	    VV_NOKNOTE | VV_DELETED | VV_MD);
2698 	if (flags != 0) {
2699 		snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
2700 		strlcat(buf, buf2, sizeof(buf));
2701 	}
2702 	if (vp->v_iflag & VI_MOUNT)
2703 		strlcat(buf, "|VI_MOUNT", sizeof(buf));
2704 	if (vp->v_iflag & VI_AGE)
2705 		strlcat(buf, "|VI_AGE", sizeof(buf));
2706 	if (vp->v_iflag & VI_DOOMED)
2707 		strlcat(buf, "|VI_DOOMED", sizeof(buf));
2708 	if (vp->v_iflag & VI_FREE)
2709 		strlcat(buf, "|VI_FREE", sizeof(buf));
2710 	if (vp->v_iflag & VI_OBJDIRTY)
2711 		strlcat(buf, "|VI_OBJDIRTY", sizeof(buf));
2712 	if (vp->v_iflag & VI_DOINGINACT)
2713 		strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
2714 	if (vp->v_iflag & VI_OWEINACT)
2715 		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
2716 	flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE |
2717 	    VI_OBJDIRTY | VI_DOINGINACT | VI_OWEINACT);
2718 	if (flags != 0) {
2719 		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
2720 		strlcat(buf, buf2, sizeof(buf));
2721 	}
2722 	printf("    flags (%s)\n", buf + 1);
2723 	if (mtx_owned(VI_MTX(vp)))
2724 		printf(" VI_LOCKed");
2725 	if (vp->v_object != NULL)
2726 		printf("    v_object %p ref %d pages %d\n",
2727 		    vp->v_object, vp->v_object->ref_count,
2728 		    vp->v_object->resident_page_count);
2729 	printf("    ");
2730 	lockmgr_printinfo(vp->v_vnlock);
2731 	if (vp->v_data != NULL)
2732 		VOP_PRINT(vp);
2733 }
2734 
2735 #ifdef DDB
2736 /*
2737  * List all of the locked vnodes in the system.
2738  * Called when debugging the kernel.
2739  */
2740 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2741 {
2742 	struct mount *mp, *nmp;
2743 	struct vnode *vp;
2744 
2745 	/*
2746 	 * Note: because this is DDB, we can't obey the locking semantics
2747 	 * for these structures, which means we could catch an inconsistent
2748 	 * state and dereference a nasty pointer.  Not much to be done
2749 	 * about that.
2750 	 */
2751 	db_printf("Locked vnodes\n");
2752 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2753 		nmp = TAILQ_NEXT(mp, mnt_list);
2754 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2755 			if (vp->v_type != VMARKER &&
2756 			    VOP_ISLOCKED(vp))
2757 				vprint("", vp);
2758 		}
2759 		nmp = TAILQ_NEXT(mp, mnt_list);
2760 	}
2761 }
2762 
2763 /*
2764  * Show details about the given vnode.
2765  */
2766 DB_SHOW_COMMAND(vnode, db_show_vnode)
2767 {
2768 	struct vnode *vp;
2769 
2770 	if (!have_addr)
2771 		return;
2772 	vp = (struct vnode *)addr;
2773 	vn_printf(vp, "vnode ");
2774 }
2775 
2776 /*
2777  * Show details about the given mount point.
2778  */
2779 DB_SHOW_COMMAND(mount, db_show_mount)
2780 {
2781 	struct mount *mp;
2782 	struct statfs *sp;
2783 	struct vnode *vp;
2784 	char buf[512];
2785 	u_int flags;
2786 
2787 	if (!have_addr) {
2788 		/* No address given, print short info about all mount points. */
2789 		TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2790 			db_printf("%p %s on %s (%s)\n", mp,
2791 			    mp->mnt_stat.f_mntfromname,
2792 			    mp->mnt_stat.f_mntonname,
2793 			    mp->mnt_stat.f_fstypename);
2794 			if (db_pager_quit)
2795 				break;
2796 		}
2797 		db_printf("\nMore info: show mount <addr>\n");
2798 		return;
2799 	}
2800 
2801 	mp = (struct mount *)addr;
2802 	db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
2803 	    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
2804 
2805 	buf[0] = '\0';
2806 	flags = mp->mnt_flag;
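/*
 * The "+ 4" in MNT_FLAG() skips the "MNT_" prefix of the stringified flag
 * name; MNT_KERN_FLAG() below uses "+ 5" to drop "MNTK_" the same way.
 */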
2807 #define	MNT_FLAG(flag)	do {						\
2808 	if (flags & (flag)) {						\
2809 		if (buf[0] != '\0')					\
2810 			strlcat(buf, ", ", sizeof(buf));		\
2811 		strlcat(buf, (#flag) + 4, sizeof(buf));			\
2812 		flags &= ~(flag);					\
2813 	}								\
2814 } while (0)
2815 	MNT_FLAG(MNT_RDONLY);
2816 	MNT_FLAG(MNT_SYNCHRONOUS);
2817 	MNT_FLAG(MNT_NOEXEC);
2818 	MNT_FLAG(MNT_NOSUID);
2819 	MNT_FLAG(MNT_UNION);
2820 	MNT_FLAG(MNT_ASYNC);
2821 	MNT_FLAG(MNT_SUIDDIR);
2822 	MNT_FLAG(MNT_SOFTDEP);
2823 	MNT_FLAG(MNT_NOSYMFOLLOW);
2824 	MNT_FLAG(MNT_GJOURNAL);
2825 	MNT_FLAG(MNT_MULTILABEL);
2826 	MNT_FLAG(MNT_ACLS);
2827 	MNT_FLAG(MNT_NOATIME);
2828 	MNT_FLAG(MNT_NOCLUSTERR);
2829 	MNT_FLAG(MNT_NOCLUSTERW);
2830 	MNT_FLAG(MNT_EXRDONLY);
2831 	MNT_FLAG(MNT_EXPORTED);
2832 	MNT_FLAG(MNT_DEFEXPORTED);
2833 	MNT_FLAG(MNT_EXPORTANON);
2834 	MNT_FLAG(MNT_EXKERB);
2835 	MNT_FLAG(MNT_EXPUBLIC);
2836 	MNT_FLAG(MNT_LOCAL);
2837 	MNT_FLAG(MNT_QUOTA);
2838 	MNT_FLAG(MNT_ROOTFS);
2839 	MNT_FLAG(MNT_USER);
2840 	MNT_FLAG(MNT_IGNORE);
2841 	MNT_FLAG(MNT_UPDATE);
2842 	MNT_FLAG(MNT_DELEXPORT);
2843 	MNT_FLAG(MNT_RELOAD);
2844 	MNT_FLAG(MNT_FORCE);
2845 	MNT_FLAG(MNT_SNAPSHOT);
2846 	MNT_FLAG(MNT_BYFSID);
2847 #undef MNT_FLAG
2848 	if (flags != 0) {
2849 		if (buf[0] != '\0')
2850 			strlcat(buf, ", ", sizeof(buf));
2851 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2852 		    "0x%08x", flags);
2853 	}
2854 	db_printf("    mnt_flag = %s\n", buf);
2855 
2856 	buf[0] = '\0';
2857 	flags = mp->mnt_kern_flag;
2858 #define	MNT_KERN_FLAG(flag)	do {					\
2859 	if (flags & (flag)) {						\
2860 		if (buf[0] != '\0')					\
2861 			strlcat(buf, ", ", sizeof(buf));		\
2862 		strlcat(buf, (#flag) + 5, sizeof(buf));			\
2863 		flags &= ~(flag);					\
2864 	}								\
2865 } while (0)
2866 	MNT_KERN_FLAG(MNTK_UNMOUNTF);
2867 	MNT_KERN_FLAG(MNTK_ASYNC);
2868 	MNT_KERN_FLAG(MNTK_SOFTDEP);
2869 	MNT_KERN_FLAG(MNTK_NOINSMNTQ);
2870 	MNT_KERN_FLAG(MNTK_UNMOUNT);
2871 	MNT_KERN_FLAG(MNTK_MWAIT);
2872 	MNT_KERN_FLAG(MNTK_SUSPEND);
2873 	MNT_KERN_FLAG(MNTK_SUSPEND2);
2874 	MNT_KERN_FLAG(MNTK_SUSPENDED);
2875 	MNT_KERN_FLAG(MNTK_MPSAFE);
2876 	MNT_KERN_FLAG(MNTK_NOKNOTE);
2877 	MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
2878 #undef MNT_KERN_FLAG
2879 	if (flags != 0) {
2880 		if (buf[0] != '\0')
2881 			strlcat(buf, ", ", sizeof(buf));
2882 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2883 		    "0x%08x", flags);
2884 	}
2885 	db_printf("    mnt_kern_flag = %s\n", buf);
2886 
2887 	sp = &mp->mnt_stat;
2888 	db_printf("    mnt_stat = { version=%u type=%u flags=0x%016jx "
2889 	    "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
2890 	    "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
2891 	    "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
2892 	    (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
2893 	    (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
2894 	    (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
2895 	    (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
2896 	    (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
2897 	    (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
2898 	    (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
2899 	    (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
2900 
2901 	db_printf("    mnt_cred = { uid=%u ruid=%u",
2902 	    (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
2903 	if (mp->mnt_cred->cr_prison != NULL)
2904 		db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
2905 	db_printf(" }\n");
2906 	db_printf("    mnt_ref = %d\n", mp->mnt_ref);
2907 	db_printf("    mnt_gen = %d\n", mp->mnt_gen);
2908 	db_printf("    mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
2909 	db_printf("    mnt_writeopcount = %d\n", mp->mnt_writeopcount);
2910 	db_printf("    mnt_noasync = %u\n", mp->mnt_noasync);
2911 	db_printf("    mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
2912 	db_printf("    mnt_iosize_max = %d\n", mp->mnt_iosize_max);
2913 	db_printf("    mnt_hashseed = %u\n", mp->mnt_hashseed);
2914 	db_printf("    mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
2915 	db_printf("    mnt_secondary_accwrites = %d\n",
2916 	    mp->mnt_secondary_accwrites);
2917 	db_printf("    mnt_gjprovider = %s\n",
2918 	    mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
2919 	db_printf("\n");
2920 
2921 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2922 		if (vp->v_type != VMARKER) {
2923 			vn_printf(vp, "vnode ");
2924 			if (db_pager_quit)
2925 				break;
2926 		}
2927 	}
2928 }
2929 #endif	/* DDB */
2930 
2931 /*
2932  * Fill in a struct xvfsconf based on a struct vfsconf.
2933  */
2934 static void
2935 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2936 {
2937 
2938 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2939 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2940 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2941 	xvfsp->vfc_flags = vfsp->vfc_flags;
2942 	/*
2943 	 * These are unused in userland; we keep them
2944 	 * to avoid breaking binary compatibility.
2945 	 */
2946 	xvfsp->vfc_vfsops = NULL;
2947 	xvfsp->vfc_next = NULL;
2948 }
2949 
2950 /*
2951  * Top level filesystem related information gathering.
2952  */
2953 static int
2954 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2955 {
2956 	struct vfsconf *vfsp;
2957 	struct xvfsconf xvfsp;
2958 	int error;
2959 
2960 	error = 0;
2961 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2962 		bzero(&xvfsp, sizeof(xvfsp));
2963 		vfsconf2x(vfsp, &xvfsp);
2964 		error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2965 		if (error)
2966 			break;
2967 	}
2968 	return (error);
2969 }
2970 
2971 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2972     "S,xvfsconf", "List of all configured filesystems");
2973 
2974 #ifndef BURN_BRIDGES
2975 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2976 
2977 static int
2978 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2979 {
2980 	int *name = (int *)arg1 - 1;	/* XXX */
2981 	u_int namelen = arg2 + 1;	/* XXX */
2982 	struct vfsconf *vfsp;
2983 	struct xvfsconf xvfsp;
2984 
2985 	printf("WARNING: userland calling deprecated sysctl, "
2986 	    "please rebuild world\n");
2987 
2988 #if 1 || defined(COMPAT_PRELITE2)
2989 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2990 	if (namelen == 1)
2991 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2992 #endif
2993 
2994 	switch (name[1]) {
2995 	case VFS_MAXTYPENUM:
2996 		if (namelen != 2)
2997 			return (ENOTDIR);
2998 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2999 	case VFS_CONF:
3000 		if (namelen != 3)
3001 			return (ENOTDIR);	/* overloaded */
3002 		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
3003 			if (vfsp->vfc_typenum == name[2])
3004 				break;
3005 		if (vfsp == NULL)
3006 			return (EOPNOTSUPP);
3007 		bzero(&xvfsp, sizeof(xvfsp));
3008 		vfsconf2x(vfsp, &xvfsp);
3009 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
3010 	}
3011 	return (EOPNOTSUPP);
3012 }
3013 
3014 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
3015 	vfs_sysctl, "Generic filesystem");
3016 
3017 #if 1 || defined(COMPAT_PRELITE2)
3018 
3019 static int
3020 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3021 {
3022 	int error;
3023 	struct vfsconf *vfsp;
3024 	struct ovfsconf ovfs;
3025 
3026 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3027 		bzero(&ovfs, sizeof(ovfs));
3028 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3029 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3030 		ovfs.vfc_index = vfsp->vfc_typenum;
3031 		ovfs.vfc_refcount = vfsp->vfc_refcount;
3032 		ovfs.vfc_flags = vfsp->vfc_flags;
3033 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3034 		if (error)
3035 			return error;
3036 	}
3037 	return 0;
3038 }
3039 
3040 #endif /* 1 || COMPAT_PRELITE2 */
3041 #endif /* !BURN_BRIDGES */
3042 
3043 #define KINFO_VNODESLOP		10
3044 #ifdef notyet
3045 /*
3046  * Dump vnode list (via sysctl).
3047  */
3048 /* ARGSUSED */
3049 static int
3050 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3051 {
3052 	struct xvnode *xvn;
3053 	struct mount *mp;
3054 	struct vnode *vp;
3055 	int error, len, n;
3056 
3057 	/*
3058 	 * Stale numvnodes access is not fatal here.
3059 	 */
3060 	req->lock = 0;
3061 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3062 	if (!req->oldptr)
3063 		/* Make an estimate */
3064 		return (SYSCTL_OUT(req, 0, len));
3065 
3066 	error = sysctl_wire_old_buffer(req, 0);
3067 	if (error != 0)
3068 		return (error);
3069 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3070 	n = 0;
3071 	mtx_lock(&mountlist_mtx);
3072 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3073 		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
3074 			continue;
3075 		MNT_ILOCK(mp);
3076 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3077 			if (n == len)
3078 				break;
3079 			vref(vp);
3080 			xvn[n].xv_size = sizeof *xvn;
3081 			xvn[n].xv_vnode = vp;
3082 			xvn[n].xv_id = 0;	/* XXX compat */
3083 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3084 			XV_COPY(usecount);
3085 			XV_COPY(writecount);
3086 			XV_COPY(holdcnt);
3087 			XV_COPY(mount);
3088 			XV_COPY(numoutput);
3089 			XV_COPY(type);
3090 #undef XV_COPY
3091 			xvn[n].xv_flag = vp->v_vflag;
3092 
3093 			switch (vp->v_type) {
3094 			case VREG:
3095 			case VDIR:
3096 			case VLNK:
3097 				break;
3098 			case VBLK:
3099 			case VCHR:
3100 				if (vp->v_rdev == NULL) {
3101 					vrele(vp);
3102 					continue;
3103 				}
3104 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3105 				break;
3106 			case VSOCK:
3107 				xvn[n].xv_socket = vp->v_socket;
3108 				break;
3109 			case VFIFO:
3110 				xvn[n].xv_fifo = vp->v_fifoinfo;
3111 				break;
3112 			case VNON:
3113 			case VBAD:
3114 			default:
3115 				/* shouldn't happen? */
3116 				vrele(vp);
3117 				continue;
3118 			}
3119 			vrele(vp);
3120 			++n;
3121 		}
3122 		MNT_IUNLOCK(mp);
3123 		mtx_lock(&mountlist_mtx);
3124 		vfs_unbusy(mp);
3125 		if (n == len)
3126 			break;
3127 	}
3128 	mtx_unlock(&mountlist_mtx);
3129 
3130 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3131 	free(xvn, M_TEMP);
3132 	return (error);
3133 }
3134 
3135 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3136 	0, 0, sysctl_vnode, "S,xvnode", "");
3137 #endif
3138 
3139 /*
3140  * Unmount all filesystems. The list is traversed in reverse order
3141  * of mounting to avoid dependencies.
3142  */
3143 void
3144 vfs_unmountall(void)
3145 {
3146 	struct mount *mp;
3147 	struct thread *td;
3148 	int error;
3149 
3150 	KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread"));
3151 	CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
3152 	td = curthread;
3153 
3154 	/*
3155 	 * Since this only runs when rebooting, it is not interlocked.
3156 	 */
3157 	while(!TAILQ_EMPTY(&mountlist)) {
3158 		mp = TAILQ_LAST(&mountlist, mntlist);
3159 		error = dounmount(mp, MNT_FORCE, td);
3160 		if (error) {
3161 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3162 			/*
3163 			 * XXX: Due to the way in which we mount the root
3164 			 * file system off of devfs, devfs will generate a
3165 			 * "busy" warning when we try to unmount it before
3166 			 * the root.  Don't print a warning as a result in
3167 			 * order to avoid false positive errors that may
3168 			 * cause needless upset.
3169 			 */
3170 			if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) {
3171 				printf("unmount of %s failed (",
3172 				    mp->mnt_stat.f_mntonname);
3173 				if (error == EBUSY)
3174 					printf("BUSY)\n");
3175 				else
3176 					printf("%d)\n", error);
3177 			}
3178 		} else {
3179 			/* The unmount has removed mp from the mountlist */
3180 		}
3181 	}
3182 }
3183 
3184 /*
3185  * Perform msync on all vnodes under a mount point.
3186  * The mount point must be locked.
3187  */
3188 void
3189 vfs_msync(struct mount *mp, int flags)
3190 {
3191 	struct vnode *vp, *mvp;
3192 	struct vm_object *obj;
3193 
3194 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
3195 	MNT_ILOCK(mp);
3196 	MNT_VNODE_FOREACH(vp, mp, mvp) {
3197 		VI_LOCK(vp);
3198 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3199 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
3200 			MNT_IUNLOCK(mp);
3201 			if (!vget(vp,
3202 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3203 			    curthread)) {
3204 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3205 					vput(vp);
3206 					MNT_ILOCK(mp);
3207 					continue;
3208 				}
3209 
3210 				obj = vp->v_object;
3211 				if (obj != NULL) {
3212 					VM_OBJECT_LOCK(obj);
3213 					vm_object_page_clean(obj, 0, 0,
3214 					    flags == MNT_WAIT ?
3215 					    OBJPC_SYNC : OBJPC_NOSYNC);
3216 					VM_OBJECT_UNLOCK(obj);
3217 				}
3218 				vput(vp);
3219 			}
3220 			MNT_ILOCK(mp);
3221 		} else
3222 			VI_UNLOCK(vp);
3223 	}
3224 	MNT_IUNLOCK(mp);
3225 }
3226 
3227 /*
3228  * Mark a vnode as free, putting it up for recycling.
3229  */
3230 static void
3231 vfree(struct vnode *vp)
3232 {
3233 
3234 	ASSERT_VI_LOCKED(vp, "vfree");
3235 	mtx_lock(&vnode_free_list_mtx);
3236 	VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed."));
3237 	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free"));
3238 	VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't"));
3239 	VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp,
3240 	    ("vfree: Freeing doomed vnode"));
3241 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3242 	if (vp->v_iflag & VI_AGE) {
3243 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3244 	} else {
3245 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3246 	}
3247 	freevnodes++;
3248 	vp->v_iflag &= ~VI_AGE;
3249 	vp->v_iflag |= VI_FREE;
3250 	mtx_unlock(&vnode_free_list_mtx);
3251 }
3252 
3253 /*
3254  * Opposite of vfree() - mark a vnode as in use.
3255  */
3256 static void
3257 vbusy(struct vnode *vp)
3258 {
3259 	ASSERT_VI_LOCKED(vp, "vbusy");
3260 	VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free"));
3261 	VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed."));
3262 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3263 
3264 	mtx_lock(&vnode_free_list_mtx);
3265 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3266 	freevnodes--;
3267 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3268 	mtx_unlock(&vnode_free_list_mtx);
3269 }
3270 
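/*
 * Free the poll-related state allocated by v_addpollinfo().
 */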
3271 static void
3272 destroy_vpollinfo(struct vpollinfo *vi)
3273 {
3274 	knlist_destroy(&vi->vpi_selinfo.si_note);
3275 	mtx_destroy(&vi->vpi_lock);
3276 	uma_zfree(vnodepoll_zone, vi);
3277 }
3278 
3279 /*
3280  * Initialize per-vnode helper structure to hold poll-related state.
3281  */
3282 void
3283 v_addpollinfo(struct vnode *vp)
3284 {
3285 	struct vpollinfo *vi;
3286 
3287 	if (vp->v_pollinfo != NULL)
3288 		return;
3289 	vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
3290 	mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3291 	knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
3292 	    vfs_knlunlock, vfs_knllocked);
3293 	VI_LOCK(vp);
3294 	if (vp->v_pollinfo != NULL) {
3295 		VI_UNLOCK(vp);
3296 		destroy_vpollinfo(vi);
3297 		return;
3298 	}
3299 	vp->v_pollinfo = vi;
3300 	VI_UNLOCK(vp);
3301 }
3302 
3303 /*
3304  * Record a process's interest in events which might happen to
3305  * a vnode.  Because poll uses the historic select-style interface
3306  * internally, this routine serves as both the ``check for any
3307  * pending events'' and the ``record my interest in future events''
3308  * functions.  (These are done together, while the lock is held,
3309  * to avoid race conditions.)
3310  */
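/*
 * A hypothetical VOP_POLL implementation (not code from this file) could
 * forward straight to this routine:
 *
 *	return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *
 * which hands back any already-posted events, or records the interest and
 * returns 0.
 */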
3311 int
3312 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
3313 {
3314 
3315 	v_addpollinfo(vp);
3316 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3317 	if (vp->v_pollinfo->vpi_revents & events) {
3318 		/*
3319 		 * This leaves events we are not interested
3320 		 * in available for the other process which
3321 		 * presumably had requested them
3322 		 * (otherwise they would never have been
3323 		 * recorded).
3324 		 */
3325 		events &= vp->v_pollinfo->vpi_revents;
3326 		vp->v_pollinfo->vpi_revents &= ~events;
3327 
3328 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3329 		return (events);
3330 	}
3331 	vp->v_pollinfo->vpi_events |= events;
3332 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3333 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3334 	return (0);
3335 }
3336 
3337 /*
3338  * Routine to create and manage a filesystem syncer vnode.
3339  */
3340 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3341 static int	sync_fsync(struct  vop_fsync_args *);
3342 static int	sync_inactive(struct  vop_inactive_args *);
3343 static int	sync_reclaim(struct  vop_reclaim_args *);
3344 
3345 static struct vop_vector sync_vnodeops = {
3346 	.vop_bypass =	VOP_EOPNOTSUPP,
3347 	.vop_close =	sync_close,		/* close */
3348 	.vop_fsync =	sync_fsync,		/* fsync */
3349 	.vop_inactive =	sync_inactive,	/* inactive */
3350 	.vop_reclaim =	sync_reclaim,	/* reclaim */
3351 	.vop_lock1 =	vop_stdlock,	/* lock */
3352 	.vop_unlock =	vop_stdunlock,	/* unlock */
3353 	.vop_islocked =	vop_stdislocked,	/* islocked */
3354 };
3355 
3356 /*
3357  * Create a new filesystem syncer vnode for the specified mount point.
3358  */
3359 int
3360 vfs_allocate_syncvnode(struct mount *mp)
3361 {
3362 	struct vnode *vp;
3363 	struct bufobj *bo;
3364 	static long start, incr, next;
3365 	int error;
3366 
3367 	/* Allocate a new vnode */
3368 	if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) {
3369 		mp->mnt_syncer = NULL;
3370 		return (error);
3371 	}
3372 	vp->v_type = VNON;
3373 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3374 	vp->v_vflag |= VV_FORCEINSMQ;
3375 	error = insmntque(vp, mp);
3376 	if (error != 0)
3377 		panic("vfs_allocate_syncvnode: insmntque failed");
3378 	vp->v_vflag &= ~VV_FORCEINSMQ;
3379 	VOP_UNLOCK(vp, 0);
3380 	/*
3381 	 * Place the vnode onto the syncer worklist. We attempt to
3382 	 * scatter them about on the list so that they will go off
3383 	 * at evenly distributed times even if all the filesystems
3384 	 * are mounted at once.
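	 *
	 * The start/incr arithmetic below performs a binary subdivision of
	 * the table: successive syncer vnodes land at roughly 1/2, 1/4,
	 * 3/4, 1/8, 3/8, ... of syncer_maxdelay, with the stride halved
	 * each time the walk runs off the end.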
3385 	 */
3386 	next += incr;
3387 	if (next == 0 || next > syncer_maxdelay) {
3388 		start /= 2;
3389 		incr /= 2;
3390 		if (start == 0) {
3391 			start = syncer_maxdelay / 2;
3392 			incr = syncer_maxdelay;
3393 		}
3394 		next = start;
3395 	}
3396 	bo = &vp->v_bufobj;
3397 	BO_LOCK(bo);
3398 	vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
3399 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3400 	mtx_lock(&sync_mtx);
3401 	sync_vnode_count++;
3402 	mtx_unlock(&sync_mtx);
3403 	BO_UNLOCK(bo);
3404 	mp->mnt_syncer = vp;
3405 	return (0);
3406 }
3407 
3408 /*
3409  * Do a lazy sync of the filesystem.
3410  */
3411 static int
3412 sync_fsync(struct vop_fsync_args *ap)
3413 {
3414 	struct vnode *syncvp = ap->a_vp;
3415 	struct mount *mp = syncvp->v_mount;
3416 	int error;
3417 	struct bufobj *bo;
3418 
3419 	/*
3420 	 * We only need to do something if this is a lazy evaluation.
3421 	 */
3422 	if (ap->a_waitfor != MNT_LAZY)
3423 		return (0);
3424 
3425 	/*
3426 	 * Move ourselves to the back of the sync list.
3427 	 */
3428 	bo = &syncvp->v_bufobj;
3429 	BO_LOCK(bo);
3430 	vn_syncer_add_to_worklist(bo, syncdelay);
3431 	BO_UNLOCK(bo);
3432 
3433 	/*
3434 	 * Walk the list of vnodes pushing all that are dirty and
3435 	 * not already on the sync list.
3436 	 */
3437 	mtx_lock(&mountlist_mtx);
3438 	if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) != 0) {
3439 		mtx_unlock(&mountlist_mtx);
3440 		return (0);
3441 	}
3442 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3443 		vfs_unbusy(mp);
3444 		return (0);
3445 	}
3446 	MNT_ILOCK(mp);
3447 	mp->mnt_noasync++;
3448 	mp->mnt_kern_flag &= ~MNTK_ASYNC;
3449 	MNT_IUNLOCK(mp);
3450 	vfs_msync(mp, MNT_NOWAIT);
3451 	error = VFS_SYNC(mp, MNT_LAZY, ap->a_td);
3452 	MNT_ILOCK(mp);
3453 	mp->mnt_noasync--;
3454 	if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
3455 		mp->mnt_kern_flag |= MNTK_ASYNC;
3456 	MNT_IUNLOCK(mp);
3457 	vn_finished_write(mp);
3458 	vfs_unbusy(mp);
3459 	return (error);
3460 }
3461 
3462 /*
3463  * The syncer vnode is no longer referenced.
3464  */
3465 static int
3466 sync_inactive(struct vop_inactive_args *ap)
3467 {
3468 
3469 	vgone(ap->a_vp);
3470 	return (0);
3471 }
3472 
3473 /*
3474  * The syncer vnode is no longer needed and is being decommissioned.
3475  *
3476  * Modifications to the worklist must be protected by sync_mtx.
3477  */
3478 static int
3479 sync_reclaim(struct vop_reclaim_args *ap)
3480 {
3481 	struct vnode *vp = ap->a_vp;
3482 	struct bufobj *bo;
3483 
3484 	bo = &vp->v_bufobj;
3485 	BO_LOCK(bo);
3486 	vp->v_mount->mnt_syncer = NULL;
3487 	if (bo->bo_flag & BO_ONWORKLST) {
3488 		mtx_lock(&sync_mtx);
3489 		LIST_REMOVE(bo, bo_synclist);
3490 		syncer_worklist_len--;
3491 		sync_vnode_count--;
3492 		mtx_unlock(&sync_mtx);
3493 		bo->bo_flag &= ~BO_ONWORKLST;
3494 	}
3495 	BO_UNLOCK(bo);
3496 
3497 	return (0);
3498 }
3499 
3500 /*
3501  * Check if the vnode represents a disk device.
3502  */
3503 int
3504 vn_isdisk(struct vnode *vp, int *errp)
3505 {
3506 	int error;
3507 
3508 	error = 0;
3509 	dev_lock();
3510 	if (vp->v_type != VCHR)
3511 		error = ENOTBLK;
3512 	else if (vp->v_rdev == NULL)
3513 		error = ENXIO;
3514 	else if (vp->v_rdev->si_devsw == NULL)
3515 		error = ENXIO;
3516 	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
3517 		error = ENOTBLK;
3518 	dev_unlock();
3519 	if (errp != NULL)
3520 		*errp = error;
3521 	return (error == 0);
3522 }
3523 
3524 /*
3525  * Common filesystem object access control check routine.  Accepts a
3526  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3527  * and optional call-by-reference privused argument allowing vaccess()
3528  * to indicate to the caller whether privilege was used to satisfy the
3529  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3530  *
3531  * The ifdef'd CAPABILITIES version is here for reference, but is not
3532  * actually used.
3533  */
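/*
 * A worked example (hypothetical values): with file_mode 0640, a credential
 * whose cr_uid matches file_uid is granted VADMIN|VREAD|VWRITE|VAPPEND, so
 * an accmode of VREAD succeeds with no privilege check; a credential that
 * only matches file_gid is granted just VREAD, so a VWRITE request falls
 * through to the privilege check and needs PRIV_VFS_WRITE.
 */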
3534 int
3535 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
3536     accmode_t accmode, struct ucred *cred, int *privused)
3537 {
3538 	accmode_t dac_granted;
3539 	accmode_t priv_granted;
3540 
3541 	/*
3542 	 * Look for a normal, non-privileged way to access the file/directory
3543 	 * as requested.  If it exists, go with that.
3544 	 */
3545 
3546 	if (privused != NULL)
3547 		*privused = 0;
3548 
3549 	dac_granted = 0;
3550 
3551 	/* Check the owner. */
3552 	if (cred->cr_uid == file_uid) {
3553 		dac_granted |= VADMIN;
3554 		if (file_mode & S_IXUSR)
3555 			dac_granted |= VEXEC;
3556 		if (file_mode & S_IRUSR)
3557 			dac_granted |= VREAD;
3558 		if (file_mode & S_IWUSR)
3559 			dac_granted |= (VWRITE | VAPPEND);
3560 
3561 		if ((accmode & dac_granted) == accmode)
3562 			return (0);
3563 
3564 		goto privcheck;
3565 	}
3566 
3567 	/* Otherwise, check the groups (first match) */
3568 	if (groupmember(file_gid, cred)) {
3569 		if (file_mode & S_IXGRP)
3570 			dac_granted |= VEXEC;
3571 		if (file_mode & S_IRGRP)
3572 			dac_granted |= VREAD;
3573 		if (file_mode & S_IWGRP)
3574 			dac_granted |= (VWRITE | VAPPEND);
3575 
3576 		if ((accmode & dac_granted) == accmode)
3577 			return (0);
3578 
3579 		goto privcheck;
3580 	}
3581 
3582 	/* Otherwise, check everyone else. */
3583 	if (file_mode & S_IXOTH)
3584 		dac_granted |= VEXEC;
3585 	if (file_mode & S_IROTH)
3586 		dac_granted |= VREAD;
3587 	if (file_mode & S_IWOTH)
3588 		dac_granted |= (VWRITE | VAPPEND);
3589 	if ((accmode & dac_granted) == accmode)
3590 		return (0);
3591 
3592 privcheck:
3593 	/*
3594 	 * Build a privilege mask to determine if the set of privileges
3595 	 * satisfies the requirements when combined with the granted mask
3596 	 * from above.  For each privilege, if the privilege is required,
3597 	 * bitwise or the request type onto the priv_granted mask.
3598 	 */
3599 	priv_granted = 0;
3600 
3601 	if (type == VDIR) {
3602 		/*
3603 		 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
3604 		 * requests, instead of PRIV_VFS_EXEC.
3605 		 */
3606 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3607 		    !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0))
3608 			priv_granted |= VEXEC;
3609 	} else {
3610 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3611 		    !priv_check_cred(cred, PRIV_VFS_EXEC, 0))
3612 			priv_granted |= VEXEC;
3613 	}
3614 
3615 	if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
3616 	    !priv_check_cred(cred, PRIV_VFS_READ, 0))
3617 		priv_granted |= VREAD;
3618 
3619 	if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3620 	    !priv_check_cred(cred, PRIV_VFS_WRITE, 0))
3621 		priv_granted |= (VWRITE | VAPPEND);
3622 
3623 	if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3624 	    !priv_check_cred(cred, PRIV_VFS_ADMIN, 0))
3625 		priv_granted |= VADMIN;
3626 
3627 	if ((accmode & (priv_granted | dac_granted)) == accmode) {
3628 		/* XXX audit: privilege used */
3629 		if (privused != NULL)
3630 			*privused = 1;
3631 		return (0);
3632 	}
3633 
3634 	return ((accmode & VADMIN) ? EPERM : EACCES);
3635 }
3636 
3637 /*
3638  * Credential check based on process requesting service, and per-attribute
3639  * permissions.
3640  */
3641 int
3642 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
3643     struct thread *td, accmode_t accmode)
3644 {
3645 
3646 	/*
3647 	 * Kernel-invoked requests always succeed.
3648 	 */
3649 	if (cred == NOCRED)
3650 		return (0);
3651 
3652 	/*
3653 	 * Do not allow privileged processes in jail to directly manipulate
3654 	 * system attributes.
3655 	 */
3656 	switch (attrnamespace) {
3657 	case EXTATTR_NAMESPACE_SYSTEM:
3658 		/* Potentially should be: return (EPERM); */
3659 		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0));
3660 	case EXTATTR_NAMESPACE_USER:
3661 		return (VOP_ACCESS(vp, accmode, cred, td));
3662 	default:
3663 		return (EPERM);
3664 	}
3665 }
3666 
3667 #ifdef DEBUG_VFS_LOCKS
3668 /*
3669  * This only exists to suppress warnings from unlocked specfs accesses.  It is
3670  * no longer OK to have an unlocked VFS.
3671  */
3672 #define	IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL ||		\
3673 	(vp)->v_type == VCHR ||	(vp)->v_type == VBAD)
3674 
3675 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3676 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3677 
3678 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3679 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3680 
3681 int vfs_badlock_print = 1;	/* Print lock violations. */
3682 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3683 
3684 #ifdef KDB
3685 int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
3686 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3687 #endif
3688 
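/*
 * Report a lock assertion failure: optionally capture a stack backtrace,
 * print the violation, and/or drop into the debugger, as selected by the
 * debug.vfs_badlock_* knobs above.  All of these are CTLFLAG_RW and can
 * be tuned at run time, e.g. "sysctl debug.vfs_badlock_ddb=0" to log
 * violations without entering the debugger.
 */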
3689 static void
3690 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3691 {
3692 
3693 #ifdef KDB
3694 	if (vfs_badlock_backtrace)
3695 		kdb_backtrace();
3696 #endif
3697 	if (vfs_badlock_print)
3698 		printf("%s: %p %s\n", str, (void *)vp, msg);
3699 	if (vfs_badlock_ddb)
3700 		kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
3701 }
3702 
3703 void
3704 assert_vi_locked(struct vnode *vp, const char *str)
3705 {
3706 
3707 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3708 		vfs_badlock("interlock is not locked but should be", str, vp);
3709 }
3710 
3711 void
3712 assert_vi_unlocked(struct vnode *vp, const char *str)
3713 {
3714 
3715 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3716 		vfs_badlock("interlock is locked but should not be", str, vp);
3717 }
3718 
3719 void
3720 assert_vop_locked(struct vnode *vp, const char *str)
3721 {
3722 
3723 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0)
3724 		vfs_badlock("is not locked but should be", str, vp);
3725 }
3726 
3727 void
3728 assert_vop_unlocked(struct vnode *vp, const char *str)
3729 {
3730 
3731 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
3732 		vfs_badlock("is locked but should not be", str, vp);
3733 }
3734 
3735 void
3736 assert_vop_elocked(struct vnode *vp, const char *str)
3737 {
3738 
3739 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
3740 		vfs_badlock("is not exclusive locked but should be", str, vp);
3741 }
3742 
3743 #if 0
3744 void
3745 assert_vop_elocked_other(struct vnode *vp, const char *str)
3746 {
3747 
3748 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER)
3749 		vfs_badlock("is not exclusive locked by another thread",
3750 		    str, vp);
3751 }
3752 
3753 void
3754 assert_vop_slocked(struct vnode *vp, const char *str)
3755 {
3756 
3757 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED)
3758 		vfs_badlock("is not locked shared but should be", str, vp);
3759 }
3760 #endif /* 0 */
3761 #endif /* DEBUG_VFS_LOCKS */
3762 
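/*
 * Pre-operation hook for VOP_RENAME: under DEBUG_VFS_LOCKS, verify the
 * expected lock state of the source and target vnodes, then take hold
 * references on all of them so they cannot be recycled while the rename
 * is in progress.  vop_rename_post() drops the references again.
 */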
3763 void
3764 vop_rename_pre(void *ap)
3765 {
3766 	struct vop_rename_args *a = ap;
3767 
3768 #ifdef DEBUG_VFS_LOCKS
3769 	if (a->a_tvp)
3770 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3771 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3772 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3773 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3774 
3775 	/* Check the source (from). */
3776 	if (a->a_tdvp != a->a_fdvp && a->a_tvp != a->a_fdvp)
3777 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3778 	if (a->a_tvp != a->a_fvp)
3779 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3780 
3781 	/* Check the target. */
3782 	if (a->a_tvp)
3783 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3784 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3785 #endif
3786 	if (a->a_tdvp != a->a_fdvp)
3787 		vhold(a->a_fdvp);
3788 	if (a->a_tvp != a->a_fvp)
3789 		vhold(a->a_fvp);
3790 	vhold(a->a_tdvp);
3791 	if (a->a_tvp)
3792 		vhold(a->a_tvp);
3793 }
3794 
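/*
 * Pre-operation hook for VOP_STRATEGY: under DEBUG_VFS_LOCKS, check that
 * the buffer handed down is locked by the caller.  Cluster buffers are
 * exempt, as noted below.
 */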
3795 void
3796 vop_strategy_pre(void *ap)
3797 {
3798 #ifdef DEBUG_VFS_LOCKS
3799 	struct vop_strategy_args *a;
3800 	struct buf *bp;
3801 
3802 	a = ap;
3803 	bp = a->a_bp;
3804 
3805 	/*
3806 	 * Cluster ops lock their component buffers but not the IO container.
3807 	 */
3808 	if ((bp->b_flags & B_CLUSTER) != 0)
3809 		return;
3810 
3811 	if (!BUF_ISLOCKED(bp)) {
3812 		if (vfs_badlock_print)
3813 			printf(
3814 			    "VOP_STRATEGY: bp is not locked but should be\n");
3815 		if (vfs_badlock_ddb)
3816 			kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
3817 	}
3818 #endif
3819 }
3820 
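/*
 * Under DEBUG_VFS_LOCKS, assert the locking contract of VOP_LOOKUP: the
 * directory vnode must be locked (and its interlock released) both on
 * entry and on return, and a successful lookup must return the child
 * vnode locked as well.
 */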
3821 void
3822 vop_lookup_pre(void *ap)
3823 {
3824 #ifdef DEBUG_VFS_LOCKS
3825 	struct vop_lookup_args *a;
3826 	struct vnode *dvp;
3827 
3828 	a = ap;
3829 	dvp = a->a_dvp;
3830 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3831 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3832 #endif
3833 }
3834 
3835 void
3836 vop_lookup_post(void *ap, int rc)
3837 {
3838 #ifdef DEBUG_VFS_LOCKS
3839 	struct vop_lookup_args *a;
3840 	struct vnode *dvp;
3841 	struct vnode *vp;
3842 
3843 	a = ap;
3844 	dvp = a->a_dvp;
3845 	vp = *(a->a_vpp);
3846 
3847 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3848 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3849 
3850 	if (!rc)
3851 		ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)");
3852 #endif
3853 }
3854 
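/*
 * Lock assertions around VOP_LOCK and VOP_UNLOCK: when LK_INTERLOCK is
 * passed, the vnode interlock must be held on entry and must have been
 * released by the time the operation returns.  A successful VOP_LOCK
 * leaves the vnode locked; VOP_UNLOCK requires it to be locked on entry.
 */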
3855 void
3856 vop_lock_pre(void *ap)
3857 {
3858 #ifdef DEBUG_VFS_LOCKS
3859 	struct vop_lock1_args *a = ap;
3860 
3861 	if ((a->a_flags & LK_INTERLOCK) == 0)
3862 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3863 	else
3864 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3865 #endif
3866 }
3867 
3868 void
3869 vop_lock_post(void *ap, int rc)
3870 {
3871 #ifdef DEBUG_VFS_LOCKS
3872 	struct vop_lock1_args *a = ap;
3873 
3874 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3875 	if (rc == 0)
3876 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3877 #endif
3878 }
3879 
3880 void
3881 vop_unlock_pre(void *ap)
3882 {
3883 #ifdef DEBUG_VFS_LOCKS
3884 	struct vop_unlock_args *a = ap;
3885 
3886 	if (a->a_flags & LK_INTERLOCK)
3887 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3888 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3889 #endif
3890 }
3891 
3892 void
3893 vop_unlock_post(void *ap, int rc)
3894 {
3895 #ifdef DEBUG_VFS_LOCKS
3896 	struct vop_unlock_args *a = ap;
3897 
3898 	if (a->a_flags & LK_INTERLOCK)
3899 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3900 #endif
3901 }
3902 
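/*
 * The vop_*_post() hooks below fire kqueue notifications once the
 * corresponding operation has succeeded, translating each VOP into the
 * NOTE_* events that EVFILT_VNODE listeners expect.
 */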
3903 void
3904 vop_create_post(void *ap, int rc)
3905 {
3906 	struct vop_create_args *a = ap;
3907 
3908 	if (!rc)
3909 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3910 }
3911 
3912 void
3913 vop_link_post(void *ap, int rc)
3914 {
3915 	struct vop_link_args *a = ap;
3916 
3917 	if (!rc) {
3918 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
3919 		VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
3920 	}
3921 }
3922 
3923 void
3924 vop_mkdir_post(void *ap, int rc)
3925 {
3926 	struct vop_mkdir_args *a = ap;
3927 
3928 	if (!rc)
3929 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
3930 }
3931 
3932 void
3933 vop_mknod_post(void *ap, int rc)
3934 {
3935 	struct vop_mknod_args *a = ap;
3936 
3937 	if (!rc)
3938 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3939 }
3940 
3941 void
3942 vop_remove_post(void *ap, int rc)
3943 {
3944 	struct vop_remove_args *a = ap;
3945 
3946 	if (!rc) {
3947 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3948 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
3949 	}
3950 }
3951 
3952 void
3953 vop_rename_post(void *ap, int rc)
3954 {
3955 	struct vop_rename_args *a = ap;
3956 
3957 	if (!rc) {
3958 		VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE);
3959 		VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE);
3960 		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
3961 		if (a->a_tvp)
3962 			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
3963 	}
3964 	if (a->a_tdvp != a->a_fdvp)
3965 		vdrop(a->a_fdvp);
3966 	if (a->a_tvp != a->a_fvp)
3967 		vdrop(a->a_fvp);
3968 	vdrop(a->a_tdvp);
3969 	if (a->a_tvp)
3970 		vdrop(a->a_tvp);
3971 }
3972 
3973 void
3974 vop_rmdir_post(void *ap, int rc)
3975 {
3976 	struct vop_rmdir_args *a = ap;
3977 
3978 	if (!rc) {
3979 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
3980 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
3981 	}
3982 }
3983 
3984 void
3985 vop_setattr_post(void *ap, int rc)
3986 {
3987 	struct vop_setattr_args *a = ap;
3988 
3989 	if (!rc)
3990 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
3991 }
3992 
3993 void
3994 vop_symlink_post(void *ap, int rc)
3995 {
3996 	struct vop_symlink_args *a = ap;
3997 
3998 	if (!rc)
3999 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
4000 }
4001 
4002 static struct knlist fs_knlist;
4003 
4004 static void
4005 vfs_event_init(void *arg)
4006 {
4007 	knlist_init(&fs_knlist, NULL, NULL, NULL, NULL);
4008 }
4009 /* XXX - correct order? */
4010 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
4011 
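/*
 * Post a filesystem event (typically one of the VQ_* bits) to every
 * listener registered on fs_knlist; the data argument is currently
 * unused.
 */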
4012 void
4013 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
4014 {
4015 
4016 	KNOTE_UNLOCKED(&fs_knlist, event);
4017 }
4018 
4019 static int	filt_fsattach(struct knote *kn);
4020 static void	filt_fsdetach(struct knote *kn);
4021 static int	filt_fsevent(struct knote *kn, long hint);
4022 
4023 struct filterops fs_filtops =
4024 	{ 0, filt_fsattach, filt_fsdetach, filt_fsevent };
4025 
4026 static int
4027 filt_fsattach(struct knote *kn)
4028 {
4029 
4030 	kn->kn_flags |= EV_CLEAR;
4031 	knlist_add(&fs_knlist, kn, 0);
4032 	return (0);
4033 }
4034 
4035 static void
4036 filt_fsdetach(struct knote *kn)
4037 {
4038 
4039 	knlist_remove(&fs_knlist, kn, 0);
4040 }
4041 
4042 static int
4043 filt_fsevent(struct knote *kn, long hint)
4044 {
4045 
4046 	kn->kn_fflags |= hint;
4047 	return (kn->kn_fflags != 0);
4048 }
4049 
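/*
 * Handler for the vfs.ctl sysctl: look up the mount point named by the
 * caller-supplied fsid, verify the filesystem type if one was given, and
 * forward the request to the filesystem via VFS_SYSCTL().
 */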
4050 static int
4051 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
4052 {
4053 	struct vfsidctl vc;
4054 	int error;
4055 	struct mount *mp;
4056 
4057 	error = SYSCTL_IN(req, &vc, sizeof(vc));
4058 	if (error)
4059 		return (error);
4060 	if (vc.vc_vers != VFS_CTL_VERS1)
4061 		return (EINVAL);
4062 	mp = vfs_getvfs(&vc.vc_fsid);
4063 	if (mp == NULL)
4064 		return (ENOENT);
4065 	/* Ensure that a specific sysctl goes to the right filesystem. */
4066 	if (strcmp(vc.vc_fstypename, "*") != 0 &&
4067 	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4068 		vfs_rel(mp);
4069 		return (EINVAL);
4070 	}
4071 	VCTLTOREQ(&vc, req);
4072 	error = VFS_SYSCTL(mp, vc.vc_op, req);
4073 	vfs_rel(mp);
4074 	return (error);
4075 }
4076 
4077 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR, NULL, 0, sysctl_vfs_ctl, "",
4078     "Sysctl by fsid");
4079 
4080 /*
4081  * Function to initialize a va_filerev field sensibly.
4082  * XXX: Wouldn't a random number make a lot more sense?
4083  */
4084 u_quad_t
4085 init_va_filerev(void)
4086 {
4087 	struct bintime bt;
4088 
4089 	getbinuptime(&bt);
4090 	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
4091 }
4092 
4093 static int	filt_vfsread(struct knote *kn, long hint);
4094 static int	filt_vfswrite(struct knote *kn, long hint);
4095 static int	filt_vfsvnode(struct knote *kn, long hint);
4096 static void	filt_vfsdetach(struct knote *kn);
4097 static struct filterops vfsread_filtops =
4098 	{ 1, NULL, filt_vfsdetach, filt_vfsread };
4099 static struct filterops vfswrite_filtops =
4100 	{ 1, NULL, filt_vfsdetach, filt_vfswrite };
4101 static struct filterops vfsvnode_filtops =
4102 	{ 1, NULL, filt_vfsdetach, filt_vfsvnode };
4103 
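/*
 * knlist lock callbacks that protect a vnode's knote list with the vnode
 * lock itself; these are presumably installed when the vnode's pollinfo
 * is set up elsewhere in this file.
 */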
4104 static void
4105 vfs_knllock(void *arg)
4106 {
4107 	struct vnode *vp = arg;
4108 
4109 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4110 }
4111 
4112 static void
4113 vfs_knlunlock(void *arg)
4114 {
4115 	struct vnode *vp = arg;
4116 
4117 	VOP_UNLOCK(vp, 0);
4118 }
4119 
4120 static int
4121 vfs_knllocked(void *arg)
4122 {
4123 	struct vnode *vp = arg;
4124 
4125 	return (VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
4126 }
4127 
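/*
 * VOP_KQFILTER implementation: pick the filter ops matching the requested
 * filter type and attach the knote to the vnode's pollinfo knote list.
 */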
4128 int
4129 vfs_kqfilter(struct vop_kqfilter_args *ap)
4130 {
4131 	struct vnode *vp = ap->a_vp;
4132 	struct knote *kn = ap->a_kn;
4133 	struct knlist *knl;
4134 
4135 	switch (kn->kn_filter) {
4136 	case EVFILT_READ:
4137 		kn->kn_fop = &vfsread_filtops;
4138 		break;
4139 	case EVFILT_WRITE:
4140 		kn->kn_fop = &vfswrite_filtops;
4141 		break;
4142 	case EVFILT_VNODE:
4143 		kn->kn_fop = &vfsvnode_filtops;
4144 		break;
4145 	default:
4146 		return (EINVAL);
4147 	}
4148 
4149 	kn->kn_hook = (caddr_t)vp;
4150 
4151 	v_addpollinfo(vp);
4152 	if (vp->v_pollinfo == NULL)
4153 		return (ENOMEM);
4154 	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
4155 	knlist_add(knl, kn, 0);
4156 
4157 	return (0);
4158 }
4159 
4160 /*
4161  * Detach knote from vnode
4162  */
4163 static void
4164 filt_vfsdetach(struct knote *kn)
4165 {
4166 	struct vnode *vp = (struct vnode *)kn->kn_hook;
4167 
4168 	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
4169 	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
4170 }
4171 
4172 /*ARGSUSED*/
4173 static int
4174 filt_vfsread(struct knote *kn, long hint)
4175 {
4176 	struct vnode *vp = (struct vnode *)kn->kn_hook;
4177 	struct vattr va;
4178 
4179 	/*
4180 	 * The filesystem is gone, so set the EOF flag and schedule
4181 	 * the knote for deletion.
4182 	 */
4183 	if (hint == NOTE_REVOKE) {
4184 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4185 		return (1);
4186 	}
4187 
4188 	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
4189 		return (0);
4190 
4191 	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
4192 	return (kn->kn_data != 0);
4193 }
4194 
4195 /*ARGSUSED*/
4196 static int
4197 filt_vfswrite(struct knote *kn, long hint)
4198 {
4199 	/*
4200 	 * The filesystem is gone, so set the EOF flag and schedule
4201 	 * the knote for deletion.
4202 	 */
4203 	if (hint == NOTE_REVOKE)
4204 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4205 
4206 	kn->kn_data = 0;
4207 	return (1);
4208 }
4209 
4210 static int
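/*
 * EVFILT_VNODE filter: accumulate whichever NOTE_* hints the knote was
 * registered for; NOTE_REVOKE additionally marks the knote EOF and
 * always triggers.
 */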
4211 filt_vfsvnode(struct knote *kn, long hint)
4212 {
4213 	if (kn->kn_sfflags & hint)
4214 		kn->kn_fflags |= hint;
4215 	if (hint == NOTE_REVOKE) {
4216 		kn->kn_flags |= EV_EOF;
4217 		return (1);
4218 	}
4219 	return (kn->kn_fflags != 0);
4220 }
4221 
4222 int
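/*
 * Helper for VOP_READDIR implementations: copy one directory entry into
 * the caller's uio and, when cookies were requested, append the entry's
 * offset cookie to the (reallocated) cookie array.  Returns ENAMETOOLONG
 * if the entry does not fit in the remaining buffer space.
 */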
4223 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
4224 {
4225 	int error;
4226 
4227 	if (dp->d_reclen > ap->a_uio->uio_resid)
4228 		return (ENAMETOOLONG);
4229 	error = uiomove(dp, dp->d_reclen, ap->a_uio);
4230 	if (error) {
4231 		if (ap->a_ncookies != NULL) {
4232 			if (ap->a_cookies != NULL)
4233 				free(ap->a_cookies, M_TEMP);
4234 			ap->a_cookies = NULL;
4235 			*ap->a_ncookies = 0;
4236 		}
4237 		return (error);
4238 	}
4239 	if (ap->a_ncookies == NULL)
4240 		return (0);
4241 
4242 	KASSERT(ap->a_cookies,
4243 	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
4244 
4245 	*ap->a_cookies = realloc(*ap->a_cookies,
4246 	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
4247 	(*ap->a_cookies)[*ap->a_ncookies] = off;
4248 	return (0);
4249 }
4250 
4251 /*
4252  * Mark for update the access time of the file if the filesystem
4253  * supports VOP_MARKATIME.  This functionality is used by execve and
4254  * mmap, so we want to avoid the I/O implied by directly setting
4255  * va_atime for the sake of efficiency.
4256  */
4257 void
4258 vfs_mark_atime(struct vnode *vp, struct ucred *cred)
4259 {
4260 
4261 	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
4262 		(void)VOP_MARKATIME(vp);
4263 }
4264