xref: /freebsd/sys/kern/vfs_subr.c (revision 195ebc7e9e4b129de810833791a19dfb4349d6a9)
1 /*-
2  * Copyright (c) 1989, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  * (c) UNIX System Laboratories, Inc.
5  * All or some portions of this file are derived from material licensed
6  * to the University of California by American Telephone and Telegraph
7  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8  * the permission of UNIX System Laboratories, Inc.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 4. Neither the name of the University nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  *
34  *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
35  */
36 
37 /*
38  * External virtual filesystem routines
39  */
40 
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43 
44 #include "opt_ddb.h"
45 #include "opt_mac.h"
46 
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/bio.h>
50 #include <sys/buf.h>
51 #include <sys/condvar.h>
52 #include <sys/conf.h>
53 #include <sys/dirent.h>
54 #include <sys/event.h>
55 #include <sys/eventhandler.h>
56 #include <sys/extattr.h>
57 #include <sys/file.h>
58 #include <sys/fcntl.h>
59 #include <sys/jail.h>
60 #include <sys/kdb.h>
61 #include <sys/kernel.h>
62 #include <sys/kthread.h>
63 #include <sys/lockf.h>
64 #include <sys/malloc.h>
65 #include <sys/mount.h>
66 #include <sys/namei.h>
67 #include <sys/priv.h>
68 #include <sys/reboot.h>
69 #include <sys/sleepqueue.h>
70 #include <sys/stat.h>
71 #include <sys/sysctl.h>
72 #include <sys/syslog.h>
73 #include <sys/vmmeter.h>
74 #include <sys/vnode.h>
75 
76 #include <machine/stdarg.h>
77 
78 #include <security/mac/mac_framework.h>
79 
80 #include <vm/vm.h>
81 #include <vm/vm_object.h>
82 #include <vm/vm_extern.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_kern.h>
87 #include <vm/uma.h>
88 
89 #ifdef DDB
90 #include <ddb/ddb.h>
91 #endif
92 
93 #define	WI_MPSAFEQ	0
94 #define	WI_GIANTQ	1
95 
96 static MALLOC_DEFINE(M_NETADDR, "subr_export_host", "Export host address structure");
97 
98 static void	delmntque(struct vnode *vp);
99 static int	flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
100 		    int slpflag, int slptimeo);
101 static void	syncer_shutdown(void *arg, int howto);
102 static int	vtryrecycle(struct vnode *vp);
103 static void	vbusy(struct vnode *vp);
104 static void	vinactive(struct vnode *, struct thread *);
105 static void	v_incr_usecount(struct vnode *);
106 static void	v_decr_usecount(struct vnode *);
107 static void	v_decr_useonly(struct vnode *);
108 static void	v_upgrade_usecount(struct vnode *);
109 static void	vfree(struct vnode *);
110 static void	vnlru_free(int);
111 static void	vgonel(struct vnode *);
112 static void	vfs_knllock(void *arg);
113 static void	vfs_knlunlock(void *arg);
114 static int	vfs_knllocked(void *arg);
115 static void	destroy_vpollinfo(struct vpollinfo *vi);
116 
117 /*
118  * Number of vnodes in existence.  Increased whenever getnewvnode()
119  * allocates a new vnode, decreased when vdestroy() is called on a
120  * VI_DOOMED vnode.
121  */
122 static unsigned long	numvnodes;
123 
124 SYSCTL_LONG(_vfs, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
125 
126 /*
127  * Conversion tables for conversion from vnode types to inode formats
128  * and back.
129  */
130 enum vtype iftovt_tab[16] = {
131 	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
132 	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
133 };
134 int vttoif_tab[10] = {
135 	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
136 	S_IFSOCK, S_IFIFO, S_IFMT, S_IFMT
137 };
138 
139 /*
140  * List of vnodes that are ready for recycling.
141  */
142 static TAILQ_HEAD(freelst, vnode) vnode_free_list;
143 
144 /*
145  * Free vnode target.  Free vnodes may simply be files which have been stat'd
146  * but not read.  This is somewhat common, and a small cache of such files
147  * should be kept to avoid recreation costs.
148  */
149 static u_long wantfreevnodes;
150 SYSCTL_LONG(_vfs, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
151 /* Number of vnodes in the free list. */
152 static u_long freevnodes;
153 SYSCTL_LONG(_vfs, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
154 
155 /*
156  * Various variables used for debugging the new implementation of
157  * reassignbuf().
158  * XXX these are probably of (very) limited utility now.
159  */
160 static int reassignbufcalls;
161 SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "");
162 
163 /*
164  * Cache for the mount type id assigned to NFS.  This is used for
165  * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
166  */
167 int	nfs_mount_type = -1;
168 
169 /* To keep more than one thread at a time from running vfs_getnewfsid */
170 static struct mtx mntid_mtx;
171 
172 /*
173  * Lock for any access to the following:
174  *	vnode_free_list
175  *	numvnodes
176  *	freevnodes
177  */
178 static struct mtx vnode_free_list_mtx;
179 
180 /* Publicly exported FS */
181 struct nfs_public nfs_pub;
182 
183 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
184 static uma_zone_t vnode_zone;
185 static uma_zone_t vnodepoll_zone;
186 
187 /* Set to 1 to print out reclaim of active vnodes */
188 int	prtactive;
189 
190 /*
191  * The workitem queue.
192  *
193  * It is useful to delay writes of file data and filesystem metadata
194  * for tens of seconds so that quickly created and deleted files need
195  * not waste disk bandwidth being created and removed. To realize this,
196  * we append vnodes to a "workitem" queue. When running with a soft
197  * updates implementation, most pending metadata dependencies should
198  * not wait for more than a few seconds. Thus, filesystems mounted on block
199  * devices are delayed only about half the time that file data is delayed.
200  * Similarly, directory updates are more critical, so they are delayed only
201  * about a third of the time that file data is delayed. Thus, there are
202  * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
203  * one each second (driven off the filesystem syncer process). The
204  * syncer_delayno variable indicates the next queue that is to be processed.
205  * Items that need to be processed soon are placed in this queue:
206  *
207  *	syncer_workitem_pending[syncer_delayno]
208  *
209  * A delay of fifteen seconds is done by placing the request fifteen
210  * entries later in the queue:
211  *
212  *	syncer_workitem_pending[(syncer_delayno + 15) & syncer_mask]
213  *
214  */
215 static int syncer_delayno;
216 static long syncer_mask;
217 LIST_HEAD(synclist, bufobj);
218 static struct synclist *syncer_workitem_pending[2];
219 /*
220  * The sync_mtx protects:
221  *	bo->bo_synclist
222  *	sync_vnode_count
223  *	syncer_delayno
224  *	syncer_state
225  *	syncer_workitem_pending
226  *	syncer_worklist_len
227  *	rushjob
228  */
229 static struct mtx sync_mtx;
230 static struct cv sync_wakeup;
231 
232 #define SYNCER_MAXDELAY		32
233 static int syncer_maxdelay = SYNCER_MAXDELAY;	/* maximum delay time */
234 static int syncdelay = 30;		/* max time to delay syncing data */
235 static int filedelay = 30;		/* time to delay syncing files */
236 SYSCTL_INT(_kern, OID_AUTO, filedelay, CTLFLAG_RW, &filedelay, 0, "");
237 static int dirdelay = 29;		/* time to delay syncing directories */
238 SYSCTL_INT(_kern, OID_AUTO, dirdelay, CTLFLAG_RW, &dirdelay, 0, "");
239 static int metadelay = 28;		/* time to delay syncing metadata */
240 SYSCTL_INT(_kern, OID_AUTO, metadelay, CTLFLAG_RW, &metadelay, 0, "");
241 static int rushjob;		/* number of slots to run ASAP */
242 static int stat_rush_requests;	/* number of times I/O speeded up */
243 SYSCTL_INT(_debug, OID_AUTO, rush_requests, CTLFLAG_RW, &stat_rush_requests, 0, "");
244 
245 /*
246  * When shutting down the syncer, run it at four times normal speed.
247  */
248 #define SYNCER_SHUTDOWN_SPEEDUP		4
249 static int sync_vnode_count;
250 static int syncer_worklist_len;
251 static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
252     syncer_state;
253 
254 /*
255  * Number of vnodes we want to exist at any one time.  This is mostly used
256  * to size hash tables in vnode-related code.  It is normally not used in
257  * getnewvnode(), as wantfreevnodes is normally nonzero.
258  *
259  * XXX desiredvnodes is historical cruft and should not exist.
260  */
261 int desiredvnodes;
262 SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
263     &desiredvnodes, 0, "Maximum number of vnodes");
264 SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
265     &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
266 static int vnlru_nowhere;
267 SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW,
268     &vnlru_nowhere, 0, "Number of times the vnlru process ran without success");
269 
270 /*
271  * Macros to control when a vnode is freed and recycled.  All require
272  * the vnode interlock.
273  */
274 #define VCANRECYCLE(vp) (((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
275 #define VSHOULDFREE(vp) (!((vp)->v_iflag & VI_FREE) && !(vp)->v_holdcnt)
276 #define VSHOULDBUSY(vp) (((vp)->v_iflag & VI_FREE) && (vp)->v_holdcnt)
277 
278 
279 /*
280  * Initialize the vnode management data structures.
281  */
282 #ifndef	MAXVNODES_MAX
283 #define	MAXVNODES_MAX	100000
284 #endif
285 static void
286 vntblinit(void *dummy __unused)
287 {
288 
289 	/*
290 	 * Desiredvnodes is a function of the physical memory size and
291 	 * the kernel's heap size.  Specifically, desiredvnodes scales
292 	 * in proportion to the physical memory size until two fifths
293 	 * of the kernel's heap size is consumed by vnodes and vm
294 	 * objects.
295 	 */
296 	desiredvnodes = min(maxproc + cnt.v_page_count / 4, 2 * vm_kmem_size /
297 	    (5 * (sizeof(struct vm_object) + sizeof(struct vnode))));
298 	if (desiredvnodes > MAXVNODES_MAX) {
299 		if (bootverbose)
300 			printf("Reducing kern.maxvnodes %d -> %d\n",
301 			    desiredvnodes, MAXVNODES_MAX);
302 		desiredvnodes = MAXVNODES_MAX;
303 	}
304 	wantfreevnodes = desiredvnodes / 4;
305 	mtx_init(&mntid_mtx, "mntid", NULL, MTX_DEF);
306 	TAILQ_INIT(&vnode_free_list);
307 	mtx_init(&vnode_free_list_mtx, "vnode_free_list", NULL, MTX_DEF);
308 	vnode_zone = uma_zcreate("VNODE", sizeof (struct vnode), NULL, NULL,
309 	    NULL, NULL, UMA_ALIGN_PTR, 0);
310 	vnodepoll_zone = uma_zcreate("VNODEPOLL", sizeof (struct vpollinfo),
311 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
312 	/*
313 	 * Initialize the filesystem syncer.
314 	 */
315 	syncer_workitem_pending[WI_MPSAFEQ] = hashinit(syncer_maxdelay, M_VNODE,
316 	    &syncer_mask);
317 	syncer_workitem_pending[WI_GIANTQ] = hashinit(syncer_maxdelay, M_VNODE,
318 	    &syncer_mask);
319 	syncer_maxdelay = syncer_mask + 1;
320 	mtx_init(&sync_mtx, "Syncer mtx", NULL, MTX_DEF);
321 	cv_init(&sync_wakeup, "syncer");
322 }
323 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NULL);
324 
325 
326 /*
327  * Mark a mount point as busy. Used to synchronize access and to delay
328  * unmounting. Note that mountlist_mtx is not released on failure.
329  */
330 int
331 vfs_busy(struct mount *mp, int flags)
332 {
333 
334 	MPASS((flags & ~MBF_MASK) == 0);
335 	CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
336 
337 	MNT_ILOCK(mp);
338 	MNT_REF(mp);
339 	/*
340 	 * If the mount point is currently being unmounted, sleep until the
341 	 * mount point's fate is decided.  If the thread doing the unmounting
342 	 * fails, it will clear the MNTK_UNMOUNT flag before waking us up,
343 	 * indicating that this mount point has survived the unmount attempt
344 	 * and vfs_busy should retry.  Otherwise the unmounter thread will set
345 	 * the MNTK_REFEXPIRE flag in addition to MNTK_UNMOUNT, indicating that
346 	 * the mount point is about to be really destroyed.  vfs_busy needs to
347 	 * release its reference on the mount point in this case and return
348 	 * with ENOENT, telling the caller that the mount point it tried to
349 	 * busy is no longer valid.
350 	 */
351 	while (mp->mnt_kern_flag & MNTK_UNMOUNT) {
352 		if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) {
353 			MNT_REL(mp);
354 			MNT_IUNLOCK(mp);
355 			CTR1(KTR_VFS, "%s: failed busying before sleeping",
356 			    __func__);
357 			return (ENOENT);
358 		}
359 		if (flags & MBF_MNTLSTLOCK)
360 			mtx_unlock(&mountlist_mtx);
361 		mp->mnt_kern_flag |= MNTK_MWAIT;
362 		msleep(mp, MNT_MTX(mp), PVFS, "vfs_busy", 0);
363 		if (flags & MBF_MNTLSTLOCK)
364 			mtx_lock(&mountlist_mtx);
365 	}
366 	if (flags & MBF_MNTLSTLOCK)
367 		mtx_unlock(&mountlist_mtx);
368 	mp->mnt_lockref++;
369 	MNT_IUNLOCK(mp);
370 	return (0);
371 }
372 
373 /*
374  * Free a busy filesystem.
375  */
376 void
377 vfs_unbusy(struct mount *mp)
378 {
379 
380 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
381 	MNT_ILOCK(mp);
382 	MNT_REL(mp);
383 	KASSERT(mp->mnt_lockref > 0, ("negative mnt_lockref"));
384 	mp->mnt_lockref--;
385 	if (mp->mnt_lockref == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) {
386 		MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT);
387 		CTR1(KTR_VFS, "%s: waking up waiters", __func__);
388 		mp->mnt_kern_flag &= ~MNTK_DRAINING;
389 		wakeup(&mp->mnt_lockref);
390 	}
391 	MNT_IUNLOCK(mp);
392 }
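
/*
 * Illustrative only: a condensed sketch of the usual vfs_busy()/vfs_unbusy()
 * pairing when walking the mount list (vnlru_proc() below is a real instance
 * in this file).  With MBF_MNTLSTLOCK, vfs_busy() drops mountlist_mtx on
 * success but leaves it held on failure, so the caller retakes it only after
 * a successful busy before stepping to the next mount point:
 *
 *	mtx_lock(&mountlist_mtx);
 *	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 *		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
 *			nmp = TAILQ_NEXT(mp, mnt_list);
 *			continue;
 *		}
 *		... operate on the busied mount point, possibly sleeping ...
 *		mtx_lock(&mountlist_mtx);
 *		nmp = TAILQ_NEXT(mp, mnt_list);
 *		vfs_unbusy(mp);
 *	}
 *	mtx_unlock(&mountlist_mtx);
 */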
393 
394 /*
395  * Lookup a mount point by filesystem identifier.
396  */
397 struct mount *
398 vfs_getvfs(fsid_t *fsid)
399 {
400 	struct mount *mp;
401 
402 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
403 	mtx_lock(&mountlist_mtx);
404 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
405 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
406 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
407 			vfs_ref(mp);
408 			mtx_unlock(&mountlist_mtx);
409 			return (mp);
410 		}
411 	}
412 	mtx_unlock(&mountlist_mtx);
413 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
414 	return ((struct mount *) 0);
415 }
416 
417 /*
418  * Lookup a mount point by filesystem identifier, busying it before
419  * returning.
420  */
421 struct mount *
422 vfs_busyfs(fsid_t *fsid)
423 {
424 	struct mount *mp;
425 	int error;
426 
427 	CTR2(KTR_VFS, "%s: fsid %p", __func__, fsid);
428 	mtx_lock(&mountlist_mtx);
429 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
430 		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
431 		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
432 			error = vfs_busy(mp, MBF_MNTLSTLOCK);
433 			if (error) {
434 				mtx_unlock(&mountlist_mtx);
435 				return (NULL);
436 			}
437 			return (mp);
438 		}
439 	}
440 	CTR2(KTR_VFS, "%s: lookup failed for %p id", __func__, fsid);
441 	mtx_unlock(&mountlist_mtx);
442 	return ((struct mount *) 0);
443 }
444 
445 /*
446  * Check if a user can access privileged mount options.
447  */
448 int
449 vfs_suser(struct mount *mp, struct thread *td)
450 {
451 	int error;
452 
453 	/*
454 	 * If the thread is jailed, but this is not a jail-friendly file
455 	 * system, deny immediately.
456 	 */
457 	if (!(mp->mnt_vfc->vfc_flags & VFCF_JAIL) && jailed(td->td_ucred))
458 		return (EPERM);
459 
460 	/*
461 	 * If the file system was mounted outside the jail of the calling
462 	 * thread, deny immediately.
463 	 */
464 	if (mp->mnt_cred->cr_prison != td->td_ucred->cr_prison &&
465 	    !prison_ischild(td->td_ucred->cr_prison, mp->mnt_cred->cr_prison))
466 		return (EPERM);
467 
468 	/*
469 	 * If file system supports delegated administration, we don't check
470 	 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified
471 	 * by the file system itself.
472 	 * If this is not the user that did original mount, we check for
473 	 * the PRIV_VFS_MOUNT_OWNER privilege.
474 	 */
475 	if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) &&
476 	    mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) {
477 		if ((error = priv_check(td, PRIV_VFS_MOUNT_OWNER)) != 0)
478 			return (error);
479 	}
480 	return (0);
481 }
482 
483 /*
484  * Get a new unique fsid.  Try to make its val[0] unique, since this value
485  * will be used to create fake device numbers for stat().  Also try (but
486  * not so hard) to make its val[0] unique mod 2^16, since some emulators only
487  * support 16-bit device numbers.  We end up with unique val[0]'s for the
488  * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
489  *
490  * Keep in mind that several mounts may be running in parallel.  Starting
491  * the search one past where the previous search terminated is both a
492  * micro-optimization and a defense against returning the same fsid to
493  * different mounts.
494  */
495 void
496 vfs_getnewfsid(struct mount *mp)
497 {
498 	static u_int16_t mntid_base;
499 	struct mount *nmp;
500 	fsid_t tfsid;
501 	int mtype;
502 
503 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
504 	mtx_lock(&mntid_mtx);
505 	mtype = mp->mnt_vfc->vfc_typenum;
506 	tfsid.val[1] = mtype;
507 	mtype = (mtype & 0xFF) << 24;
508 	for (;;) {
509 		tfsid.val[0] = makedev(255,
510 		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
511 		mntid_base++;
512 		if ((nmp = vfs_getvfs(&tfsid)) == NULL)
513 			break;
514 		vfs_rel(nmp);
515 	}
516 	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
517 	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
518 	mtx_unlock(&mntid_mtx);
519 }
520 
521 /*
522  * Knob to control the precision of file timestamps:
523  *
524  *   0 = seconds only; nanoseconds zeroed.
525  *   1 = seconds and nanoseconds, accurate within 1/HZ.
526  *   2 = seconds and nanoseconds, truncated to microseconds.
527  * >=3 = seconds and nanoseconds, maximum precision.
528  */
529 enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };
530 
531 static int timestamp_precision = TSP_SEC;
532 SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
533     &timestamp_precision, 0, "");
534 
535 /*
536  * Get a current timestamp.
537  */
538 void
539 vfs_timestamp(struct timespec *tsp)
540 {
541 	struct timeval tv;
542 
543 	switch (timestamp_precision) {
544 	case TSP_SEC:
545 		tsp->tv_sec = time_second;
546 		tsp->tv_nsec = 0;
547 		break;
548 	case TSP_HZ:
549 		getnanotime(tsp);
550 		break;
551 	case TSP_USEC:
552 		microtime(&tv);
553 		TIMEVAL_TO_TIMESPEC(&tv, tsp);
554 		break;
555 	case TSP_NSEC:
556 	default:
557 		nanotime(tsp);
558 		break;
559 	}
560 }
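
/*
 * Illustrative only: a minimal sketch of how a filesystem typically uses
 * vfs_timestamp() when updating an inode's times.  The "ip" structure and
 * its fields are assumptions for the example, not part of this file.  The
 * precision obtained follows the vfs.timestamp_precision sysctl above
 * (e.g. setting it to 3 yields full nanosecond resolution):
 *
 *	struct timespec ts;
 *
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts.tv_sec;
 *	ip->i_mtimensec = ts.tv_nsec;
 */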
561 
562 /*
563  * Set vnode attributes to VNOVAL
564  */
565 void
566 vattr_null(struct vattr *vap)
567 {
568 
569 	vap->va_type = VNON;
570 	vap->va_size = VNOVAL;
571 	vap->va_bytes = VNOVAL;
572 	vap->va_mode = VNOVAL;
573 	vap->va_nlink = VNOVAL;
574 	vap->va_uid = VNOVAL;
575 	vap->va_gid = VNOVAL;
576 	vap->va_fsid = VNOVAL;
577 	vap->va_fileid = VNOVAL;
578 	vap->va_blocksize = VNOVAL;
579 	vap->va_rdev = VNOVAL;
580 	vap->va_atime.tv_sec = VNOVAL;
581 	vap->va_atime.tv_nsec = VNOVAL;
582 	vap->va_mtime.tv_sec = VNOVAL;
583 	vap->va_mtime.tv_nsec = VNOVAL;
584 	vap->va_ctime.tv_sec = VNOVAL;
585 	vap->va_ctime.tv_nsec = VNOVAL;
586 	vap->va_birthtime.tv_sec = VNOVAL;
587 	vap->va_birthtime.tv_nsec = VNOVAL;
588 	vap->va_flags = VNOVAL;
589 	vap->va_gen = VNOVAL;
590 	vap->va_vaflags = 0;
591 }
592 
593 /*
594  * This routine is called when we have too many vnodes.  It attempts
595  * to free <count> vnodes and will potentially free vnodes that still
596  * have VM backing store (VM backing store is typically the cause
597  * of a vnode blowout so we want to do this).  Therefore, this operation
598  * is not considered cheap.
599  *
600  * A number of conditions may prevent a vnode from being reclaimed.
601  * the buffer cache may have references on the vnode, a directory
602  * vnode may still have references due to the namei cache representing
603  * underlying files, or the vnode may be in active use.  It is not
604  * desirable to reuse such vnodes.  These conditions may cause the
605  * number of vnodes to reach some minimum value regardless of what
606  * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
607  */
608 static int
609 vlrureclaim(struct mount *mp)
610 {
611 	struct vnode *vp;
612 	int done;
613 	int trigger;
614 	int usevnodes;
615 	int count;
616 
617 	/*
618 	 * Calculate the trigger point, don't allow user
619 	 * screwups to blow us up.   This prevents us from
620 	 * recycling vnodes with lots of resident pages.  We
621 	 * aren't trying to free memory, we are trying to
622 	 * free vnodes.
623 	 */
624 	usevnodes = desiredvnodes;
625 	if (usevnodes <= 0)
626 		usevnodes = 1;
627 	trigger = cnt.v_page_count * 2 / usevnodes;
628 	done = 0;
629 	vn_start_write(NULL, &mp, V_WAIT);
630 	MNT_ILOCK(mp);
631 	count = mp->mnt_nvnodelistsize / 10 + 1;
632 	while (count != 0) {
633 		vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
634 		while (vp != NULL && vp->v_type == VMARKER)
635 			vp = TAILQ_NEXT(vp, v_nmntvnodes);
636 		if (vp == NULL)
637 			break;
638 		TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
639 		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
640 		--count;
641 		if (!VI_TRYLOCK(vp))
642 			goto next_iter;
643 		/*
644 		 * If it's been deconstructed already, it's still
645 		 * referenced, or it exceeds the trigger, skip it.
646 		 */
647 		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
648 		    (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
649 		    vp->v_object->resident_page_count > trigger)) {
650 			VI_UNLOCK(vp);
651 			goto next_iter;
652 		}
653 		MNT_IUNLOCK(mp);
654 		vholdl(vp);
655 		if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT)) {
656 			vdrop(vp);
657 			goto next_iter_mntunlocked;
658 		}
659 		VI_LOCK(vp);
660 		/*
661 		 * v_usecount may have been bumped after VOP_LOCK() dropped
662 		 * the vnode interlock and before it was locked again.
663 		 *
664 		 * It is not necessary to recheck VI_DOOMED because it can
665 		 * only be set by another thread that holds both the vnode
666 		 * lock and vnode interlock.  If another thread has the
667 		 * vnode lock before we get to VOP_LOCK() and obtains the
668 		 * vnode interlock after VOP_LOCK() drops the vnode
669 		 * interlock, the other thread will be unable to drop the
670 		 * vnode lock before our VOP_LOCK() call fails.
671 		 */
672 		if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
673 		    (vp->v_object != NULL &&
674 		    vp->v_object->resident_page_count > trigger)) {
675 			VOP_UNLOCK(vp, LK_INTERLOCK);
676 			goto next_iter_mntunlocked;
677 		}
678 		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
679 		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
680 		vgonel(vp);
681 		VOP_UNLOCK(vp, 0);
682 		vdropl(vp);
683 		done++;
684 next_iter_mntunlocked:
685 		if ((count % 256) != 0)
686 			goto relock_mnt;
687 		goto yield;
688 next_iter:
689 		if ((count % 256) != 0)
690 			continue;
691 		MNT_IUNLOCK(mp);
692 yield:
693 		uio_yield();
694 relock_mnt:
695 		MNT_ILOCK(mp);
696 	}
697 	MNT_IUNLOCK(mp);
698 	vn_finished_write(mp);
699 	return done;
700 }
701 
702 /*
703  * Attempt to keep the free list at wantfreevnodes length.
704  */
705 static void
706 vnlru_free(int count)
707 {
708 	struct vnode *vp;
709 	int vfslocked;
710 
711 	mtx_assert(&vnode_free_list_mtx, MA_OWNED);
712 	for (; count > 0; count--) {
713 		vp = TAILQ_FIRST(&vnode_free_list);
714 		/*
715 		 * The list can be modified while the free_list_mtx
716 		 * has been dropped and vp could be NULL here.
717 		 */
718 		if (!vp)
719 			break;
720 		VNASSERT(vp->v_op != NULL, vp,
721 		    ("vnlru_free: vnode already reclaimed."));
722 		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
723 		/*
724 		 * Don't recycle if we can't get the interlock.
725 		 */
726 		if (!VI_TRYLOCK(vp)) {
727 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
728 			continue;
729 		}
730 		VNASSERT(VCANRECYCLE(vp), vp,
731 		    ("vp inconsistent on freelist"));
732 		freevnodes--;
733 		vp->v_iflag &= ~VI_FREE;
734 		vholdl(vp);
735 		mtx_unlock(&vnode_free_list_mtx);
736 		VI_UNLOCK(vp);
737 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
738 		vtryrecycle(vp);
739 		VFS_UNLOCK_GIANT(vfslocked);
740 		/*
741 		 * If the recycle succeeded, this vdrop will actually free
742 		 * the vnode.  If not it will simply place it back on
743 		 * the free list.
744 		 */
745 		vdrop(vp);
746 		mtx_lock(&vnode_free_list_mtx);
747 	}
748 }
749 /*
750  * Attempt to recycle vnodes in a context that is always safe to block.
751  * Calling vlrureclaim() from the bowels of filesystem code has some
752  * interesting deadlock problems.
753  */
754 static struct proc *vnlruproc;
755 static int vnlruproc_sig;
756 
757 static void
758 vnlru_proc(void)
759 {
760 	struct mount *mp, *nmp;
761 	int done, vfslocked;
762 	struct proc *p = vnlruproc;
763 
764 	EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p,
765 	    SHUTDOWN_PRI_FIRST);
766 
767 	for (;;) {
768 		kproc_suspend_check(p);
769 		mtx_lock(&vnode_free_list_mtx);
770 		if (freevnodes > wantfreevnodes)
771 			vnlru_free(freevnodes - wantfreevnodes);
772 		if (numvnodes <= desiredvnodes * 9 / 10) {
773 			vnlruproc_sig = 0;
774 			wakeup(&vnlruproc_sig);
775 			msleep(vnlruproc, &vnode_free_list_mtx,
776 			    PVFS|PDROP, "vlruwt", hz);
777 			continue;
778 		}
779 		mtx_unlock(&vnode_free_list_mtx);
780 		done = 0;
781 		mtx_lock(&mountlist_mtx);
782 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
783 			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK)) {
784 				nmp = TAILQ_NEXT(mp, mnt_list);
785 				continue;
786 			}
787 			vfslocked = VFS_LOCK_GIANT(mp);
788 			done += vlrureclaim(mp);
789 			VFS_UNLOCK_GIANT(vfslocked);
790 			mtx_lock(&mountlist_mtx);
791 			nmp = TAILQ_NEXT(mp, mnt_list);
792 			vfs_unbusy(mp);
793 		}
794 		mtx_unlock(&mountlist_mtx);
795 		if (done == 0) {
796 			EVENTHANDLER_INVOKE(vfs_lowvnodes, desiredvnodes / 10);
797 #if 0
798 			/* These messages are temporary debugging aids */
799 			if (vnlru_nowhere < 5)
800 				printf("vnlru process getting nowhere..\n");
801 			else if (vnlru_nowhere == 5)
802 				printf("vnlru process messages stopped.\n");
803 #endif
804 			vnlru_nowhere++;
805 			tsleep(vnlruproc, PPAUSE, "vlrup", hz * 3);
806 		} else
807 			uio_yield();
808 	}
809 }
810 
811 static struct kproc_desc vnlru_kp = {
812 	"vnlru",
813 	vnlru_proc,
814 	&vnlruproc
815 };
816 SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start,
817     &vnlru_kp);
818 
819 /*
820  * Routines having to do with the management of the vnode table.
821  */
822 
823 void
824 vdestroy(struct vnode *vp)
825 {
826 	struct bufobj *bo;
827 
828 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
829 	mtx_lock(&vnode_free_list_mtx);
830 	numvnodes--;
831 	mtx_unlock(&vnode_free_list_mtx);
832 	bo = &vp->v_bufobj;
833 	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
834 	    ("cleaned vnode still on the free list."));
835 	VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't"));
836 	VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count"));
837 	VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count"));
838 	VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count"));
839 	VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's"));
840 	VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0"));
841 	VNASSERT(bo->bo_clean.bv_root == NULL, vp, ("cleanblkroot not NULL"));
842 	VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0"));
843 	VNASSERT(bo->bo_dirty.bv_root == NULL, vp, ("dirtyblkroot not NULL"));
844 	VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst"));
845 	VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src"));
846 	VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for .."));
847 	VI_UNLOCK(vp);
848 #ifdef MAC
849 	mac_vnode_destroy(vp);
850 #endif
851 	if (vp->v_pollinfo != NULL)
852 		destroy_vpollinfo(vp->v_pollinfo);
853 #ifdef INVARIANTS
854 	/* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
855 	vp->v_op = NULL;
856 #endif
857 	lockdestroy(vp->v_vnlock);
858 	mtx_destroy(&vp->v_interlock);
859 	mtx_destroy(BO_MTX(bo));
860 	uma_zfree(vnode_zone, vp);
861 }
862 
863 /*
864  * Try to recycle a freed vnode.  We abort if anyone picks up a reference
865  * before we actually vgone().  This function must be called with the vnode
866  * held to prevent the vnode from being returned to the free list midway
867  * through vgone().
868  */
869 static int
870 vtryrecycle(struct vnode *vp)
871 {
872 	struct mount *vnmp;
873 
874 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
875 	VNASSERT(vp->v_holdcnt, vp,
876 	    ("vtryrecycle: Recycling vp %p without a reference.", vp));
877 	/*
878 	 * This vnode may be found and locked via some other list; if so we
879 	 * can't recycle it yet.
880 	 */
881 	if (VOP_LOCK(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0) {
882 		CTR2(KTR_VFS,
883 		    "%s: impossible to recycle, vp %p lock is already held",
884 		    __func__, vp);
885 		return (EWOULDBLOCK);
886 	}
887 	/*
888 	 * Don't recycle if its filesystem is being suspended.
889 	 */
890 	if (vn_start_write(vp, &vnmp, V_NOWAIT) != 0) {
891 		VOP_UNLOCK(vp, 0);
892 		CTR2(KTR_VFS,
893 		    "%s: impossible to recycle, cannot start the write for %p",
894 		    __func__, vp);
895 		return (EBUSY);
896 	}
897 	/*
898 	 * If we got this far, we need to acquire the interlock and see if
899 	 * anyone picked up this vnode from another list.  If not, we will
900 	 * mark it with DOOMED via vgonel() so that anyone who does find it
901 	 * will skip over it.
902 	 */
903 	VI_LOCK(vp);
904 	if (vp->v_usecount) {
905 		VOP_UNLOCK(vp, LK_INTERLOCK);
906 		vn_finished_write(vnmp);
907 		CTR2(KTR_VFS,
908 		    "%s: impossible to recycle, %p is already referenced",
909 		    __func__, vp);
910 		return (EBUSY);
911 	}
912 	if ((vp->v_iflag & VI_DOOMED) == 0)
913 		vgonel(vp);
914 	VOP_UNLOCK(vp, LK_INTERLOCK);
915 	vn_finished_write(vnmp);
916 	return (0);
917 }
918 
919 /*
920  * Allocate a new vnode, recycling free vnodes as needed to respect the limits.
921  */
922 int
923 getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
924     struct vnode **vpp)
925 {
926 	struct vnode *vp = NULL;
927 	struct bufobj *bo;
928 
929 	CTR3(KTR_VFS, "%s: mp %p with tag %s", __func__, mp, tag);
930 	mtx_lock(&vnode_free_list_mtx);
931 	/*
932 	 * Lend our context to reclaim vnodes if they've exceeded the max.
933 	 */
934 	if (freevnodes > wantfreevnodes)
935 		vnlru_free(1);
936 	/*
937 	 * Wait for available vnodes.
938 	 */
939 	if (numvnodes > desiredvnodes) {
940 		if (mp != NULL && (mp->mnt_kern_flag & MNTK_SUSPEND)) {
941 			/*
942 			 * The file system is being suspended; we cannot risk a
943 			 * deadlock here, so allocate a new vnode anyway.
944 			 */
945 			if (freevnodes > wantfreevnodes)
946 				vnlru_free(freevnodes - wantfreevnodes);
947 			goto alloc;
948 		}
949 		if (vnlruproc_sig == 0) {
950 			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
951 			wakeup(vnlruproc);
952 		}
953 		msleep(&vnlruproc_sig, &vnode_free_list_mtx, PVFS,
954 		    "vlruwk", hz);
955 #if 0	/* XXX Not all VFS_VGET/ffs_vget callers check returns. */
956 		if (numvnodes > desiredvnodes) {
957 			mtx_unlock(&vnode_free_list_mtx);
958 			return (ENFILE);
959 		}
960 #endif
961 	}
962 alloc:
963 	numvnodes++;
964 	mtx_unlock(&vnode_free_list_mtx);
965 	vp = (struct vnode *) uma_zalloc(vnode_zone, M_WAITOK|M_ZERO);
966 	/*
967 	 * Setup locks.
968 	 */
969 	vp->v_vnlock = &vp->v_lock;
970 	mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF);
971 	/*
972 	 * By default, don't allow shared locks unless filesystems
973 	 * opt-in.
974 	 */
975 	lockinit(vp->v_vnlock, PVFS, tag, VLKTIMEOUT, LK_NOSHARE);
976 	/*
977 	 * Initialize bufobj.
978 	 */
979 	bo = &vp->v_bufobj;
980 	bo->__bo_vnode = vp;
981 	mtx_init(BO_MTX(bo), "bufobj interlock", NULL, MTX_DEF);
982 	bo->bo_ops = &buf_ops_bio;
983 	bo->bo_private = vp;
984 	TAILQ_INIT(&bo->bo_clean.bv_hd);
985 	TAILQ_INIT(&bo->bo_dirty.bv_hd);
986 	/*
987 	 * Initialize namecache.
988 	 */
989 	LIST_INIT(&vp->v_cache_src);
990 	TAILQ_INIT(&vp->v_cache_dst);
991 	/*
992 	 * Finalize various vnode identity bits.
993 	 */
994 	vp->v_type = VNON;
995 	vp->v_tag = tag;
996 	vp->v_op = vops;
997 	v_incr_usecount(vp);
998 	vp->v_data = 0;
999 #ifdef MAC
1000 	mac_vnode_init(vp);
1001 	if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0)
1002 		mac_vnode_associate_singlelabel(mp, vp);
1003 	else if (mp == NULL && vops != &dead_vnodeops)
1004 		printf("NULL mp in getnewvnode()\n");
1005 #endif
1006 	if (mp != NULL) {
1007 		bo->bo_bsize = mp->mnt_stat.f_iosize;
1008 		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
1009 			vp->v_vflag |= VV_NOKNOTE;
1010 	}
1011 
1012 	*vpp = vp;
1013 	return (0);
1014 }
1015 
1016 /*
1017  * Delete from old mount point vnode list, if on one.
1018  */
1019 static void
1020 delmntque(struct vnode *vp)
1021 {
1022 	struct mount *mp;
1023 
1024 	mp = vp->v_mount;
1025 	if (mp == NULL)
1026 		return;
1027 	MNT_ILOCK(mp);
1028 	vp->v_mount = NULL;
1029 	VNASSERT(mp->mnt_nvnodelistsize > 0, vp,
1030 		("bad mount point vnode list size"));
1031 	TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1032 	mp->mnt_nvnodelistsize--;
1033 	MNT_REL(mp);
1034 	MNT_IUNLOCK(mp);
1035 }
1036 
1037 static void
1038 insmntque_stddtr(struct vnode *vp, void *dtr_arg)
1039 {
1040 
1041 	vp->v_data = NULL;
1042 	vp->v_op = &dead_vnodeops;
1043 	/* XXX non mp-safe fs may still call insmntque with vnode
1044 	   unlocked */
1045 	if (!VOP_ISLOCKED(vp))
1046 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1047 	vgone(vp);
1048 	vput(vp);
1049 }
1050 
1051 /*
1052  * Insert into list of vnodes for the new mount point, if available.
1053  */
1054 int
1055 insmntque1(struct vnode *vp, struct mount *mp,
1056 	void (*dtr)(struct vnode *, void *), void *dtr_arg)
1057 {
1058 	int locked;
1059 
1060 	KASSERT(vp->v_mount == NULL,
1061 		("insmntque: vnode already on per mount vnode list"));
1062 	VNASSERT(mp != NULL, vp, ("Don't call insmntque(foo, NULL)"));
1063 #ifdef DEBUG_VFS_LOCKS
1064 	if (!VFS_NEEDSGIANT(mp))
1065 		ASSERT_VOP_ELOCKED(vp,
1066 		    "insmntque: mp-safe fs and non-locked vp");
1067 #endif
1068 	MNT_ILOCK(mp);
1069 	if ((mp->mnt_kern_flag & MNTK_NOINSMNTQ) != 0 &&
1070 	    ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 ||
1071 	     mp->mnt_nvnodelistsize == 0)) {
1072 		locked = VOP_ISLOCKED(vp);
1073 		if (!locked || (locked == LK_EXCLUSIVE &&
1074 		     (vp->v_vflag & VV_FORCEINSMQ) == 0)) {
1075 			MNT_IUNLOCK(mp);
1076 			if (dtr != NULL)
1077 				dtr(vp, dtr_arg);
1078 			return (EBUSY);
1079 		}
1080 	}
1081 	vp->v_mount = mp;
1082 	MNT_REF(mp);
1083 	TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
1084 	VNASSERT(mp->mnt_nvnodelistsize >= 0, vp,
1085 		("neg mount point vnode list size"));
1086 	mp->mnt_nvnodelistsize++;
1087 	MNT_IUNLOCK(mp);
1088 	return (0);
1089 }
1090 
1091 int
1092 insmntque(struct vnode *vp, struct mount *mp)
1093 {
1094 
1095 	return (insmntque1(vp, mp, insmntque_stddtr, NULL));
1096 }
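
/*
 * Illustrative only: a rough sketch of how a filesystem's VFS_VGET()
 * implementation typically pairs getnewvnode() with insmntque().  The tag
 * and vop vector names are assumptions; inode hashing and loading are
 * omitted.  On insmntque() failure the standard destructor has already
 * reclaimed and released the vnode, so the caller just returns the error:
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 *	... initialize v_data, v_type and the rest of the vnode ...
 *	*vpp = vp;
 *	return (0);
 */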
1097 
1098 /*
1099  * Flush out and invalidate all buffers associated with a bufobj
1100  * Called with the underlying object locked.
1101  */
1102 int
1103 bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
1104 {
1105 	int error;
1106 
1107 	BO_LOCK(bo);
1108 	if (flags & V_SAVE) {
1109 		error = bufobj_wwait(bo, slpflag, slptimeo);
1110 		if (error) {
1111 			BO_UNLOCK(bo);
1112 			return (error);
1113 		}
1114 		if (bo->bo_dirty.bv_cnt > 0) {
1115 			BO_UNLOCK(bo);
1116 			if ((error = BO_SYNC(bo, MNT_WAIT)) != 0)
1117 				return (error);
1118 			/*
1119 			 * XXX We could save a lock/unlock if this was only
1120 			 * enabled under INVARIANTS
1121 			 */
1122 			BO_LOCK(bo);
1123 			if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0)
1124 				panic("vinvalbuf: dirty bufs");
1125 		}
1126 	}
1127 	/*
1128 	 * If you alter this loop please notice that interlock is dropped and
1129 	 * reacquired in flushbuflist.  Special care is needed to ensure that
1130 	 * no race conditions occur from this.
1131 	 */
1132 	do {
1133 		error = flushbuflist(&bo->bo_clean,
1134 		    flags, bo, slpflag, slptimeo);
1135 		if (error == 0)
1136 			error = flushbuflist(&bo->bo_dirty,
1137 			    flags, bo, slpflag, slptimeo);
1138 		if (error != 0 && error != EAGAIN) {
1139 			BO_UNLOCK(bo);
1140 			return (error);
1141 		}
1142 	} while (error != 0);
1143 
1144 	/*
1145 	 * Wait for I/O to complete.  XXX needs cleaning up.  The vnode can
1146 	 * have write I/O in-progress but if there is a VM object then the
1147 	 * VM object can also have read-I/O in-progress.
1148 	 */
1149 	do {
1150 		bufobj_wwait(bo, 0, 0);
1151 		BO_UNLOCK(bo);
1152 		if (bo->bo_object != NULL) {
1153 			VM_OBJECT_LOCK(bo->bo_object);
1154 			vm_object_pip_wait(bo->bo_object, "bovlbx");
1155 			VM_OBJECT_UNLOCK(bo->bo_object);
1156 		}
1157 		BO_LOCK(bo);
1158 	} while (bo->bo_numoutput > 0);
1159 	BO_UNLOCK(bo);
1160 
1161 	/*
1162 	 * Destroy the copy in the VM cache, too.
1163 	 */
1164 	if (bo->bo_object != NULL && (flags & (V_ALT | V_NORMAL)) == 0) {
1165 		VM_OBJECT_LOCK(bo->bo_object);
1166 		vm_object_page_remove(bo->bo_object, 0, 0,
1167 			(flags & V_SAVE) ? TRUE : FALSE);
1168 		VM_OBJECT_UNLOCK(bo->bo_object);
1169 	}
1170 
1171 #ifdef INVARIANTS
1172 	BO_LOCK(bo);
1173 	if ((flags & (V_ALT | V_NORMAL)) == 0 &&
1174 	    (bo->bo_dirty.bv_cnt > 0 || bo->bo_clean.bv_cnt > 0))
1175 		panic("vinvalbuf: flush failed");
1176 	BO_UNLOCK(bo);
1177 #endif
1178 	return (0);
1179 }
1180 
1181 /*
1182  * Flush out and invalidate all buffers associated with a vnode.
1183  * Called with the underlying object locked.
1184  */
1185 int
1186 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
1187 {
1188 
1189 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
1190 	ASSERT_VOP_LOCKED(vp, "vinvalbuf");
1191 	return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo));
1192 }
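
/*
 * Illustrative only: a typical vinvalbuf() call.  The vnode must already be
 * locked by the caller; V_SAVE asks that dirty buffers be written out before
 * being invalidated (sketch, error handling elided):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *	VOP_UNLOCK(vp, 0);
 */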
1193 
1194 /*
1195  * Flush out buffers on the specified list.
1196  *
1197  */
1198 static int
1199 flushbuflist( struct bufv *bufv, int flags, struct bufobj *bo, int slpflag,
1200     int slptimeo)
1201 {
1202 	struct buf *bp, *nbp;
1203 	int retval, error;
1204 	daddr_t lblkno;
1205 	b_xflags_t xflags;
1206 
1207 	ASSERT_BO_LOCKED(bo);
1208 
1209 	retval = 0;
1210 	TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) {
1211 		if (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA)) ||
1212 		    ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0)) {
1213 			continue;
1214 		}
1215 		lblkno = 0;
1216 		xflags = 0;
1217 		if (nbp != NULL) {
1218 			lblkno = nbp->b_lblkno;
1219 			xflags = nbp->b_xflags &
1220 				(BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN);
1221 		}
1222 		retval = EAGAIN;
1223 		error = BUF_TIMELOCK(bp,
1224 		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, BO_MTX(bo),
1225 		    "flushbuf", slpflag, slptimeo);
1226 		if (error) {
1227 			BO_LOCK(bo);
1228 			return (error != ENOLCK ? error : EAGAIN);
1229 		}
1230 		KASSERT(bp->b_bufobj == bo,
1231 		    ("bp %p wrong b_bufobj %p should be %p",
1232 		    bp, bp->b_bufobj, bo));
1233 		if (bp->b_bufobj != bo) {	/* XXX: necessary ? */
1234 			BUF_UNLOCK(bp);
1235 			BO_LOCK(bo);
1236 			return (EAGAIN);
1237 		}
1238 		/*
1239 		 * XXX Since there are no node locks for NFS, I
1240 		 * believe there is a slight chance that a delayed
1241 		 * write will occur while sleeping just above, so
1242 		 * check for it.
1243 		 */
1244 		if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
1245 		    (flags & V_SAVE)) {
1246 			bremfree(bp);
1247 			bp->b_flags |= B_ASYNC;
1248 			bwrite(bp);
1249 			BO_LOCK(bo);
1250 			return (EAGAIN);	/* XXX: why not loop ? */
1251 		}
1252 		bremfree(bp);
1253 		bp->b_flags |= (B_INVAL | B_RELBUF);
1254 		bp->b_flags &= ~B_ASYNC;
1255 		brelse(bp);
1256 		BO_LOCK(bo);
1257 		if (nbp != NULL &&
1258 		    (nbp->b_bufobj != bo ||
1259 		     nbp->b_lblkno != lblkno ||
1260 		     (nbp->b_xflags &
1261 		      (BX_BKGRDMARKER | BX_VNDIRTY | BX_VNCLEAN)) != xflags))
1262 			break;			/* nbp invalid */
1263 	}
1264 	return (retval);
1265 }
1266 
1267 /*
1268  * Truncate a file's buffer and pages to a specified length.  This
1269  * is in lieu of the old vinvalbuf mechanism, which performed unneeded
1270  * sync activity.
1271  */
1272 int
1273 vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
1274     off_t length, int blksize)
1275 {
1276 	struct buf *bp, *nbp;
1277 	int anyfreed;
1278 	int trunclbn;
1279 	struct bufobj *bo;
1280 
1281 	CTR5(KTR_VFS, "%s: vp %p with cred %p and block %d:%ju", __func__,
1282 	    vp, cred, blksize, (uintmax_t)length);
1283 
1284 	/*
1285 	 * Round up to the *next* lbn.
1286 	 */
1287 	trunclbn = (length + blksize - 1) / blksize;
1288 
1289 	ASSERT_VOP_LOCKED(vp, "vtruncbuf");
1290 restart:
1291 	bo = &vp->v_bufobj;
1292 	BO_LOCK(bo);
1293 	anyfreed = 1;
1294 	for (;anyfreed;) {
1295 		anyfreed = 0;
1296 		TAILQ_FOREACH_SAFE(bp, &bo->bo_clean.bv_hd, b_bobufs, nbp) {
1297 			if (bp->b_lblkno < trunclbn)
1298 				continue;
1299 			if (BUF_LOCK(bp,
1300 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1301 			    BO_MTX(bo)) == ENOLCK)
1302 				goto restart;
1303 
1304 			bremfree(bp);
1305 			bp->b_flags |= (B_INVAL | B_RELBUF);
1306 			bp->b_flags &= ~B_ASYNC;
1307 			brelse(bp);
1308 			anyfreed = 1;
1309 
1310 			if (nbp != NULL &&
1311 			    (((nbp->b_xflags & BX_VNCLEAN) == 0) ||
1312 			    (nbp->b_vp != vp) ||
1313 			    (nbp->b_flags & B_DELWRI))) {
1314 				goto restart;
1315 			}
1316 			BO_LOCK(bo);
1317 		}
1318 
1319 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1320 			if (bp->b_lblkno < trunclbn)
1321 				continue;
1322 			if (BUF_LOCK(bp,
1323 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1324 			    BO_MTX(bo)) == ENOLCK)
1325 				goto restart;
1326 			bremfree(bp);
1327 			bp->b_flags |= (B_INVAL | B_RELBUF);
1328 			bp->b_flags &= ~B_ASYNC;
1329 			brelse(bp);
1330 			anyfreed = 1;
1331 			if (nbp != NULL &&
1332 			    (((nbp->b_xflags & BX_VNDIRTY) == 0) ||
1333 			    (nbp->b_vp != vp) ||
1334 			    (nbp->b_flags & B_DELWRI) == 0)) {
1335 				goto restart;
1336 			}
1337 			BO_LOCK(bo);
1338 		}
1339 	}
1340 
1341 	if (length > 0) {
1342 restartsync:
1343 		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
1344 			if (bp->b_lblkno > 0)
1345 				continue;
1346 			/*
1347 			 * Since we hold the vnode lock this should only
1348 			 * fail if we're racing with the buf daemon.
1349 			 */
1350 			if (BUF_LOCK(bp,
1351 			    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
1352 			    BO_MTX(bo)) == ENOLCK) {
1353 				goto restart;
1354 			}
1355 			VNASSERT((bp->b_flags & B_DELWRI), vp,
1356 			    ("buf(%p) on dirty queue without DELWRI", bp));
1357 
1358 			bremfree(bp);
1359 			bawrite(bp);
1360 			BO_LOCK(bo);
1361 			goto restartsync;
1362 		}
1363 	}
1364 
1365 	bufobj_wwait(bo, 0, 0);
1366 	BO_UNLOCK(bo);
1367 	vnode_pager_setsize(vp, length);
1368 
1369 	return (0);
1370 }
1371 
1372 /*
1373  * buf_splay() - splay tree core for the clean/dirty list of buffers in
1374  * 		 a vnode.
1375  *
1376  *	NOTE: We have to deal with the special case of a background bitmap
1377  *	buffer, a situation where two buffers will have the same logical
1378  *	block offset.  We want (1) only the foreground buffer to be accessed
1379  *	in a lookup and (2) must differentiate between the foreground and
1380  *	background buffer in the splay tree algorithm because the splay
1381  *	tree cannot normally handle multiple entities with the same 'index'.
1382  *	We accomplish this by adding differentiating flags to the splay tree's
1383  *	numerical domain.
1384  */
1385 static
1386 struct buf *
1387 buf_splay(daddr_t lblkno, b_xflags_t xflags, struct buf *root)
1388 {
1389 	struct buf dummy;
1390 	struct buf *lefttreemax, *righttreemin, *y;
1391 
1392 	if (root == NULL)
1393 		return (NULL);
1394 	lefttreemax = righttreemin = &dummy;
1395 	for (;;) {
1396 		if (lblkno < root->b_lblkno ||
1397 		    (lblkno == root->b_lblkno &&
1398 		    (xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1399 			if ((y = root->b_left) == NULL)
1400 				break;
1401 			if (lblkno < y->b_lblkno) {
1402 				/* Rotate right. */
1403 				root->b_left = y->b_right;
1404 				y->b_right = root;
1405 				root = y;
1406 				if ((y = root->b_left) == NULL)
1407 					break;
1408 			}
1409 			/* Link into the new root's right tree. */
1410 			righttreemin->b_left = root;
1411 			righttreemin = root;
1412 		} else if (lblkno > root->b_lblkno ||
1413 		    (lblkno == root->b_lblkno &&
1414 		    (xflags & BX_BKGRDMARKER) > (root->b_xflags & BX_BKGRDMARKER))) {
1415 			if ((y = root->b_right) == NULL)
1416 				break;
1417 			if (lblkno > y->b_lblkno) {
1418 				/* Rotate left. */
1419 				root->b_right = y->b_left;
1420 				y->b_left = root;
1421 				root = y;
1422 				if ((y = root->b_right) == NULL)
1423 					break;
1424 			}
1425 			/* Link into the new root's left tree. */
1426 			lefttreemax->b_right = root;
1427 			lefttreemax = root;
1428 		} else {
1429 			break;
1430 		}
1431 		root = y;
1432 	}
1433 	/* Assemble the new root. */
1434 	lefttreemax->b_right = root->b_left;
1435 	righttreemin->b_left = root->b_right;
1436 	root->b_left = dummy.b_right;
1437 	root->b_right = dummy.b_left;
1438 	return (root);
1439 }
1440 
1441 static void
1442 buf_vlist_remove(struct buf *bp)
1443 {
1444 	struct buf *root;
1445 	struct bufv *bv;
1446 
1447 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
1448 	ASSERT_BO_LOCKED(bp->b_bufobj);
1449 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) !=
1450 	    (BX_VNDIRTY|BX_VNCLEAN),
1451 	    ("buf_vlist_remove: Buf %p is on two lists", bp));
1452 	if (bp->b_xflags & BX_VNDIRTY)
1453 		bv = &bp->b_bufobj->bo_dirty;
1454 	else
1455 		bv = &bp->b_bufobj->bo_clean;
1456 	if (bp != bv->bv_root) {
1457 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1458 		KASSERT(root == bp, ("splay lookup failed in remove"));
1459 	}
1460 	if (bp->b_left == NULL) {
1461 		root = bp->b_right;
1462 	} else {
1463 		root = buf_splay(bp->b_lblkno, bp->b_xflags, bp->b_left);
1464 		root->b_right = bp->b_right;
1465 	}
1466 	bv->bv_root = root;
1467 	TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs);
1468 	bv->bv_cnt--;
1469 	bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN);
1470 }
1471 
1472 /*
1473  * Add the buffer to the sorted clean or dirty block list using a
1474  * splay tree algorithm.
1475  *
1476  * NOTE: xflags is passed as a constant, optimizing this inline function!
1477  */
1478 static void
1479 buf_vlist_add(struct buf *bp, struct bufobj *bo, b_xflags_t xflags)
1480 {
1481 	struct buf *root;
1482 	struct bufv *bv;
1483 
1484 	ASSERT_BO_LOCKED(bo);
1485 	KASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0,
1486 	    ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags));
1487 	bp->b_xflags |= xflags;
1488 	if (xflags & BX_VNDIRTY)
1489 		bv = &bo->bo_dirty;
1490 	else
1491 		bv = &bo->bo_clean;
1492 
1493 	root = buf_splay(bp->b_lblkno, bp->b_xflags, bv->bv_root);
1494 	if (root == NULL) {
1495 		bp->b_left = NULL;
1496 		bp->b_right = NULL;
1497 		TAILQ_INSERT_TAIL(&bv->bv_hd, bp, b_bobufs);
1498 	} else if (bp->b_lblkno < root->b_lblkno ||
1499 	    (bp->b_lblkno == root->b_lblkno &&
1500 	    (bp->b_xflags & BX_BKGRDMARKER) < (root->b_xflags & BX_BKGRDMARKER))) {
1501 		bp->b_left = root->b_left;
1502 		bp->b_right = root;
1503 		root->b_left = NULL;
1504 		TAILQ_INSERT_BEFORE(root, bp, b_bobufs);
1505 	} else {
1506 		bp->b_right = root->b_right;
1507 		bp->b_left = root;
1508 		root->b_right = NULL;
1509 		TAILQ_INSERT_AFTER(&bv->bv_hd, root, bp, b_bobufs);
1510 	}
1511 	bv->bv_cnt++;
1512 	bv->bv_root = bp;
1513 }
1514 
1515 /*
1516  * Lookup a buffer using the splay tree.  Note that we specifically avoid
1517  * shadow buffers used in background bitmap writes.
1518  *
1519  * This code isn't quite as efficient as it could be because we are maintaining
1520  * two sorted lists and do not know which list the block resides in.
1521  *
1522  * During a "make buildworld" the desired buffer is found at one of
1523  * the roots more than 60% of the time.  Thus, checking both roots
1524  * before performing either splay eliminates unnecessary splays on the
1525  * first tree splayed.
1526  */
1527 struct buf *
1528 gbincore(struct bufobj *bo, daddr_t lblkno)
1529 {
1530 	struct buf *bp;
1531 
1532 	ASSERT_BO_LOCKED(bo);
1533 	if ((bp = bo->bo_clean.bv_root) != NULL &&
1534 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1535 		return (bp);
1536 	if ((bp = bo->bo_dirty.bv_root) != NULL &&
1537 	    bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1538 		return (bp);
1539 	if ((bp = bo->bo_clean.bv_root) != NULL) {
1540 		bo->bo_clean.bv_root = bp = buf_splay(lblkno, 0, bp);
1541 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1542 			return (bp);
1543 	}
1544 	if ((bp = bo->bo_dirty.bv_root) != NULL) {
1545 		bo->bo_dirty.bv_root = bp = buf_splay(lblkno, 0, bp);
1546 		if (bp->b_lblkno == lblkno && !(bp->b_xflags & BX_BKGRDMARKER))
1547 			return (bp);
1548 	}
1549 	return (NULL);
1550 }
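
/*
 * Illustrative only: gbincore() must be called with the bufobj lock held,
 * e.g. (sketch):
 *
 *	BO_LOCK(bo);
 *	bp = gbincore(bo, lblkno);
 *	if (bp != NULL)
 *		... the buffer is resident; lock it before use ...
 *	BO_UNLOCK(bo);
 */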
1551 
1552 /*
1553  * Associate a buffer with a vnode.
1554  */
1555 void
1556 bgetvp(struct vnode *vp, struct buf *bp)
1557 {
1558 	struct bufobj *bo;
1559 
1560 	bo = &vp->v_bufobj;
1561 	ASSERT_BO_LOCKED(bo);
1562 	VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free"));
1563 
1564 	CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags);
1565 	VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp,
1566 	    ("bgetvp: bp already attached! %p", bp));
1567 
1568 	vhold(vp);
1569 	if (VFS_NEEDSGIANT(vp->v_mount) || bo->bo_flag & BO_NEEDSGIANT)
1570 		bp->b_flags |= B_NEEDSGIANT;
1571 	bp->b_vp = vp;
1572 	bp->b_bufobj = bo;
1573 	/*
1574 	 * Insert onto list for new vnode.
1575 	 */
1576 	buf_vlist_add(bp, bo, BX_VNCLEAN);
1577 }
1578 
1579 /*
1580  * Disassociate a buffer from a vnode.
1581  */
1582 void
1583 brelvp(struct buf *bp)
1584 {
1585 	struct bufobj *bo;
1586 	struct vnode *vp;
1587 
1588 	CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
1589 	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));
1590 
1591 	/*
1592 	 * Delete from old vnode list, if on one.
1593 	 */
1594 	vp = bp->b_vp;		/* XXX */
1595 	bo = bp->b_bufobj;
1596 	BO_LOCK(bo);
1597 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1598 		buf_vlist_remove(bp);
1599 	else
1600 		panic("brelvp: Buffer %p not on queue.", bp);
1601 	if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1602 		bo->bo_flag &= ~BO_ONWORKLST;
1603 		mtx_lock(&sync_mtx);
1604 		LIST_REMOVE(bo, bo_synclist);
1605 		syncer_worklist_len--;
1606 		mtx_unlock(&sync_mtx);
1607 	}
1608 	bp->b_flags &= ~B_NEEDSGIANT;
1609 	bp->b_vp = NULL;
1610 	bp->b_bufobj = NULL;
1611 	BO_UNLOCK(bo);
1612 	vdrop(vp);
1613 }
1614 
1615 /*
1616  * Add an item to the syncer work queue.
1617  */
1618 static void
1619 vn_syncer_add_to_worklist(struct bufobj *bo, int delay)
1620 {
1621 	int queue, slot;
1622 
1623 	ASSERT_BO_LOCKED(bo);
1624 
1625 	mtx_lock(&sync_mtx);
1626 	if (bo->bo_flag & BO_ONWORKLST)
1627 		LIST_REMOVE(bo, bo_synclist);
1628 	else {
1629 		bo->bo_flag |= BO_ONWORKLST;
1630 		syncer_worklist_len++;
1631 	}
1632 
1633 	if (delay > syncer_maxdelay - 2)
1634 		delay = syncer_maxdelay - 2;
1635 	slot = (syncer_delayno + delay) & syncer_mask;
1636 
1637 	queue = VFS_NEEDSGIANT(bo->__bo_vnode->v_mount) ? WI_GIANTQ :
1638 	    WI_MPSAFEQ;
1639 	LIST_INSERT_HEAD(&syncer_workitem_pending[queue][slot], bo,
1640 	    bo_synclist);
1641 	mtx_unlock(&sync_mtx);
1642 }
1643 
1644 static int
1645 sysctl_vfs_worklist_len(SYSCTL_HANDLER_ARGS)
1646 {
1647 	int error, len;
1648 
1649 	mtx_lock(&sync_mtx);
1650 	len = syncer_worklist_len - sync_vnode_count;
1651 	mtx_unlock(&sync_mtx);
1652 	error = SYSCTL_OUT(req, &len, sizeof(len));
1653 	return (error);
1654 }
1655 
1656 SYSCTL_PROC(_vfs, OID_AUTO, worklist_len, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
1657     sysctl_vfs_worklist_len, "I", "Syncer thread worklist length");
1658 
1659 static struct proc *updateproc;
1660 static void sched_sync(void);
1661 static struct kproc_desc up_kp = {
1662 	"syncer",
1663 	sched_sync,
1664 	&updateproc
1665 };
1666 SYSINIT(syncer, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp);
1667 
1668 static int
1669 sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
1670 {
1671 	struct vnode *vp;
1672 	struct mount *mp;
1673 
1674 	*bo = LIST_FIRST(slp);
1675 	if (*bo == NULL)
1676 		return (0);
1677 	vp = (*bo)->__bo_vnode;	/* XXX */
1678 	if (VOP_ISLOCKED(vp) != 0 || VI_TRYLOCK(vp) == 0)
1679 		return (1);
1680 	/*
1681 	 * We use vhold in case the vnode does not
1682 	 * successfully sync.  vhold prevents the vnode from
1683 	 * going away when we unlock the sync_mtx so that
1684 	 * we can acquire the vnode interlock.
1685 	 */
1686 	vholdl(vp);
1687 	mtx_unlock(&sync_mtx);
1688 	VI_UNLOCK(vp);
1689 	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
1690 		vdrop(vp);
1691 		mtx_lock(&sync_mtx);
1692 		return (*bo == LIST_FIRST(slp));
1693 	}
1694 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1695 	(void) VOP_FSYNC(vp, MNT_LAZY, td);
1696 	VOP_UNLOCK(vp, 0);
1697 	vn_finished_write(mp);
1698 	BO_LOCK(*bo);
1699 	if (((*bo)->bo_flag & BO_ONWORKLST) != 0) {
1700 		/*
1701 		 * Put us back on the worklist.  The worklist
1702 		 * routine will remove us from our current
1703 		 * position and then add us back in at a later
1704 		 * position.
1705 		 */
1706 		vn_syncer_add_to_worklist(*bo, syncdelay);
1707 	}
1708 	BO_UNLOCK(*bo);
1709 	vdrop(vp);
1710 	mtx_lock(&sync_mtx);
1711 	return (0);
1712 }
1713 
1714 /*
1715  * System filesystem synchronizer daemon.
1716  */
1717 static void
1718 sched_sync(void)
1719 {
1720 	struct synclist *gnext, *next;
1721 	struct synclist *gslp, *slp;
1722 	struct bufobj *bo;
1723 	long starttime;
1724 	struct thread *td = curthread;
1725 	int last_work_seen;
1726 	int net_worklist_len;
1727 	int syncer_final_iter;
1728 	int first_printf;
1729 	int error;
1730 
1731 	last_work_seen = 0;
1732 	syncer_final_iter = 0;
1733 	first_printf = 1;
1734 	syncer_state = SYNCER_RUNNING;
1735 	starttime = time_uptime;
1736 	td->td_pflags |= TDP_NORUNNINGBUF;
1737 
1738 	EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc,
1739 	    SHUTDOWN_PRI_LAST);
1740 
1741 	mtx_lock(&sync_mtx);
1742 	for (;;) {
1743 		if (syncer_state == SYNCER_FINAL_DELAY &&
1744 		    syncer_final_iter == 0) {
1745 			mtx_unlock(&sync_mtx);
1746 			kproc_suspend_check(td->td_proc);
1747 			mtx_lock(&sync_mtx);
1748 		}
1749 		net_worklist_len = syncer_worklist_len - sync_vnode_count;
1750 		if (syncer_state != SYNCER_RUNNING &&
1751 		    starttime != time_uptime) {
1752 			if (first_printf) {
1753 				printf("\nSyncing disks, vnodes remaining...");
1754 				first_printf = 0;
1755 			}
1756 			printf("%d ", net_worklist_len);
1757 		}
1758 		starttime = time_uptime;
1759 
1760 		/*
1761 		 * Push files whose dirty time has expired.  Be careful
1762 		 * of interrupt race on slp queue.
1763 		 *
1764 		 * Skip over empty worklist slots when shutting down.
1765 		 */
1766 		do {
1767 			slp = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno];
1768 			gslp = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno];
1769 			syncer_delayno += 1;
1770 			if (syncer_delayno == syncer_maxdelay)
1771 				syncer_delayno = 0;
1772 			next = &syncer_workitem_pending[WI_MPSAFEQ][syncer_delayno];
1773 			gnext = &syncer_workitem_pending[WI_GIANTQ][syncer_delayno];
1774 			/*
1775 			 * If the worklist has wrapped since it was
1776 			 * emptied of all but syncer vnodes,
1777 			 * switch to the FINAL_DELAY state and run
1778 			 * for one more second.
1779 			 */
1780 			if (syncer_state == SYNCER_SHUTTING_DOWN &&
1781 			    net_worklist_len == 0 &&
1782 			    last_work_seen == syncer_delayno) {
1783 				syncer_state = SYNCER_FINAL_DELAY;
1784 				syncer_final_iter = SYNCER_SHUTDOWN_SPEEDUP;
1785 			}
1786 		} while (syncer_state != SYNCER_RUNNING && LIST_EMPTY(slp) &&
1787 		    LIST_EMPTY(gslp) && syncer_worklist_len > 0);
1788 
1789 		/*
1790 		 * Keep track of the last time there was anything
1791 		 * on the worklist other than syncer vnodes.
1792 		 * Return to the SHUTTING_DOWN state if any
1793 		 * new work appears.
1794 		 */
1795 		if (net_worklist_len > 0 || syncer_state == SYNCER_RUNNING)
1796 			last_work_seen = syncer_delayno;
1797 		if (net_worklist_len > 0 && syncer_state == SYNCER_FINAL_DELAY)
1798 			syncer_state = SYNCER_SHUTTING_DOWN;
1799 		while (!LIST_EMPTY(slp)) {
1800 			error = sync_vnode(slp, &bo, td);
1801 			if (error == 1) {
1802 				LIST_REMOVE(bo, bo_synclist);
1803 				LIST_INSERT_HEAD(next, bo, bo_synclist);
1804 				continue;
1805 			}
1806 		}
1807 		if (!LIST_EMPTY(gslp)) {
1808 			mtx_unlock(&sync_mtx);
1809 			mtx_lock(&Giant);
1810 			mtx_lock(&sync_mtx);
1811 			while (!LIST_EMPTY(gslp)) {
1812 				error = sync_vnode(gslp, &bo, td);
1813 				if (error == 1) {
1814 					LIST_REMOVE(bo, bo_synclist);
1815 					LIST_INSERT_HEAD(gnext, bo,
1816 					    bo_synclist);
1817 					continue;
1818 				}
1819 			}
1820 			mtx_unlock(&Giant);
1821 		}
1822 		if (syncer_state == SYNCER_FINAL_DELAY && syncer_final_iter > 0)
1823 			syncer_final_iter--;
1824 		/*
1825 		 * The variable rushjob allows the kernel to speed up the
1826 		 * processing of the filesystem syncer process. A rushjob
1827 		 * value of N tells the filesystem syncer to process the next
1828 		 * N seconds worth of work on its queue ASAP. Currently rushjob
1829 		 * is used by the soft update code to speed up the filesystem
1830 		 * syncer process when the incore state is getting so far
1831 		 * ahead of the disk that the kernel memory pool is being
1832 		 * threatened with exhaustion.
1833 		 */
1834 		if (rushjob > 0) {
1835 			rushjob -= 1;
1836 			continue;
1837 		}
1838 		/*
1839 		 * Just sleep for a short period of time between
1840 		 * iterations when shutting down to allow some I/O
1841 		 * to happen.
1842 		 *
1843 		 * If it has taken us less than a second to process the
1844 		 * current work, then wait. Otherwise start right over
1845 		 * again. We can still lose time if any single round
1846 		 * takes more than two seconds, but it does not really
1847 		 * matter as we are just trying to generally pace the
1848 		 * filesystem activity.
1849 		 */
1850 		if (syncer_state != SYNCER_RUNNING)
1851 			cv_timedwait(&sync_wakeup, &sync_mtx,
1852 			    hz / SYNCER_SHUTDOWN_SPEEDUP);
1853 		else if (time_uptime == starttime)
1854 			cv_timedwait(&sync_wakeup, &sync_mtx, hz);
1855 	}
1856 }
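
/*
 * Editorial sketch, not part of the original source: the worklist drained
 * above is a wheel of syncer_maxdelay one-second buckets, each split into a
 * Giant-free (WI_MPSAFEQ) and a Giant-requiring (WI_GIANTQ) queue.  A bufobj
 * queued via vn_syncer_add_to_worklist(bo, delay) lands roughly "delay"
 * seconds ahead of the bucket currently being drained, conceptually:
 *
 *	slot = (syncer_delayno + delay) % syncer_maxdelay;
 *	LIST_INSERT_HEAD(&syncer_workitem_pending[queue][slot], bo,
 *	    bo_synclist);
 *
 * sched_sync() advances syncer_delayno about once per second and feeds each
 * entry in that slot to sync_vnode(), so a dirty vnode is normally flushed
 * about "delay" seconds after it was queued (sooner when rushjob is set).
 */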
1857 
1858 /*
1859  * Request the syncer daemon to speed up its work.
1860  * We never push it to speed up more than half of its
1861  * normal turn time; otherwise it could take over the CPU.
1862  */
1863 int
1864 speedup_syncer(void)
1865 {
1866 	int ret = 0;
1867 
1868 	mtx_lock(&sync_mtx);
1869 	if (rushjob < syncdelay / 2) {
1870 		rushjob += 1;
1871 		stat_rush_requests += 1;
1872 		ret = 1;
1873 	}
1874 	mtx_unlock(&sync_mtx);
1875 	cv_broadcast(&sync_wakeup);
1876 	return (ret);
1877 }
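
/*
 * Illustrative caller sketch, not part of the original source: a producer of
 * dirty filesystem state that is outrunning the syncer can nudge it without
 * blocking.  The predicate name below is hypothetical:
 *
 *	if (softdep_backlog_excessive())
 *		(void) speedup_syncer();
 *
 * speedup_syncer() returns 1 when it actually queued a rush; because rushjob
 * is capped at syncdelay / 2, repeated calls cannot make the syncer
 * monopolize the CPU.
 */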
1878 
1879 /*
1880  * Tell the syncer to speed up its work and run through its work
1881  * list several times, then tell it to shut down.
1882  */
1883 static void
1884 syncer_shutdown(void *arg, int howto)
1885 {
1886 
1887 	if (howto & RB_NOSYNC)
1888 		return;
1889 	mtx_lock(&sync_mtx);
1890 	syncer_state = SYNCER_SHUTTING_DOWN;
1891 	rushjob = 0;
1892 	mtx_unlock(&sync_mtx);
1893 	cv_broadcast(&sync_wakeup);
1894 	kproc_shutdown(arg, howto);
1895 }
1896 
1897 /*
1898  * Reassign a buffer from one vnode to another.
1899  * Used to assign file specific control information
1900  * (indirect blocks) to the vnode to which they belong.
1901  */
1902 void
1903 reassignbuf(struct buf *bp)
1904 {
1905 	struct vnode *vp;
1906 	struct bufobj *bo;
1907 	int delay;
1908 #ifdef INVARIANTS
1909 	struct bufv *bv;
1910 #endif
1911 
1912 	vp = bp->b_vp;
1913 	bo = bp->b_bufobj;
1914 	++reassignbufcalls;
1915 
1916 	CTR3(KTR_BUF, "reassignbuf(%p) vp %p flags %X",
1917 	    bp, bp->b_vp, bp->b_flags);
1918 	/*
1919 	 * B_PAGING flagged buffers cannot be reassigned because their vp
1920 	 * is not fully linked in.
1921 	 */
1922 	if (bp->b_flags & B_PAGING)
1923 		panic("cannot reassign paging buffer");
1924 
1925 	/*
1926 	 * Delete from old vnode list, if on one.
1927 	 */
1928 	BO_LOCK(bo);
1929 	if (bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN))
1930 		buf_vlist_remove(bp);
1931 	else
1932 		panic("reassignbuf: Buffer %p not on queue.", bp);
1933 	/*
1934 	 * If dirty, put on list of dirty buffers; otherwise insert onto list
1935 	 * of clean buffers.
1936 	 */
1937 	if (bp->b_flags & B_DELWRI) {
1938 		if ((bo->bo_flag & BO_ONWORKLST) == 0) {
1939 			switch (vp->v_type) {
1940 			case VDIR:
1941 				delay = dirdelay;
1942 				break;
1943 			case VCHR:
1944 				delay = metadelay;
1945 				break;
1946 			default:
1947 				delay = filedelay;
1948 			}
1949 			vn_syncer_add_to_worklist(bo, delay);
1950 		}
1951 		buf_vlist_add(bp, bo, BX_VNDIRTY);
1952 	} else {
1953 		buf_vlist_add(bp, bo, BX_VNCLEAN);
1954 
1955 		if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) {
1956 			mtx_lock(&sync_mtx);
1957 			LIST_REMOVE(bo, bo_synclist);
1958 			syncer_worklist_len--;
1959 			mtx_unlock(&sync_mtx);
1960 			bo->bo_flag &= ~BO_ONWORKLST;
1961 		}
1962 	}
1963 #ifdef INVARIANTS
1964 	bv = &bo->bo_clean;
1965 	bp = TAILQ_FIRST(&bv->bv_hd);
1966 	KASSERT(bp == NULL || bp->b_bufobj == bo,
1967 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1968 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
1969 	KASSERT(bp == NULL || bp->b_bufobj == bo,
1970 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1971 	bv = &bo->bo_dirty;
1972 	bp = TAILQ_FIRST(&bv->bv_hd);
1973 	KASSERT(bp == NULL || bp->b_bufobj == bo,
1974 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1975 	bp = TAILQ_LAST(&bv->bv_hd, buflists);
1976 	KASSERT(bp == NULL || bp->b_bufobj == bo,
1977 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
1978 #endif
1979 	BO_UNLOCK(bo);
1980 }
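
/*
 * Illustrative sketch, not part of the original source: reassignbuf() is what
 * first puts a vnode on the syncer worklist when one of its buffers becomes
 * delayed-write.  A buffer-cache consumer conceptually does:
 *
 *	bp->b_flags |= B_DELWRI;
 *	reassignbuf(bp);
 *
 * (in practice both steps are performed together by the buffer layer when a
 * buffer is marked dirty).  The worklist delay then depends on the vnode
 * type: dirdelay for directories, metadelay for device metadata (VCHR), and
 * filedelay for everything else.
 */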
1981 
1982 /*
1983  * Increment the use and hold counts on the vnode, taking care to reference
1984  * the driver's usecount if this is a chardev.  The vholdl() will remove
1985  * the vnode from the free list if it is presently free.  Requires the
1986  * vnode interlock and returns with it held.
1987  */
1988 static void
1989 v_incr_usecount(struct vnode *vp)
1990 {
1991 
1992 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
1993 	vp->v_usecount++;
1994 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
1995 		dev_lock();
1996 		vp->v_rdev->si_usecount++;
1997 		dev_unlock();
1998 	}
1999 	vholdl(vp);
2000 }
2001 
2002 /*
2003  * Turn a holdcnt into a use+holdcnt such that only one call to
2004  * v_decr_usecount is needed.
2005  */
2006 static void
2007 v_upgrade_usecount(struct vnode *vp)
2008 {
2009 
2010 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2011 	vp->v_usecount++;
2012 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2013 		dev_lock();
2014 		vp->v_rdev->si_usecount++;
2015 		dev_unlock();
2016 	}
2017 }
2018 
2019 /*
2020  * Decrement the vnode use and hold count along with the driver's usecount
2021  * if this is a chardev.  The vdropl() below releases the vnode interlock
2022  * as it may free the vnode.
2023  */
2024 static void
2025 v_decr_usecount(struct vnode *vp)
2026 {
2027 
2028 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2029 	VNASSERT(vp->v_usecount > 0, vp,
2030 	    ("v_decr_usecount: negative usecount"));
2031 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2032 	vp->v_usecount--;
2033 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2034 		dev_lock();
2035 		vp->v_rdev->si_usecount--;
2036 		dev_unlock();
2037 	}
2038 	vdropl(vp);
2039 }
2040 
2041 /*
2042  * Decrement only the use count and driver use count.  This is intended to
2043  * be paired with a follow-on vdropl() to release the remaining hold count.
2044  * In this way we may vgone() a vnode with a 0 usecount without risk of
2045  * having it end up on a free list because the hold count is kept above 0.
2046  */
2047 static void
2048 v_decr_useonly(struct vnode *vp)
2049 {
2050 
2051 	ASSERT_VI_LOCKED(vp, __FUNCTION__);
2052 	VNASSERT(vp->v_usecount > 0, vp,
2053 	    ("v_decr_useonly: negative usecount"));
2054 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2055 	vp->v_usecount--;
2056 	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
2057 		dev_lock();
2058 		vp->v_rdev->si_usecount--;
2059 		dev_unlock();
2060 	}
2061 }
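
/*
 * Editorial summary, not part of the original source, of how the four
 * reference helpers above pair up:
 *
 *	v_incr_usecount()	v_usecount++ and vholdl()	(vref() path)
 *	v_upgrade_usecount()	v_usecount++ only; the caller already
 *				holds a holdcnt			(vget() path)
 *	v_decr_usecount()	v_usecount-- then vdropl()
 *	v_decr_useonly()	v_usecount-- only; the caller vdropl()s
 *				later, keeping holdcnt > 0 across
 *				VOP_INACTIVE() in vrele()/vput()
 *
 * Each of them also mirrors the change into v_rdev->si_usecount for VCHR
 * vnodes so that vcount() and count_dev() stay accurate.
 */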
2062 
2063 /*
2064  * Grab a particular vnode from the free list, increment its
2065  * reference count and lock it.  VI_DOOMED is set if the vnode
2066  * is being destroyed.  Only callers who specify LK_RETRY will
2067  * see doomed vnodes.  If inactive processing was delayed in
2068  * vput try to do it here.
2069  */
2070 int
2071 vget(struct vnode *vp, int flags, struct thread *td)
2072 {
2073 	int error;
2074 
2075 	error = 0;
2076 	VFS_ASSERT_GIANT(vp->v_mount);
2077 	VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
2078 	    ("vget: invalid lock operation"));
2079 	CTR3(KTR_VFS, "%s: vp %p with flags %d", __func__, vp, flags);
2080 
2081 	if ((flags & LK_INTERLOCK) == 0)
2082 		VI_LOCK(vp);
2083 	vholdl(vp);
2084 	if ((error = vn_lock(vp, flags | LK_INTERLOCK)) != 0) {
2085 		vdrop(vp);
2086 		CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__,
2087 		    vp);
2088 		return (error);
2089 	}
2090 	if (vp->v_iflag & VI_DOOMED && (flags & LK_RETRY) == 0)
2091 		panic("vget: vn_lock failed to return ENOENT\n");
2092 	VI_LOCK(vp);
2093 	/* Upgrade our holdcnt to a usecount. */
2094 	v_upgrade_usecount(vp);
2095 	/*
2096  	 * We don't guarantee that any particular close will
2097 	 * trigger inactive processing so just make a best effort
2098 	 * here at preventing a reference to a removed file.  If
2099 	 * we don't succeed no harm is done.
2100 	 */
2101 	if (vp->v_iflag & VI_OWEINACT) {
2102 		if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
2103 		    (flags & LK_NOWAIT) == 0)
2104 			vinactive(vp, td);
2105 		vp->v_iflag &= ~VI_OWEINACT;
2106 	}
2107 	VI_UNLOCK(vp);
2108 	return (0);
2109 }
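
/*
 * Illustrative caller sketch, not part of the original source: a scan that
 * found vp while holding its interlock hands that interlock to vget() and
 * gets back a referenced, locked vnode or an error:
 *
 *	VI_LOCK(vp);
 *	if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
 *		... use the locked, referenced vnode ...
 *		vput(vp);
 *	}
 *
 * vfs_msync() later in this file uses this pattern, adding LK_RETRY so that
 * even doomed vnodes are returned rather than failing with an error.
 */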
2110 
2111 /*
2112  * Increase the reference count of a vnode.
2113  */
2114 void
2115 vref(struct vnode *vp)
2116 {
2117 
2118 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2119 	VI_LOCK(vp);
2120 	v_incr_usecount(vp);
2121 	VI_UNLOCK(vp);
2122 }
2123 
2124 /*
2125  * Return reference count of a vnode.
2126  *
2127  * The results of this call are only guaranteed when some mechanism other
2128  * than the VI lock is used to stop other processes from gaining references
2129  * to the vnode.  This may be the case if the caller holds the only reference.
2130  * This is also useful when stale data is acceptable as race conditions may
2131  * be accounted for by some other means.
2132  */
2133 int
2134 vrefcnt(struct vnode *vp)
2135 {
2136 	int usecnt;
2137 
2138 	VI_LOCK(vp);
2139 	usecnt = vp->v_usecount;
2140 	VI_UNLOCK(vp);
2141 
2142 	return (usecnt);
2143 }
2144 
2145 
2146 /*
2147  * Vnode put/release.
2148  * If count drops to zero, call inactive routine and return to freelist.
2149  */
2150 void
2151 vrele(struct vnode *vp)
2152 {
2153 	struct thread *td = curthread;	/* XXX */
2154 
2155 	KASSERT(vp != NULL, ("vrele: null vp"));
2156 	VFS_ASSERT_GIANT(vp->v_mount);
2157 
2158 	VI_LOCK(vp);
2159 
2160 	/* Skip this v_writecount check if we're going to panic below. */
2161 	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2162 	    ("vrele: missed vn_close"));
2163 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2164 
2165 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2166 	    vp->v_usecount == 1)) {
2167 		v_decr_usecount(vp);
2168 		return;
2169 	}
2170 	if (vp->v_usecount != 1) {
2171 #ifdef DIAGNOSTIC
2172 		vprint("vrele: negative ref count", vp);
2173 #endif
2174 		VI_UNLOCK(vp);
2175 		panic("vrele: negative ref cnt");
2176 	}
2177 	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
2178 	/*
2179 	 * We want to hold the vnode until the inactive finishes to
2180 	 * prevent vgone() races.  We drop the use count here and the
2181 	 * hold count below when we're done.
2182 	 */
2183 	v_decr_useonly(vp);
2184 	/*
2185 	 * We must call VOP_INACTIVE with the node locked. Mark
2186 	 * as VI_DOINGINACT to avoid recursion.
2187 	 */
2188 	vp->v_iflag |= VI_OWEINACT;
2189 	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {
2190 		VI_LOCK(vp);
2191 		if (vp->v_usecount > 0)
2192 			vp->v_iflag &= ~VI_OWEINACT;
2193 		if (vp->v_iflag & VI_OWEINACT)
2194 			vinactive(vp, td);
2195 		VOP_UNLOCK(vp, 0);
2196 	} else {
2197 		VI_LOCK(vp);
2198 		if (vp->v_usecount > 0)
2199 			vp->v_iflag &= ~VI_OWEINACT;
2200 	}
2201 	vdropl(vp);
2202 }
2203 
2204 /*
2205  * Release an already locked vnode.  This gives the same effect as
2206  * unlock+vrele(), but takes less time and avoids releasing and
2207  * re-acquiring the lock (as vrele() acquires the lock internally).
2208  */
2209 void
2210 vput(struct vnode *vp)
2211 {
2212 	struct thread *td = curthread;	/* XXX */
2213 	int error;
2214 
2215 	KASSERT(vp != NULL, ("vput: null vp"));
2216 	ASSERT_VOP_LOCKED(vp, "vput");
2217 	VFS_ASSERT_GIANT(vp->v_mount);
2218 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2219 	VI_LOCK(vp);
2220 	/* Skip this v_writecount check if we're going to panic below. */
2221 	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
2222 	    ("vput: missed vn_close"));
2223 	error = 0;
2224 
2225 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
2226 	    vp->v_usecount == 1)) {
2227 		VOP_UNLOCK(vp, 0);
2228 		v_decr_usecount(vp);
2229 		return;
2230 	}
2231 
2232 	if (vp->v_usecount != 1) {
2233 #ifdef DIAGNOSTIC
2234 		vprint("vput: negative ref count", vp);
2235 #endif
2236 		panic("vput: negative ref cnt");
2237 	}
2238 	CTR2(KTR_VFS, "%s: return to freelist the vnode %p", __func__, vp);
2239 	/*
2240 	 * We want to hold the vnode until the inactive finishes to
2241 	 * prevent vgone() races.  We drop the use count here and the
2242 	 * hold count below when we're done.
2243 	 */
2244 	v_decr_useonly(vp);
2245 	vp->v_iflag |= VI_OWEINACT;
2246 	if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
2247 		error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT);
2248 		VI_LOCK(vp);
2249 		if (error) {
2250 			if (vp->v_usecount > 0)
2251 				vp->v_iflag &= ~VI_OWEINACT;
2252 			goto done;
2253 		}
2254 	}
2255 	if (vp->v_usecount > 0)
2256 		vp->v_iflag &= ~VI_OWEINACT;
2257 	if (vp->v_iflag & VI_OWEINACT)
2258 		vinactive(vp, td);
2259 	VOP_UNLOCK(vp, 0);
2260 done:
2261 	vdropl(vp);
2262 }
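
/*
 * Illustrative sketch, not part of the original source, of the usual pairing:
 * take a reference and the lock together, then release both at once.
 *
 *	error = vget(vp, LK_EXCLUSIVE, curthread);
 *	if (error == 0) {
 *		... operate on the locked vnode ...
 *		vput(vp);
 *	}
 *
 * If the vnode lock has already been dropped, vrele(vp) is the correct
 * release; calling vput() on an unlocked vnode trips the ASSERT_VOP_LOCKED
 * above.
 */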
2263 
2264 /*
2265  * Somebody doesn't want the vnode recycled.
2266  */
2267 void
2268 vhold(struct vnode *vp)
2269 {
2270 
2271 	VI_LOCK(vp);
2272 	vholdl(vp);
2273 	VI_UNLOCK(vp);
2274 }
2275 
2276 void
2277 vholdl(struct vnode *vp)
2278 {
2279 
2280 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2281 	vp->v_holdcnt++;
2282 	if (VSHOULDBUSY(vp))
2283 		vbusy(vp);
2284 }
2285 
2286 /*
2287  * Note that there is one less who cares about this vnode.  vdrop() is the
2288  * opposite of vhold().
2289  */
2290 void
2291 vdrop(struct vnode *vp)
2292 {
2293 
2294 	VI_LOCK(vp);
2295 	vdropl(vp);
2296 }
2297 
2298 /*
2299  * Drop the hold count of the vnode.  If this is the last reference to
2300  * the vnode, we free it if it has been vgone'd; otherwise it is
2301  * placed on the free list.
2302  */
2303 void
2304 vdropl(struct vnode *vp)
2305 {
2306 
2307 	ASSERT_VI_LOCKED(vp, "vdropl");
2308 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2309 	if (vp->v_holdcnt <= 0)
2310 		panic("vdrop: holdcnt %d", vp->v_holdcnt);
2311 	vp->v_holdcnt--;
2312 	if (vp->v_holdcnt == 0) {
2313 		if (vp->v_iflag & VI_DOOMED) {
2314 			CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__,
2315 			    vp);
2316 			vdestroy(vp);
2317 			return;
2318 		} else
2319 			vfree(vp);
2320 	}
2321 	VI_UNLOCK(vp);
2322 }
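
/*
 * Illustrative sketch, not part of the original source: vhold()/vdrop() pin a
 * vnode's identity without marking it in use, which is how code keeps a
 * vnode from being recycled across a lock drop or a sleep:
 *
 *	vholdl(vp);
 *	VI_UNLOCK(vp);
 *	... block, or acquire other locks ...
 *	vdrop(vp);
 *
 * sync_vnode() above uses exactly this pattern around its call to
 * VOP_FSYNC().
 */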
2323 
2324 /*
2325  * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
2326  * flags.  DOINGINACT prevents us from recursing in calls to vinactive.
2327  * OWEINACT tracks whether a vnode missed a call to inactive due to a
2328  * failed lock upgrade.
2329  */
2330 static void
2331 vinactive(struct vnode *vp, struct thread *td)
2332 {
2333 
2334 	ASSERT_VOP_ELOCKED(vp, "vinactive");
2335 	ASSERT_VI_LOCKED(vp, "vinactive");
2336 	VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
2337 	    ("vinactive: recursed on VI_DOINGINACT"));
2338 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2339 	vp->v_iflag |= VI_DOINGINACT;
2340 	vp->v_iflag &= ~VI_OWEINACT;
2341 	VI_UNLOCK(vp);
2342 	VOP_INACTIVE(vp, td);
2343 	VI_LOCK(vp);
2344 	VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
2345 	    ("vinactive: lost VI_DOINGINACT"));
2346 	vp->v_iflag &= ~VI_DOINGINACT;
2347 }
2348 
2349 /*
2350  * Remove any vnodes in the vnode table belonging to mount point mp.
2351  *
2352  * If FORCECLOSE is not specified, there should not be any active ones;
2353  * return an error if any are found (nb: this is a user error, not a
2354  * system error). If FORCECLOSE is specified, detach any active vnodes
2355  * that are found.
2356  *
2357  * If WRITECLOSE is set, only flush out regular file vnodes open for
2358  * writing.
2359  *
2360  * SKIPSYSTEM causes any vnodes marked VV_SYSTEM to be skipped.
2361  *
2362  * `rootrefs' specifies the base reference count for the root vnode
2363  * of this filesystem. The root vnode is considered busy if its
2364  * v_usecount exceeds this value. On a successful return, vflush()
2365  * will call vrele() on the root vnode exactly rootrefs times.
2366  * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
2367  * be zero.
2368  */
2369 #ifdef DIAGNOSTIC
2370 static int busyprt = 0;		/* print out busy vnodes */
2371 SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
2372 #endif
2373 
2374 int
2375 vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
2376 {
2377 	struct vnode *vp, *mvp, *rootvp = NULL;
2378 	struct vattr vattr;
2379 	int busy = 0, error;
2380 
2381 	CTR4(KTR_VFS, "%s: mp %p with rootrefs %d and flags %d", __func__, mp,
2382 	    rootrefs, flags);
2383 	if (rootrefs > 0) {
2384 		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
2385 		    ("vflush: bad args"));
2386 		/*
2387 		 * Get the filesystem root vnode. We can vput() it
2388 		 * immediately, since with rootrefs > 0, it won't go away.
2389 		 */
2390 		if ((error = VFS_ROOT(mp, LK_EXCLUSIVE, &rootvp)) != 0) {
2391 			CTR2(KTR_VFS, "%s: vfs_root lookup failed with %d",
2392 			    __func__, error);
2393 			return (error);
2394 		}
2395 		vput(rootvp);
2396 
2397 	}
2398 	MNT_ILOCK(mp);
2399 loop:
2400 	MNT_VNODE_FOREACH(vp, mp, mvp) {
2401 
2402 		VI_LOCK(vp);
2403 		vholdl(vp);
2404 		MNT_IUNLOCK(mp);
2405 		error = vn_lock(vp, LK_INTERLOCK | LK_EXCLUSIVE);
2406 		if (error) {
2407 			vdrop(vp);
2408 			MNT_ILOCK(mp);
2409 			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
2410 			goto loop;
2411 		}
2412 		/*
2413 		 * Skip over vnodes marked VV_SYSTEM.
2414 		 */
2415 		if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) {
2416 			VOP_UNLOCK(vp, 0);
2417 			vdrop(vp);
2418 			MNT_ILOCK(mp);
2419 			continue;
2420 		}
2421 		/*
2422 		 * If WRITECLOSE is set, flush out unlinked but still open
2423 		 * files (even if open only for reading) and regular file
2424 		 * vnodes open for writing.
2425 		 */
2426 		if (flags & WRITECLOSE) {
2427 			error = VOP_GETATTR(vp, &vattr, td->td_ucred);
2428 			VI_LOCK(vp);
2429 
2430 			if ((vp->v_type == VNON ||
2431 			    (error == 0 && vattr.va_nlink > 0)) &&
2432 			    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2433 				VOP_UNLOCK(vp, 0);
2434 				vdropl(vp);
2435 				MNT_ILOCK(mp);
2436 				continue;
2437 			}
2438 		} else
2439 			VI_LOCK(vp);
2440 		/*
2441 		 * With v_usecount == 0, all we need to do is clear out the
2442 		 * vnode data structures and we are done.
2443 		 *
2444 		 * If FORCECLOSE is set, forcibly close the vnode.
2445 		 */
2446 		if (vp->v_usecount == 0 || (flags & FORCECLOSE)) {
2447 			VNASSERT(vp->v_usecount == 0 ||
2448 			    (vp->v_type != VCHR && vp->v_type != VBLK), vp,
2449 			    ("device VNODE %p is FORCECLOSED", vp));
2450 			vgonel(vp);
2451 		} else {
2452 			busy++;
2453 #ifdef DIAGNOSTIC
2454 			if (busyprt)
2455 				vprint("vflush: busy vnode", vp);
2456 #endif
2457 		}
2458 		VOP_UNLOCK(vp, 0);
2459 		vdropl(vp);
2460 		MNT_ILOCK(mp);
2461 	}
2462 	MNT_IUNLOCK(mp);
2463 	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
2464 		/*
2465 		 * If just the root vnode is busy, and if its refcount
2466 		 * is equal to `rootrefs', then go ahead and kill it.
2467 		 */
2468 		VI_LOCK(rootvp);
2469 		KASSERT(busy > 0, ("vflush: not busy"));
2470 		VNASSERT(rootvp->v_usecount >= rootrefs, rootvp,
2471 		    ("vflush: usecount %d < rootrefs %d",
2472 		     rootvp->v_usecount, rootrefs));
2473 		if (busy == 1 && rootvp->v_usecount == rootrefs) {
2474 			VOP_LOCK(rootvp, LK_EXCLUSIVE|LK_INTERLOCK);
2475 			vgone(rootvp);
2476 			VOP_UNLOCK(rootvp, 0);
2477 			busy = 0;
2478 		} else
2479 			VI_UNLOCK(rootvp);
2480 	}
2481 	if (busy) {
2482 		CTR2(KTR_VFS, "%s: failing as %d vnodes are busy", __func__,
2483 		    busy);
2484 		return (EBUSY);
2485 	}
2486 	for (; rootrefs > 0; rootrefs--)
2487 		vrele(rootvp);
2488 	return (0);
2489 }
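
/*
 * Illustrative caller sketch, not part of the original source: an unmount
 * routine for a hypothetical filesystem that caches one reference on its
 * root vnode would flush with rootrefs = 1, adding FORCECLOSE when the
 * unmount was forced:
 *
 *	flags = (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0;
 *	error = vflush(mp, 1, flags, td);
 *	if (error != 0)
 *		return (error);
 *
 * On success vflush() has already vrele()d the cached root reference once;
 * a filesystem with no such cached reference passes rootrefs = 0.  EBUSY
 * means some vnode (or the root itself) was still in use.
 */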
2490 
2491 /*
2492  * Recycle an unused vnode to the front of the free list.
2493  */
2494 int
2495 vrecycle(struct vnode *vp, struct thread *td)
2496 {
2497 	int recycled;
2498 
2499 	ASSERT_VOP_ELOCKED(vp, "vrecycle");
2500 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2501 	recycled = 0;
2502 	VI_LOCK(vp);
2503 	if (vp->v_usecount == 0) {
2504 		recycled = 1;
2505 		vgonel(vp);
2506 	}
2507 	VI_UNLOCK(vp);
2508 	return (recycled);
2509 }
2510 
2511 /*
2512  * Eliminate all activity associated with a vnode
2513  * in preparation for reuse.
2514  */
2515 void
2516 vgone(struct vnode *vp)
2517 {
2518 	VI_LOCK(vp);
2519 	vgonel(vp);
2520 	VI_UNLOCK(vp);
2521 }
2522 
2523 /*
2524  * vgone, with the vp interlock held.
2525  */
2526 void
2527 vgonel(struct vnode *vp)
2528 {
2529 	struct thread *td;
2530 	int oweinact;
2531 	int active;
2532 	struct mount *mp;
2533 
2534 	ASSERT_VOP_ELOCKED(vp, "vgonel");
2535 	ASSERT_VI_LOCKED(vp, "vgonel");
2536 	VNASSERT(vp->v_holdcnt, vp,
2537 	    ("vgonel: vp %p has no reference.", vp));
2538 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
2539 	td = curthread;
2540 
2541 	/*
2542 	 * Don't vgonel if we're already doomed.
2543 	 */
2544 	if (vp->v_iflag & VI_DOOMED)
2545 		return;
2546 	vp->v_iflag |= VI_DOOMED;
2547 	/*
2548 	 * Check to see if the vnode is in use.  If so, we have to call
2549 	 * VOP_CLOSE() and VOP_INACTIVE().
2550 	 */
2551 	active = vp->v_usecount;
2552 	oweinact = (vp->v_iflag & VI_OWEINACT);
2553 	VI_UNLOCK(vp);
2554 	/*
2555 	 * Clean out any buffers associated with the vnode.
2556 	 * If the flush fails, just toss the buffers.
2557 	 */
2558 	mp = NULL;
2559 	if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd))
2560 		(void) vn_start_secondary_write(vp, &mp, V_WAIT);
2561 	if (vinvalbuf(vp, V_SAVE, 0, 0) != 0)
2562 		vinvalbuf(vp, 0, 0, 0);
2563 
2564 	/*
2565 	 * If purging an active vnode, it must be closed and
2566 	 * deactivated before being reclaimed.
2567 	 */
2568 	if (active)
2569 		VOP_CLOSE(vp, FNONBLOCK, NOCRED, td);
2570 	if (oweinact || active) {
2571 		VI_LOCK(vp);
2572 		if ((vp->v_iflag & VI_DOINGINACT) == 0)
2573 			vinactive(vp, td);
2574 		VI_UNLOCK(vp);
2575 	}
2576 	/*
2577 	 * Reclaim the vnode.
2578 	 */
2579 	if (VOP_RECLAIM(vp, td))
2580 		panic("vgone: cannot reclaim");
2581 	if (mp != NULL)
2582 		vn_finished_secondary_write(mp);
2583 	VNASSERT(vp->v_object == NULL, vp,
2584 	    ("vop_reclaim left v_object vp=%p, tag=%s", vp, vp->v_tag));
2585 	/*
2586 	 * Clear the advisory locks and wake up waiting threads.
2587 	 */
2588 	lf_purgelocks(vp, &(vp->v_lockf));
2589 	/*
2590 	 * Delete from old mount point vnode list.
2591 	 */
2592 	delmntque(vp);
2593 	cache_purge(vp);
2594 	/*
2595 	 * Done with purge, reset to the standard lock and invalidate
2596 	 * the vnode.
2597 	 */
2598 	VI_LOCK(vp);
2599 	vp->v_vnlock = &vp->v_lock;
2600 	vp->v_op = &dead_vnodeops;
2601 	vp->v_tag = "none";
2602 	vp->v_type = VBAD;
2603 }
2604 
2605 /*
2606  * Calculate the total number of references to a special device.
2607  */
2608 int
2609 vcount(struct vnode *vp)
2610 {
2611 	int count;
2612 
2613 	dev_lock();
2614 	count = vp->v_rdev->si_usecount;
2615 	dev_unlock();
2616 	return (count);
2617 }
2618 
2619 /*
2620  * Same as above, but using the struct cdev * as the argument.
2621  */
2622 int
2623 count_dev(struct cdev *dev)
2624 {
2625 	int count;
2626 
2627 	dev_lock();
2628 	count = dev->si_usecount;
2629 	dev_unlock();
2630 	return (count);
2631 }
2632 
2633 /*
2634  * Print out a description of a vnode.
2635  */
2636 static char *typename[] =
2637 {"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD",
2638  "VMARKER"};
2639 
2640 void
2641 vn_printf(struct vnode *vp, const char *fmt, ...)
2642 {
2643 	va_list ap;
2644 	char buf[256], buf2[16];
2645 	u_long flags;
2646 
2647 	va_start(ap, fmt);
2648 	vprintf(fmt, ap);
2649 	va_end(ap);
2650 	printf("%p: ", (void *)vp);
2651 	printf("tag %s, type %s\n", vp->v_tag, typename[vp->v_type]);
2652 	printf("    usecount %d, writecount %d, refcount %d mountedhere %p\n",
2653 	    vp->v_usecount, vp->v_writecount, vp->v_holdcnt, vp->v_mountedhere);
2654 	buf[0] = '\0';
2655 	buf[1] = '\0';
2656 	if (vp->v_vflag & VV_ROOT)
2657 		strlcat(buf, "|VV_ROOT", sizeof(buf));
2658 	if (vp->v_vflag & VV_ISTTY)
2659 		strlcat(buf, "|VV_ISTTY", sizeof(buf));
2660 	if (vp->v_vflag & VV_NOSYNC)
2661 		strlcat(buf, "|VV_NOSYNC", sizeof(buf));
2662 	if (vp->v_vflag & VV_CACHEDLABEL)
2663 		strlcat(buf, "|VV_CACHEDLABEL", sizeof(buf));
2664 	if (vp->v_vflag & VV_TEXT)
2665 		strlcat(buf, "|VV_TEXT", sizeof(buf));
2666 	if (vp->v_vflag & VV_COPYONWRITE)
2667 		strlcat(buf, "|VV_COPYONWRITE", sizeof(buf));
2668 	if (vp->v_vflag & VV_SYSTEM)
2669 		strlcat(buf, "|VV_SYSTEM", sizeof(buf));
2670 	if (vp->v_vflag & VV_PROCDEP)
2671 		strlcat(buf, "|VV_PROCDEP", sizeof(buf));
2672 	if (vp->v_vflag & VV_NOKNOTE)
2673 		strlcat(buf, "|VV_NOKNOTE", sizeof(buf));
2674 	if (vp->v_vflag & VV_DELETED)
2675 		strlcat(buf, "|VV_DELETED", sizeof(buf));
2676 	if (vp->v_vflag & VV_MD)
2677 		strlcat(buf, "|VV_MD", sizeof(buf));
2678 	flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC |
2679 	    VV_CACHEDLABEL | VV_TEXT | VV_COPYONWRITE | VV_SYSTEM | VV_PROCDEP |
2680 	    VV_NOKNOTE | VV_DELETED | VV_MD);
2681 	if (flags != 0) {
2682 		snprintf(buf2, sizeof(buf2), "|VV(0x%lx)", flags);
2683 		strlcat(buf, buf2, sizeof(buf));
2684 	}
2685 	if (vp->v_iflag & VI_MOUNT)
2686 		strlcat(buf, "|VI_MOUNT", sizeof(buf));
2687 	if (vp->v_iflag & VI_AGE)
2688 		strlcat(buf, "|VI_AGE", sizeof(buf));
2689 	if (vp->v_iflag & VI_DOOMED)
2690 		strlcat(buf, "|VI_DOOMED", sizeof(buf));
2691 	if (vp->v_iflag & VI_FREE)
2692 		strlcat(buf, "|VI_FREE", sizeof(buf));
2693 	if (vp->v_iflag & VI_OBJDIRTY)
2694 		strlcat(buf, "|VI_OBJDIRTY", sizeof(buf));
2695 	if (vp->v_iflag & VI_DOINGINACT)
2696 		strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
2697 	if (vp->v_iflag & VI_OWEINACT)
2698 		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
2699 	flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE |
2700 	    VI_OBJDIRTY | VI_DOINGINACT | VI_OWEINACT);
2701 	if (flags != 0) {
2702 		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
2703 		strlcat(buf, buf2, sizeof(buf));
2704 	}
2705 	printf("    flags (%s)\n", buf + 1);
2706 	if (mtx_owned(VI_MTX(vp)))
2707 		printf(" VI_LOCKed");
2708 	if (vp->v_object != NULL)
2709 		printf("    v_object %p ref %d pages %d\n",
2710 		    vp->v_object, vp->v_object->ref_count,
2711 		    vp->v_object->resident_page_count);
2712 	printf("    ");
2713 	lockmgr_printinfo(vp->v_vnlock);
2714 	if (vp->v_data != NULL)
2715 		VOP_PRINT(vp);
2716 }
2717 
2718 #ifdef DDB
2719 /*
2720  * List all of the locked vnodes in the system.
2721  * Called when debugging the kernel.
2722  */
2723 DB_SHOW_COMMAND(lockedvnods, lockedvnodes)
2724 {
2725 	struct mount *mp, *nmp;
2726 	struct vnode *vp;
2727 
2728 	/*
2729 	 * Note: because this is DDB, we can't obey the locking semantics
2730 	 * for these structures, which means we could catch an inconsistent
2731 	 * state and dereference a nasty pointer.  Not much to be done
2732 	 * about that.
2733 	 */
2734 	db_printf("Locked vnodes\n");
2735 	for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
2736 		nmp = TAILQ_NEXT(mp, mnt_list);
2737 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2738 			if (vp->v_type != VMARKER &&
2739 			    VOP_ISLOCKED(vp))
2740 				vprint("", vp);
2741 		}
2742 		nmp = TAILQ_NEXT(mp, mnt_list);
2743 	}
2744 }
2745 
2746 /*
2747  * Show details about the given vnode.
2748  */
2749 DB_SHOW_COMMAND(vnode, db_show_vnode)
2750 {
2751 	struct vnode *vp;
2752 
2753 	if (!have_addr)
2754 		return;
2755 	vp = (struct vnode *)addr;
2756 	vn_printf(vp, "vnode ");
2757 }
2758 
2759 /*
2760  * Show details about the given mount point.
2761  */
2762 DB_SHOW_COMMAND(mount, db_show_mount)
2763 {
2764 	struct mount *mp;
2765 	struct statfs *sp;
2766 	struct vnode *vp;
2767 	char buf[512];
2768 	u_int flags;
2769 
2770 	if (!have_addr) {
2771 		/* No address given, print short info about all mount points. */
2772 		TAILQ_FOREACH(mp, &mountlist, mnt_list) {
2773 			db_printf("%p %s on %s (%s)\n", mp,
2774 			    mp->mnt_stat.f_mntfromname,
2775 			    mp->mnt_stat.f_mntonname,
2776 			    mp->mnt_stat.f_fstypename);
2777 			if (db_pager_quit)
2778 				break;
2779 		}
2780 		db_printf("\nMore info: show mount <addr>\n");
2781 		return;
2782 	}
2783 
2784 	mp = (struct mount *)addr;
2785 	db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname,
2786 	    mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename);
2787 
2788 	buf[0] = '\0';
2789 	flags = mp->mnt_flag;
2790 #define	MNT_FLAG(flag)	do {						\
2791 	if (flags & (flag)) {						\
2792 		if (buf[0] != '\0')					\
2793 			strlcat(buf, ", ", sizeof(buf));		\
2794 		strlcat(buf, (#flag) + 4, sizeof(buf));			\
2795 		flags &= ~(flag);					\
2796 	}								\
2797 } while (0)
2798 	MNT_FLAG(MNT_RDONLY);
2799 	MNT_FLAG(MNT_SYNCHRONOUS);
2800 	MNT_FLAG(MNT_NOEXEC);
2801 	MNT_FLAG(MNT_NOSUID);
2802 	MNT_FLAG(MNT_UNION);
2803 	MNT_FLAG(MNT_ASYNC);
2804 	MNT_FLAG(MNT_SUIDDIR);
2805 	MNT_FLAG(MNT_SOFTDEP);
2806 	MNT_FLAG(MNT_NOSYMFOLLOW);
2807 	MNT_FLAG(MNT_GJOURNAL);
2808 	MNT_FLAG(MNT_MULTILABEL);
2809 	MNT_FLAG(MNT_ACLS);
2810 	MNT_FLAG(MNT_NOATIME);
2811 	MNT_FLAG(MNT_NOCLUSTERR);
2812 	MNT_FLAG(MNT_NOCLUSTERW);
2813 	MNT_FLAG(MNT_EXRDONLY);
2814 	MNT_FLAG(MNT_EXPORTED);
2815 	MNT_FLAG(MNT_DEFEXPORTED);
2816 	MNT_FLAG(MNT_EXPORTANON);
2817 	MNT_FLAG(MNT_EXKERB);
2818 	MNT_FLAG(MNT_EXPUBLIC);
2819 	MNT_FLAG(MNT_LOCAL);
2820 	MNT_FLAG(MNT_QUOTA);
2821 	MNT_FLAG(MNT_ROOTFS);
2822 	MNT_FLAG(MNT_USER);
2823 	MNT_FLAG(MNT_IGNORE);
2824 	MNT_FLAG(MNT_UPDATE);
2825 	MNT_FLAG(MNT_DELEXPORT);
2826 	MNT_FLAG(MNT_RELOAD);
2827 	MNT_FLAG(MNT_FORCE);
2828 	MNT_FLAG(MNT_SNAPSHOT);
2829 	MNT_FLAG(MNT_BYFSID);
2830 #undef MNT_FLAG
2831 	if (flags != 0) {
2832 		if (buf[0] != '\0')
2833 			strlcat(buf, ", ", sizeof(buf));
2834 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2835 		    "0x%08x", flags);
2836 	}
2837 	db_printf("    mnt_flag = %s\n", buf);
2838 
2839 	buf[0] = '\0';
2840 	flags = mp->mnt_kern_flag;
2841 #define	MNT_KERN_FLAG(flag)	do {					\
2842 	if (flags & (flag)) {						\
2843 		if (buf[0] != '\0')					\
2844 			strlcat(buf, ", ", sizeof(buf));		\
2845 		strlcat(buf, (#flag) + 5, sizeof(buf));			\
2846 		flags &= ~(flag);					\
2847 	}								\
2848 } while (0)
2849 	MNT_KERN_FLAG(MNTK_UNMOUNTF);
2850 	MNT_KERN_FLAG(MNTK_ASYNC);
2851 	MNT_KERN_FLAG(MNTK_SOFTDEP);
2852 	MNT_KERN_FLAG(MNTK_NOINSMNTQ);
2853 	MNT_KERN_FLAG(MNTK_UNMOUNT);
2854 	MNT_KERN_FLAG(MNTK_MWAIT);
2855 	MNT_KERN_FLAG(MNTK_SUSPEND);
2856 	MNT_KERN_FLAG(MNTK_SUSPEND2);
2857 	MNT_KERN_FLAG(MNTK_SUSPENDED);
2858 	MNT_KERN_FLAG(MNTK_MPSAFE);
2859 	MNT_KERN_FLAG(MNTK_NOKNOTE);
2860 	MNT_KERN_FLAG(MNTK_LOOKUP_SHARED);
2861 #undef MNT_KERN_FLAG
2862 	if (flags != 0) {
2863 		if (buf[0] != '\0')
2864 			strlcat(buf, ", ", sizeof(buf));
2865 		snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
2866 		    "0x%08x", flags);
2867 	}
2868 	db_printf("    mnt_kern_flag = %s\n", buf);
2869 
2870 	sp = &mp->mnt_stat;
2871 	db_printf("    mnt_stat = { version=%u type=%u flags=0x%016jx "
2872 	    "bsize=%ju iosize=%ju blocks=%ju bfree=%ju bavail=%jd files=%ju "
2873 	    "ffree=%jd syncwrites=%ju asyncwrites=%ju syncreads=%ju "
2874 	    "asyncreads=%ju namemax=%u owner=%u fsid=[%d, %d] }\n",
2875 	    (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags,
2876 	    (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize,
2877 	    (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree,
2878 	    (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files,
2879 	    (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites,
2880 	    (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads,
2881 	    (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax,
2882 	    (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]);
2883 
2884 	db_printf("    mnt_cred = { uid=%u ruid=%u",
2885 	    (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid);
2886 	if (jailed(mp->mnt_cred))
2887 		db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id);
2888 	db_printf(" }\n");
2889 	db_printf("    mnt_ref = %d\n", mp->mnt_ref);
2890 	db_printf("    mnt_gen = %d\n", mp->mnt_gen);
2891 	db_printf("    mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize);
2892 	db_printf("    mnt_writeopcount = %d\n", mp->mnt_writeopcount);
2893 	db_printf("    mnt_noasync = %u\n", mp->mnt_noasync);
2894 	db_printf("    mnt_maxsymlinklen = %d\n", mp->mnt_maxsymlinklen);
2895 	db_printf("    mnt_iosize_max = %d\n", mp->mnt_iosize_max);
2896 	db_printf("    mnt_hashseed = %u\n", mp->mnt_hashseed);
2897 	db_printf("    mnt_secondary_writes = %d\n", mp->mnt_secondary_writes);
2898 	db_printf("    mnt_secondary_accwrites = %d\n",
2899 	    mp->mnt_secondary_accwrites);
2900 	db_printf("    mnt_gjprovider = %s\n",
2901 	    mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL");
2902 	db_printf("\n");
2903 
2904 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
2905 		if (vp->v_type != VMARKER) {
2906 			vn_printf(vp, "vnode ");
2907 			if (db_pager_quit)
2908 				break;
2909 		}
2910 	}
2911 }
2912 #endif	/* DDB */
2913 
2914 /*
2915  * Fill in a struct xvfsconf based on a struct vfsconf.
2916  */
2917 static void
2918 vfsconf2x(struct vfsconf *vfsp, struct xvfsconf *xvfsp)
2919 {
2920 
2921 	strcpy(xvfsp->vfc_name, vfsp->vfc_name);
2922 	xvfsp->vfc_typenum = vfsp->vfc_typenum;
2923 	xvfsp->vfc_refcount = vfsp->vfc_refcount;
2924 	xvfsp->vfc_flags = vfsp->vfc_flags;
2925 	/*
2926 	 * These are unused in userland; we keep them
2927 	 * so as not to break binary compatibility.
2928 	 */
2929 	xvfsp->vfc_vfsops = NULL;
2930 	xvfsp->vfc_next = NULL;
2931 }
2932 
2933 /*
2934  * Top level filesystem related information gathering.
2935  */
2936 static int
2937 sysctl_vfs_conflist(SYSCTL_HANDLER_ARGS)
2938 {
2939 	struct vfsconf *vfsp;
2940 	struct xvfsconf xvfsp;
2941 	int error;
2942 
2943 	error = 0;
2944 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
2945 		bzero(&xvfsp, sizeof(xvfsp));
2946 		vfsconf2x(vfsp, &xvfsp);
2947 		error = SYSCTL_OUT(req, &xvfsp, sizeof xvfsp);
2948 		if (error)
2949 			break;
2950 	}
2951 	return (error);
2952 }
2953 
2954 SYSCTL_PROC(_vfs, OID_AUTO, conflist, CTLFLAG_RD, NULL, 0, sysctl_vfs_conflist,
2955     "S,xvfsconf", "List of all configured filesystems");
2956 
2957 #ifndef BURN_BRIDGES
2958 static int	sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS);
2959 
2960 static int
2961 vfs_sysctl(SYSCTL_HANDLER_ARGS)
2962 {
2963 	int *name = (int *)arg1 - 1;	/* XXX */
2964 	u_int namelen = arg2 + 1;	/* XXX */
2965 	struct vfsconf *vfsp;
2966 	struct xvfsconf xvfsp;
2967 
2968 	printf("WARNING: userland calling deprecated sysctl, "
2969 	    "please rebuild world\n");
2970 
2971 #if 1 || defined(COMPAT_PRELITE2)
2972 	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
2973 	if (namelen == 1)
2974 		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
2975 #endif
2976 
2977 	switch (name[1]) {
2978 	case VFS_MAXTYPENUM:
2979 		if (namelen != 2)
2980 			return (ENOTDIR);
2981 		return (SYSCTL_OUT(req, &maxvfsconf, sizeof(int)));
2982 	case VFS_CONF:
2983 		if (namelen != 3)
2984 			return (ENOTDIR);	/* overloaded */
2985 		TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
2986 			if (vfsp->vfc_typenum == name[2])
2987 				break;
2988 		if (vfsp == NULL)
2989 			return (EOPNOTSUPP);
2990 		bzero(&xvfsp, sizeof(xvfsp));
2991 		vfsconf2x(vfsp, &xvfsp);
2992 		return (SYSCTL_OUT(req, &xvfsp, sizeof(xvfsp)));
2993 	}
2994 	return (EOPNOTSUPP);
2995 }
2996 
2997 static SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD | CTLFLAG_SKIP,
2998 	vfs_sysctl, "Generic filesystem");
2999 
3000 #if 1 || defined(COMPAT_PRELITE2)
3001 
3002 static int
3003 sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
3004 {
3005 	int error;
3006 	struct vfsconf *vfsp;
3007 	struct ovfsconf ovfs;
3008 
3009 	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list) {
3010 		bzero(&ovfs, sizeof(ovfs));
3011 		ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
3012 		strcpy(ovfs.vfc_name, vfsp->vfc_name);
3013 		ovfs.vfc_index = vfsp->vfc_typenum;
3014 		ovfs.vfc_refcount = vfsp->vfc_refcount;
3015 		ovfs.vfc_flags = vfsp->vfc_flags;
3016 		error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
3017 		if (error)
3018 			return (error);
3019 	}
3020 	return (0);
3021 }
3022 
3023 #endif /* 1 || COMPAT_PRELITE2 */
3024 #endif /* !BURN_BRIDGES */
3025 
3026 #define KINFO_VNODESLOP		10
3027 #ifdef notyet
3028 /*
3029  * Dump vnode list (via sysctl).
3030  */
3031 /* ARGSUSED */
3032 static int
3033 sysctl_vnode(SYSCTL_HANDLER_ARGS)
3034 {
3035 	struct xvnode *xvn;
3036 	struct mount *mp;
3037 	struct vnode *vp;
3038 	int error, len, n;
3039 
3040 	/*
3041 	 * Stale numvnodes access is not fatal here.
3042 	 */
3043 	req->lock = 0;
3044 	len = (numvnodes + KINFO_VNODESLOP) * sizeof *xvn;
3045 	if (!req->oldptr)
3046 		/* Make an estimate */
3047 		return (SYSCTL_OUT(req, 0, len));
3048 
3049 	error = sysctl_wire_old_buffer(req, 0);
3050 	if (error != 0)
3051 		return (error);
3052 	xvn = malloc(len, M_TEMP, M_ZERO | M_WAITOK);
3053 	n = 0;
3054 	mtx_lock(&mountlist_mtx);
3055 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
3056 		if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
3057 			continue;
3058 		MNT_ILOCK(mp);
3059 		TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
3060 			if (n == len)
3061 				break;
3062 			vref(vp);
3063 			xvn[n].xv_size = sizeof *xvn;
3064 			xvn[n].xv_vnode = vp;
3065 			xvn[n].xv_id = 0;	/* XXX compat */
3066 #define XV_COPY(field) xvn[n].xv_##field = vp->v_##field
3067 			XV_COPY(usecount);
3068 			XV_COPY(writecount);
3069 			XV_COPY(holdcnt);
3070 			XV_COPY(mount);
3071 			XV_COPY(numoutput);
3072 			XV_COPY(type);
3073 #undef XV_COPY
3074 			xvn[n].xv_flag = vp->v_vflag;
3075 
3076 			switch (vp->v_type) {
3077 			case VREG:
3078 			case VDIR:
3079 			case VLNK:
3080 				break;
3081 			case VBLK:
3082 			case VCHR:
3083 				if (vp->v_rdev == NULL) {
3084 					vrele(vp);
3085 					continue;
3086 				}
3087 				xvn[n].xv_dev = dev2udev(vp->v_rdev);
3088 				break;
3089 			case VSOCK:
3090 				xvn[n].xv_socket = vp->v_socket;
3091 				break;
3092 			case VFIFO:
3093 				xvn[n].xv_fifo = vp->v_fifoinfo;
3094 				break;
3095 			case VNON:
3096 			case VBAD:
3097 			default:
3098 				/* shouldn't happen? */
3099 				vrele(vp);
3100 				continue;
3101 			}
3102 			vrele(vp);
3103 			++n;
3104 		}
3105 		MNT_IUNLOCK(mp);
3106 		mtx_lock(&mountlist_mtx);
3107 		vfs_unbusy(mp);
3108 		if (n == len)
3109 			break;
3110 	}
3111 	mtx_unlock(&mountlist_mtx);
3112 
3113 	error = SYSCTL_OUT(req, xvn, n * sizeof *xvn);
3114 	free(xvn, M_TEMP);
3115 	return (error);
3116 }
3117 
3118 SYSCTL_PROC(_kern, KERN_VNODE, vnode, CTLTYPE_OPAQUE|CTLFLAG_RD,
3119 	0, 0, sysctl_vnode, "S,xvnode", "");
3120 #endif
3121 
3122 /*
3123  * Unmount all filesystems. The list is traversed in reverse order
3124  * of mounting to avoid dependencies.
3125  */
3126 void
3127 vfs_unmountall(void)
3128 {
3129 	struct mount *mp;
3130 	struct thread *td;
3131 	int error;
3132 
3133 	KASSERT(curthread != NULL, ("vfs_unmountall: NULL curthread"));
3134 	CTR1(KTR_VFS, "%s: unmounting all filesystems", __func__);
3135 	td = curthread;
3136 
3137 	/*
3138 	 * Since this only runs when rebooting, it is not interlocked.
3139 	 */
3140 	while (!TAILQ_EMPTY(&mountlist)) {
3141 		mp = TAILQ_LAST(&mountlist, mntlist);
3142 		error = dounmount(mp, MNT_FORCE, td);
3143 		if (error) {
3144 			TAILQ_REMOVE(&mountlist, mp, mnt_list);
3145 			/*
3146 			 * XXX: Due to the way in which we mount the root
3147 			 * file system off of devfs, devfs will generate a
3148 			 * "busy" warning when we try to unmount it before
3149 			 * the root.  Don't print a warning as a result in
3150 			 * order to avoid false positive errors that may
3151 			 * cause needless upset.
3152 			 */
3153 			if (strcmp(mp->mnt_vfc->vfc_name, "devfs") != 0) {
3154 				printf("unmount of %s failed (",
3155 				    mp->mnt_stat.f_mntonname);
3156 				if (error == EBUSY)
3157 					printf("BUSY)\n");
3158 				else
3159 					printf("%d)\n", error);
3160 			}
3161 		} else {
3162 			/* The unmount has removed mp from the mountlist */
3163 		}
3164 	}
3165 }
3166 
3167 /*
3168  * Perform msync on all vnodes under a mount point.
3169  * The mount point must be locked.
3170  */
3171 void
3172 vfs_msync(struct mount *mp, int flags)
3173 {
3174 	struct vnode *vp, *mvp;
3175 	struct vm_object *obj;
3176 
3177 	CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
3178 	MNT_ILOCK(mp);
3179 	MNT_VNODE_FOREACH(vp, mp, mvp) {
3180 		VI_LOCK(vp);
3181 		if ((vp->v_iflag & VI_OBJDIRTY) &&
3182 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
3183 			MNT_IUNLOCK(mp);
3184 			if (!vget(vp,
3185 			    LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
3186 			    curthread)) {
3187 				if (vp->v_vflag & VV_NOSYNC) {	/* unlinked */
3188 					vput(vp);
3189 					MNT_ILOCK(mp);
3190 					continue;
3191 				}
3192 
3193 				obj = vp->v_object;
3194 				if (obj != NULL) {
3195 					VM_OBJECT_LOCK(obj);
3196 					vm_object_page_clean(obj, 0, 0,
3197 					    flags == MNT_WAIT ?
3198 					    OBJPC_SYNC : OBJPC_NOSYNC);
3199 					VM_OBJECT_UNLOCK(obj);
3200 				}
3201 				vput(vp);
3202 			}
3203 			MNT_ILOCK(mp);
3204 		} else
3205 			VI_UNLOCK(vp);
3206 	}
3207 	MNT_IUNLOCK(mp);
3208 }
3209 
3210 /*
3211  * Mark a vnode as free, putting it up for recycling.
3212  */
3213 static void
3214 vfree(struct vnode *vp)
3215 {
3216 
3217 	ASSERT_VI_LOCKED(vp, "vfree");
3218 	mtx_lock(&vnode_free_list_mtx);
3219 	VNASSERT(vp->v_op != NULL, vp, ("vfree: vnode already reclaimed."));
3220 	VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, ("vnode already free"));
3221 	VNASSERT(VSHOULDFREE(vp), vp, ("vfree: freeing when we shouldn't"));
3222 	VNASSERT((vp->v_iflag & VI_DOOMED) == 0, vp,
3223 	    ("vfree: Freeing doomed vnode"));
3224 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3225 	if (vp->v_iflag & VI_AGE) {
3226 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
3227 	} else {
3228 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
3229 	}
3230 	freevnodes++;
3231 	vp->v_iflag &= ~VI_AGE;
3232 	vp->v_iflag |= VI_FREE;
3233 	mtx_unlock(&vnode_free_list_mtx);
3234 }
3235 
3236 /*
3237  * Opposite of vfree() - mark a vnode as in use.
3238  */
3239 static void
3240 vbusy(struct vnode *vp)
3241 {
3242 	ASSERT_VI_LOCKED(vp, "vbusy");
3243 	VNASSERT((vp->v_iflag & VI_FREE) != 0, vp, ("vnode not free"));
3244 	VNASSERT(vp->v_op != NULL, vp, ("vbusy: vnode already reclaimed."));
3245 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
3246 
3247 	mtx_lock(&vnode_free_list_mtx);
3248 	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
3249 	freevnodes--;
3250 	vp->v_iflag &= ~(VI_FREE|VI_AGE);
3251 	mtx_unlock(&vnode_free_list_mtx);
3252 }
3253 
3254 static void
3255 destroy_vpollinfo(struct vpollinfo *vi)
3256 {
3257 	knlist_destroy(&vi->vpi_selinfo.si_note);
3258 	mtx_destroy(&vi->vpi_lock);
3259 	uma_zfree(vnodepoll_zone, vi);
3260 }
3261 
3262 /*
3263  * Initialize per-vnode helper structure to hold poll-related state.
3264  */
3265 void
3266 v_addpollinfo(struct vnode *vp)
3267 {
3268 	struct vpollinfo *vi;
3269 
3270 	if (vp->v_pollinfo != NULL)
3271 		return;
3272 	vi = uma_zalloc(vnodepoll_zone, M_WAITOK);
3273 	mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF);
3274 	knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock,
3275 	    vfs_knlunlock, vfs_knllocked);
3276 	VI_LOCK(vp);
3277 	if (vp->v_pollinfo != NULL) {
3278 		VI_UNLOCK(vp);
3279 		destroy_vpollinfo(vi);
3280 		return;
3281 	}
3282 	vp->v_pollinfo = vi;
3283 	VI_UNLOCK(vp);
3284 }
3285 
3286 /*
3287  * Record a process's interest in events which might happen to
3288  * a vnode.  Because poll uses the historic select-style interface
3289  * internally, this routine serves as both the ``check for any
3290  * pending events'' and the ``record my interest in future events''
3291  * functions.  (These are done together, while the lock is held,
3292  * to avoid race conditions.)
3293  */
3294 int
3295 vn_pollrecord(struct vnode *vp, struct thread *td, int events)
3296 {
3297 
3298 	v_addpollinfo(vp);
3299 	mtx_lock(&vp->v_pollinfo->vpi_lock);
3300 	if (vp->v_pollinfo->vpi_revents & events) {
3301 		/*
3302 		 * This leaves events we are not interested
3303 		 * in available for the other process which
3304 		 * presumably had requested them
3305 		 * (otherwise they would never have been
3306 		 * recorded).
3307 		 */
3308 		events &= vp->v_pollinfo->vpi_revents;
3309 		vp->v_pollinfo->vpi_revents &= ~events;
3310 
3311 		mtx_unlock(&vp->v_pollinfo->vpi_lock);
3312 		return (events);
3313 	}
3314 	vp->v_pollinfo->vpi_events |= events;
3315 	selrecord(td, &vp->v_pollinfo->vpi_selinfo);
3316 	mtx_unlock(&vp->v_pollinfo->vpi_lock);
3317 	return (0);
3318 }
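
/*
 * Illustrative sketch, not part of the original source: a filesystem's
 * VOP_POLL implementation can simply forward to this routine; the function
 * name below is hypothetical:
 *
 *	static int
 *	examplefs_poll(struct vop_poll_args *ap)
 *	{
 *
 *		return (vn_pollrecord(ap->a_vp, ap->a_td, ap->a_events));
 *	}
 *
 * When the filesystem later notices an event on the vnode, recording it in
 * vp->v_pollinfo->vpi_revents and waking vpi_selinfo via selwakeup() wakes
 * the recorded waiter.
 */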
3319 
3320 /*
3321  * Routine to create and manage a filesystem syncer vnode.
3322  */
3323 #define sync_close ((int (*)(struct  vop_close_args *))nullop)
3324 static int	sync_fsync(struct  vop_fsync_args *);
3325 static int	sync_inactive(struct  vop_inactive_args *);
3326 static int	sync_reclaim(struct  vop_reclaim_args *);
3327 
3328 static struct vop_vector sync_vnodeops = {
3329 	.vop_bypass =	VOP_EOPNOTSUPP,
3330 	.vop_close =	sync_close,		/* close */
3331 	.vop_fsync =	sync_fsync,		/* fsync */
3332 	.vop_inactive =	sync_inactive,	/* inactive */
3333 	.vop_reclaim =	sync_reclaim,	/* reclaim */
3334 	.vop_lock1 =	vop_stdlock,	/* lock */
3335 	.vop_unlock =	vop_stdunlock,	/* unlock */
3336 	.vop_islocked =	vop_stdislocked,	/* islocked */
3337 };
3338 
3339 /*
3340  * Create a new filesystem syncer vnode for the specified mount point.
3341  */
3342 int
3343 vfs_allocate_syncvnode(struct mount *mp)
3344 {
3345 	struct vnode *vp;
3346 	struct bufobj *bo;
3347 	static long start, incr, next;
3348 	int error;
3349 
3350 	/* Allocate a new vnode */
3351 	if ((error = getnewvnode("syncer", mp, &sync_vnodeops, &vp)) != 0) {
3352 		mp->mnt_syncer = NULL;
3353 		return (error);
3354 	}
3355 	vp->v_type = VNON;
3356 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3357 	vp->v_vflag |= VV_FORCEINSMQ;
3358 	error = insmntque(vp, mp);
3359 	if (error != 0)
3360 		panic("vfs_allocate_syncvnode: insmntque failed");
3361 	vp->v_vflag &= ~VV_FORCEINSMQ;
3362 	VOP_UNLOCK(vp, 0);
3363 	/*
3364 	 * Place the vnode onto the syncer worklist. We attempt to
3365 	 * scatter them about on the list so that they will go off
3366 	 * at evenly distributed times even if all the filesystems
3367 	 * are mounted at once.
3368 	 */
3369 	next += incr;
3370 	if (next == 0 || next > syncer_maxdelay) {
3371 		start /= 2;
3372 		incr /= 2;
3373 		if (start == 0) {
3374 			start = syncer_maxdelay / 2;
3375 			incr = syncer_maxdelay;
3376 		}
3377 		next = start;
3378 	}
3379 	bo = &vp->v_bufobj;
3380 	BO_LOCK(bo);
3381 	vn_syncer_add_to_worklist(bo, syncdelay > 0 ? next % syncdelay : 0);
3382 	/* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */
3383 	mtx_lock(&sync_mtx);
3384 	sync_vnode_count++;
3385 	mtx_unlock(&sync_mtx);
3386 	BO_UNLOCK(bo);
3387 	mp->mnt_syncer = vp;
3388 	return (0);
3389 }
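
/*
 * Illustrative caller sketch, not part of the original source: mount code
 * that wants lazy syncing typically creates the syncer vnode once the
 * filesystem has been mounted (or upgraded) read-write; the MNT_RDONLY test
 * is an assumption about the hypothetical caller:
 *
 *	if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
 *	    (error = vfs_allocate_syncvnode(mp)) != 0)
 *		return (error);
 *
 * The staggered "next" computation above spreads per-mount syncer vnodes
 * across the worklist so simultaneous mounts do not all sync in the same
 * second.
 */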
3390 
3391 /*
3392  * Do a lazy sync of the filesystem.
3393  */
3394 static int
3395 sync_fsync(struct vop_fsync_args *ap)
3396 {
3397 	struct vnode *syncvp = ap->a_vp;
3398 	struct mount *mp = syncvp->v_mount;
3399 	int error;
3400 	struct bufobj *bo;
3401 
3402 	/*
3403 	 * We only need to do something if this is a lazy evaluation.
3404 	 */
3405 	if (ap->a_waitfor != MNT_LAZY)
3406 		return (0);
3407 
3408 	/*
3409 	 * Move ourselves to the back of the sync list.
3410 	 */
3411 	bo = &syncvp->v_bufobj;
3412 	BO_LOCK(bo);
3413 	vn_syncer_add_to_worklist(bo, syncdelay);
3414 	BO_UNLOCK(bo);
3415 
3416 	/*
3417 	 * Walk the list of vnodes pushing all that are dirty and
3418 	 * not already on the sync list.
3419 	 */
3420 	mtx_lock(&mountlist_mtx);
3421 	if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK) != 0) {
3422 		mtx_unlock(&mountlist_mtx);
3423 		return (0);
3424 	}
3425 	if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) {
3426 		vfs_unbusy(mp);
3427 		return (0);
3428 	}
3429 	MNT_ILOCK(mp);
3430 	mp->mnt_noasync++;
3431 	mp->mnt_kern_flag &= ~MNTK_ASYNC;
3432 	MNT_IUNLOCK(mp);
3433 	vfs_msync(mp, MNT_NOWAIT);
3434 	error = VFS_SYNC(mp, MNT_LAZY);
3435 	MNT_ILOCK(mp);
3436 	mp->mnt_noasync--;
3437 	if ((mp->mnt_flag & MNT_ASYNC) != 0 && mp->mnt_noasync == 0)
3438 		mp->mnt_kern_flag |= MNTK_ASYNC;
3439 	MNT_IUNLOCK(mp);
3440 	vn_finished_write(mp);
3441 	vfs_unbusy(mp);
3442 	return (error);
3443 }
3444 
3445 /*
3446  * The syncer vnode is no longer referenced.
3447  */
3448 static int
3449 sync_inactive(struct vop_inactive_args *ap)
3450 {
3451 
3452 	vgone(ap->a_vp);
3453 	return (0);
3454 }
3455 
3456 /*
3457  * The syncer vnode is no longer needed and is being decommissioned.
3458  *
3459  * Modifications to the worklist must be protected by sync_mtx.
3460  */
3461 static int
3462 sync_reclaim(struct vop_reclaim_args *ap)
3463 {
3464 	struct vnode *vp = ap->a_vp;
3465 	struct bufobj *bo;
3466 
3467 	bo = &vp->v_bufobj;
3468 	BO_LOCK(bo);
3469 	vp->v_mount->mnt_syncer = NULL;
3470 	if (bo->bo_flag & BO_ONWORKLST) {
3471 		mtx_lock(&sync_mtx);
3472 		LIST_REMOVE(bo, bo_synclist);
3473 		syncer_worklist_len--;
3474 		sync_vnode_count--;
3475 		mtx_unlock(&sync_mtx);
3476 		bo->bo_flag &= ~BO_ONWORKLST;
3477 	}
3478 	BO_UNLOCK(bo);
3479 
3480 	return (0);
3481 }
3482 
3483 /*
3484  * Check if vnode represents a disk device
3485  */
3486 int
3487 vn_isdisk(struct vnode *vp, int *errp)
3488 {
3489 	int error;
3490 
3491 	error = 0;
3492 	dev_lock();
3493 	if (vp->v_type != VCHR)
3494 		error = ENOTBLK;
3495 	else if (vp->v_rdev == NULL)
3496 		error = ENXIO;
3497 	else if (vp->v_rdev->si_devsw == NULL)
3498 		error = ENXIO;
3499 	else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK))
3500 		error = ENOTBLK;
3501 	dev_unlock();
3502 	if (errp != NULL)
3503 		*errp = error;
3504 	return (error == 0);
3505 }
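
/*
 * Illustrative caller sketch, not part of the original source: callers
 * usually want both the boolean answer and the specific errno:
 *
 *	int error;
 *
 *	if (!vn_isdisk(vp, &error))
 *		return (error);
 *	... vp->v_rdev is a disk device; issue disk-specific operations ...
 *
 * error is ENOTBLK for a non-disk vnode or device and ENXIO when the device
 * has gone away.
 */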
3506 
3507 /*
3508  * Common filesystem object access control check routine.  Accepts a
3509  * vnode's type, "mode", uid and gid, requested access mode, credentials,
3510  * and optional call-by-reference privused argument allowing vaccess()
3511  * to indicate to the caller whether privilege was used to satisfy the
3512  * request (obsoleted).  Returns 0 on success, or an errno on failure.
3513  *
3514  * The ifdef'd CAPABILITIES version is here for reference, but is not
3515  * actually used.
3516  */
3517 int
3518 vaccess(enum vtype type, mode_t file_mode, uid_t file_uid, gid_t file_gid,
3519     accmode_t accmode, struct ucred *cred, int *privused)
3520 {
3521 	accmode_t dac_granted;
3522 	accmode_t priv_granted;
3523 
3524 	/*
3525 	 * Look for a normal, non-privileged way to access the file/directory
3526 	 * as requested.  If it exists, go with that.
3527 	 */
3528 
3529 	if (privused != NULL)
3530 		*privused = 0;
3531 
3532 	dac_granted = 0;
3533 
3534 	/* Check the owner. */
3535 	if (cred->cr_uid == file_uid) {
3536 		dac_granted |= VADMIN;
3537 		if (file_mode & S_IXUSR)
3538 			dac_granted |= VEXEC;
3539 		if (file_mode & S_IRUSR)
3540 			dac_granted |= VREAD;
3541 		if (file_mode & S_IWUSR)
3542 			dac_granted |= (VWRITE | VAPPEND);
3543 
3544 		if ((accmode & dac_granted) == accmode)
3545 			return (0);
3546 
3547 		goto privcheck;
3548 	}
3549 
3550 	/* Otherwise, check the groups (first match) */
3551 	if (groupmember(file_gid, cred)) {
3552 		if (file_mode & S_IXGRP)
3553 			dac_granted |= VEXEC;
3554 		if (file_mode & S_IRGRP)
3555 			dac_granted |= VREAD;
3556 		if (file_mode & S_IWGRP)
3557 			dac_granted |= (VWRITE | VAPPEND);
3558 
3559 		if ((accmode & dac_granted) == accmode)
3560 			return (0);
3561 
3562 		goto privcheck;
3563 	}
3564 
3565 	/* Otherwise, check everyone else. */
3566 	if (file_mode & S_IXOTH)
3567 		dac_granted |= VEXEC;
3568 	if (file_mode & S_IROTH)
3569 		dac_granted |= VREAD;
3570 	if (file_mode & S_IWOTH)
3571 		dac_granted |= (VWRITE | VAPPEND);
3572 	if ((accmode & dac_granted) == accmode)
3573 		return (0);
3574 
3575 privcheck:
3576 	/*
3577 	 * Build a privilege mask to determine if the set of privileges
3578 	 * satisfies the requirements when combined with the granted mask
3579 	 * from above.  For each privilege, if the privilege is required,
3580 	 * bitwise or the request type onto the priv_granted mask.
3581 	 */
3582 	priv_granted = 0;
3583 
3584 	if (type == VDIR) {
3585 		/*
3586 		 * For directories, use PRIV_VFS_LOOKUP to satisfy VEXEC
3587 		 * requests, instead of PRIV_VFS_EXEC.
3588 		 */
3589 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3590 		    !priv_check_cred(cred, PRIV_VFS_LOOKUP, 0))
3591 			priv_granted |= VEXEC;
3592 	} else {
3593 		if ((accmode & VEXEC) && ((dac_granted & VEXEC) == 0) &&
3594 		    !priv_check_cred(cred, PRIV_VFS_EXEC, 0))
3595 			priv_granted |= VEXEC;
3596 	}
3597 
3598 	if ((accmode & VREAD) && ((dac_granted & VREAD) == 0) &&
3599 	    !priv_check_cred(cred, PRIV_VFS_READ, 0))
3600 		priv_granted |= VREAD;
3601 
3602 	if ((accmode & VWRITE) && ((dac_granted & VWRITE) == 0) &&
3603 	    !priv_check_cred(cred, PRIV_VFS_WRITE, 0))
3604 		priv_granted |= (VWRITE | VAPPEND);
3605 
3606 	if ((accmode & VADMIN) && ((dac_granted & VADMIN) == 0) &&
3607 	    !priv_check_cred(cred, PRIV_VFS_ADMIN, 0))
3608 		priv_granted |= VADMIN;
3609 
3610 	if ((accmode & (priv_granted | dac_granted)) == accmode) {
3611 		/* XXX audit: privilege used */
3612 		if (privused != NULL)
3613 			*privused = 1;
3614 		return (0);
3615 	}
3616 
3617 	return ((accmode & VADMIN) ? EPERM : EACCES);
3618 }
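
/*
 * Worked example (editorial, not part of the original source) of the DAC
 * computation above with file_mode = 0640, file_uid = 100, file_gid = 0:
 *
 *	cr_uid == 100, accmode = VREAD | VWRITE:
 *		the owner branch sets dac_granted =
 *		    VADMIN | VREAD | VWRITE | VAPPEND,
 *		which covers accmode, so vaccess() returns 0 with no
 *		privilege check.
 *
 *	cr_uid == 200, not a member of gid 0, accmode = VWRITE:
 *		the "other" bits grant nothing for writing, so control
 *		falls through to privcheck, where only PRIV_VFS_WRITE can
 *		add VWRITE.  Without that privilege the result is EACCES
 *		(EPERM is reserved for denied VADMIN requests).
 */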
3619 
3620 /*
3621  * Credential check based on process requesting service, and per-attribute
3622  * permissions.
3623  */
3624 int
3625 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred,
3626     struct thread *td, accmode_t accmode)
3627 {
3628 
3629 	/*
3630 	 * Kernel-invoked requests always succeed.
3631 	 */
3632 	if (cred == NOCRED)
3633 		return (0);
3634 
3635 	/*
3636 	 * Do not allow privileged processes in jail to directly manipulate
3637 	 * system attributes.
3638 	 */
3639 	switch (attrnamespace) {
3640 	case EXTATTR_NAMESPACE_SYSTEM:
3641 		/* Potentially should be: return (EPERM); */
3642 		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM, 0));
3643 	case EXTATTR_NAMESPACE_USER:
3644 		return (VOP_ACCESS(vp, accmode, cred, td));
3645 	default:
3646 		return (EPERM);
3647 	}
3648 }
3649 
3650 #ifdef DEBUG_VFS_LOCKS
3651 /*
3652  * This only exists to suppress warnings from unlocked specfs accesses.  It is
3653  * no longer ok to have an unlocked VFS.
3654  */
3655 #define	IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL ||		\
3656 	(vp)->v_type == VCHR ||	(vp)->v_type == VBAD)
3657 
3658 int vfs_badlock_ddb = 1;	/* Drop into debugger on violation. */
3659 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
3660 
3661 int vfs_badlock_mutex = 1;	/* Check for interlock across VOPs. */
3662 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_mutex, CTLFLAG_RW, &vfs_badlock_mutex, 0, "");
3663 
3664 int vfs_badlock_print = 1;	/* Print lock violations. */
3665 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_print, CTLFLAG_RW, &vfs_badlock_print, 0, "");
3666 
3667 #ifdef KDB
3668 int vfs_badlock_backtrace = 1;	/* Print backtrace at lock violations. */
3669 SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_backtrace, CTLFLAG_RW, &vfs_badlock_backtrace, 0, "");
3670 #endif
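/*
 * Editor's note: these knobs are exported under the "debug" sysctl tree,
 * so a kernel built with DEBUG_VFS_LOCKS can, for example, be told to keep
 * reporting violations without dropping into the debugger:
 *
 *	sysctl debug.vfs_badlock_ddb=0
 *	sysctl debug.vfs_badlock_print=1
 */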
3671 
3672 static void
3673 vfs_badlock(const char *msg, const char *str, struct vnode *vp)
3674 {
3675 
3676 #ifdef KDB
3677 	if (vfs_badlock_backtrace)
3678 		kdb_backtrace();
3679 #endif
3680 	if (vfs_badlock_print)
3681 		printf("%s: %p %s\n", str, (void *)vp, msg);
3682 	if (vfs_badlock_ddb)
3683 		kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
3684 }
3685 
3686 void
3687 assert_vi_locked(struct vnode *vp, const char *str)
3688 {
3689 
3690 	if (vfs_badlock_mutex && !mtx_owned(VI_MTX(vp)))
3691 		vfs_badlock("interlock is not locked but should be", str, vp);
3692 }
3693 
3694 void
3695 assert_vi_unlocked(struct vnode *vp, const char *str)
3696 {
3697 
3698 	if (vfs_badlock_mutex && mtx_owned(VI_MTX(vp)))
3699 		vfs_badlock("interlock is locked but should not be", str, vp);
3700 }
3701 
3702 void
3703 assert_vop_locked(struct vnode *vp, const char *str)
3704 {
3705 
3706 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0)
3707 		vfs_badlock("is not locked but should be", str, vp);
3708 }
3709 
3710 void
3711 assert_vop_unlocked(struct vnode *vp, const char *str)
3712 {
3713 
3714 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
3715 		vfs_badlock("is locked but should not be", str, vp);
3716 }
3717 
3718 void
3719 assert_vop_elocked(struct vnode *vp, const char *str)
3720 {
3721 
3722 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLUSIVE)
3723 		vfs_badlock("is not exclusive locked but should be", str, vp);
3724 }
3725 
3726 #if 0
3727 void
3728 assert_vop_elocked_other(struct vnode *vp, const char *str)
3729 {
3730 
3731 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_EXCLOTHER)
3732 		vfs_badlock("is not exclusive locked by another thread",
3733 		    str, vp);
3734 }
3735 
3736 void
3737 assert_vop_slocked(struct vnode *vp, const char *str)
3738 {
3739 
3740 	if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) != LK_SHARED)
3741 		vfs_badlock("is not locked shared but should be", str, vp);
3742 }
3743 #endif /* 0 */
3744 #endif /* DEBUG_VFS_LOCKS */
3745 
3746 void
3747 vop_rename_pre(void *ap)
3748 {
3749 	struct vop_rename_args *a = ap;
3750 
3751 #ifdef DEBUG_VFS_LOCKS
3752 	if (a->a_tvp)
3753 		ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME");
3754 	ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME");
3755 	ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME");
3756 	ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME");
3757 
3758 	/* Check the source (from). */
3759 	if (a->a_tdvp != a->a_fdvp && a->a_tvp != a->a_fdvp)
3760 		ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked");
3761 	if (a->a_tvp != a->a_fvp)
3762 		ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked");
3763 
3764 	/* Check the target. */
3765 	if (a->a_tvp)
3766 		ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked");
3767 	ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked");
3768 #endif
3769 	if (a->a_tdvp != a->a_fdvp)
3770 		vhold(a->a_fdvp);
3771 	if (a->a_tvp != a->a_fvp)
3772 		vhold(a->a_fvp);
3773 	vhold(a->a_tdvp);
3774 	if (a->a_tvp)
3775 		vhold(a->a_tvp);
3776 }
3777 
3778 void
3779 vop_strategy_pre(void *ap)
3780 {
3781 #ifdef DEBUG_VFS_LOCKS
3782 	struct vop_strategy_args *a;
3783 	struct buf *bp;
3784 
3785 	a = ap;
3786 	bp = a->a_bp;
3787 
3788 	/*
3789 	 * Cluster ops lock their component buffers but not the I/O container.
3790 	 */
3791 	if ((bp->b_flags & B_CLUSTER) != 0)
3792 		return;
3793 
3794 	if (!BUF_ISLOCKED(bp)) {
3795 		if (vfs_badlock_print)
3796 			printf(
3797 			    "VOP_STRATEGY: bp is not locked but should be\n");
3798 		if (vfs_badlock_ddb)
3799 			kdb_enter(KDB_WHY_VFSLOCK, "lock violation");
3800 	}
3801 #endif
3802 }
3803 
3804 void
3805 vop_lookup_pre(void *ap)
3806 {
3807 #ifdef DEBUG_VFS_LOCKS
3808 	struct vop_lookup_args *a;
3809 	struct vnode *dvp;
3810 
3811 	a = ap;
3812 	dvp = a->a_dvp;
3813 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3814 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3815 #endif
3816 }
3817 
3818 void
3819 vop_lookup_post(void *ap, int rc)
3820 {
3821 #ifdef DEBUG_VFS_LOCKS
3822 	struct vop_lookup_args *a;
3823 	struct vnode *dvp;
3824 	struct vnode *vp;
3825 
3826 	a = ap;
3827 	dvp = a->a_dvp;
3828 	vp = *(a->a_vpp);
3829 
3830 	ASSERT_VI_UNLOCKED(dvp, "VOP_LOOKUP");
3831 	ASSERT_VOP_LOCKED(dvp, "VOP_LOOKUP");
3832 
3833 	if (!rc)
3834 		ASSERT_VOP_LOCKED(vp, "VOP_LOOKUP (child)");
3835 #endif
3836 }
3837 
3838 void
3839 vop_lock_pre(void *ap)
3840 {
3841 #ifdef DEBUG_VFS_LOCKS
3842 	struct vop_lock1_args *a = ap;
3843 
3844 	if ((a->a_flags & LK_INTERLOCK) == 0)
3845 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3846 	else
3847 		ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK");
3848 #endif
3849 }
3850 
3851 void
3852 vop_lock_post(void *ap, int rc)
3853 {
3854 #ifdef DEBUG_VFS_LOCKS
3855 	struct vop_lock1_args *a = ap;
3856 
3857 	ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK");
3858 	if (rc == 0)
3859 		ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK");
3860 #endif
3861 }
3862 
3863 void
3864 vop_unlock_pre(void *ap)
3865 {
3866 #ifdef DEBUG_VFS_LOCKS
3867 	struct vop_unlock_args *a = ap;
3868 
3869 	if (a->a_flags & LK_INTERLOCK)
3870 		ASSERT_VI_LOCKED(a->a_vp, "VOP_UNLOCK");
3871 	ASSERT_VOP_LOCKED(a->a_vp, "VOP_UNLOCK");
3872 #endif
3873 }
3874 
3875 void
3876 vop_unlock_post(void *ap, int rc)
3877 {
3878 #ifdef DEBUG_VFS_LOCKS
3879 	struct vop_unlock_args *a = ap;
3880 
3881 	if (a->a_flags & LK_INTERLOCK)
3882 		ASSERT_VI_UNLOCKED(a->a_vp, "VOP_UNLOCK");
3883 #endif
3884 }
3885 
3886 void
3887 vop_create_post(void *ap, int rc)
3888 {
3889 	struct vop_create_args *a = ap;
3890 
3891 	if (!rc)
3892 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3893 }
3894 
3895 void
3896 vop_link_post(void *ap, int rc)
3897 {
3898 	struct vop_link_args *a = ap;
3899 
3900 	if (!rc) {
3901 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_LINK);
3902 		VFS_KNOTE_LOCKED(a->a_tdvp, NOTE_WRITE);
3903 	}
3904 }
3905 
3906 void
3907 vop_mkdir_post(void *ap, int rc)
3908 {
3909 	struct vop_mkdir_args *a = ap;
3910 
3911 	if (!rc)
3912 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
3913 }
3914 
3915 void
3916 vop_mknod_post(void *ap, int rc)
3917 {
3918 	struct vop_mknod_args *a = ap;
3919 
3920 	if (!rc)
3921 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3922 }
3923 
3924 void
3925 vop_remove_post(void *ap, int rc)
3926 {
3927 	struct vop_remove_args *a = ap;
3928 
3929 	if (!rc) {
3930 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3931 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
3932 	}
3933 }
3934 
3935 void
3936 vop_rename_post(void *ap, int rc)
3937 {
3938 	struct vop_rename_args *a = ap;
3939 
3940 	if (!rc) {
3941 		VFS_KNOTE_UNLOCKED(a->a_fdvp, NOTE_WRITE);
3942 		VFS_KNOTE_UNLOCKED(a->a_tdvp, NOTE_WRITE);
3943 		VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME);
3944 		if (a->a_tvp)
3945 			VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE);
3946 	}
3947 	if (a->a_tdvp != a->a_fdvp)
3948 		vdrop(a->a_fdvp);
3949 	if (a->a_tvp != a->a_fvp)
3950 		vdrop(a->a_fvp);
3951 	vdrop(a->a_tdvp);
3952 	if (a->a_tvp)
3953 		vdrop(a->a_tvp);
3954 }
3955 
3956 void
3957 vop_rmdir_post(void *ap, int rc)
3958 {
3959 	struct vop_rmdir_args *a = ap;
3960 
3961 	if (!rc) {
3962 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE | NOTE_LINK);
3963 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_DELETE);
3964 	}
3965 }
3966 
3967 void
3968 vop_setattr_post(void *ap, int rc)
3969 {
3970 	struct vop_setattr_args *a = ap;
3971 
3972 	if (!rc)
3973 		VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB);
3974 }
3975 
3976 void
3977 vop_symlink_post(void *ap, int rc)
3978 {
3979 	struct vop_symlink_args *a = ap;
3980 
3981 	if (!rc)
3982 		VFS_KNOTE_LOCKED(a->a_dvp, NOTE_WRITE);
3983 }
3984 
3985 static struct knlist fs_knlist;
3986 
3987 static void
3988 vfs_event_init(void *arg)
3989 {
3990 	knlist_init(&fs_knlist, NULL, NULL, NULL, NULL);
3991 }
3992 /* XXX - correct order? */
3993 SYSINIT(vfs_knlist, SI_SUB_VFS, SI_ORDER_ANY, vfs_event_init, NULL);
3994 
3995 void
3996 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data __unused)
3997 {
3998 
3999 	KNOTE_UNLOCKED(&fs_knlist, event);
4000 }
4001 
4002 static int	filt_fsattach(struct knote *kn);
4003 static void	filt_fsdetach(struct knote *kn);
4004 static int	filt_fsevent(struct knote *kn, long hint);
4005 
4006 struct filterops fs_filtops =
4007 	{ 0, filt_fsattach, filt_fsdetach, filt_fsevent };
4008 
4009 static int
4010 filt_fsattach(struct knote *kn)
4011 {
4012 
4013 	kn->kn_flags |= EV_CLEAR;
4014 	knlist_add(&fs_knlist, kn, 0);
4015 	return (0);
4016 }
4017 
4018 static void
4019 filt_fsdetach(struct knote *kn)
4020 {
4021 
4022 	knlist_remove(&fs_knlist, kn, 0);
4023 }
4024 
4025 static int
4026 filt_fsevent(struct knote *kn, long hint)
4027 {
4028 
4029 	kn->kn_fflags |= hint;
4030 	return (kn->kn_fflags != 0);
4031 }
4032 
4033 static int
4034 sysctl_vfs_ctl(SYSCTL_HANDLER_ARGS)
4035 {
4036 	struct vfsidctl vc;
4037 	int error;
4038 	struct mount *mp;
4039 
4040 	error = SYSCTL_IN(req, &vc, sizeof(vc));
4041 	if (error)
4042 		return (error);
4043 	if (vc.vc_vers != VFS_CTL_VERS1)
4044 		return (EINVAL);
4045 	mp = vfs_getvfs(&vc.vc_fsid);
4046 	if (mp == NULL)
4047 		return (ENOENT);
4048 	/* Ensure that a specific sysctl goes to the right filesystem. */
4049 	if (strcmp(vc.vc_fstypename, "*") != 0 &&
4050 	    strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) {
4051 		vfs_rel(mp);
4052 		return (EINVAL);
4053 	}
4054 	VCTLTOREQ(&vc, req);
4055 	error = VFS_SYSCTL(mp, vc.vc_op, req);
4056 	vfs_rel(mp);
4057 	return (error);
4058 }
4059 
4060 SYSCTL_PROC(_vfs, OID_AUTO, ctl, CTLFLAG_WR, NULL, 0, sysctl_vfs_ctl, "",
4061     "Sysctl by fsid");
4062 
4063 /*
4064  * Function to initialize a va_filerev field sensibly.
4065  * XXX: Wouldn't a random number make a lot more sense ??
4066  */
4067 u_quad_t
4068 init_va_filerev(void)
4069 {
4070 	struct bintime bt;
4071 
4072 	getbinuptime(&bt);
4073 	return (((u_quad_t)bt.sec << 32LL) | (bt.frac >> 32LL));
4074 }
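/*
 * Editor's note: the packing above keeps the whole seconds of uptime in
 * the upper 32 bits and the top of the fractional part in the lower 32
 * bits, so calls within one boot yield non-decreasing values.  A
 * filesystem would typically capture the result once when it creates an
 * in-core file; the field name below is a hypothetical placeholder:
 */
#if 0
	ip->i_modrev = init_va_filerev();
#endif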
4075 
4076 static int	filt_vfsread(struct knote *kn, long hint);
4077 static int	filt_vfswrite(struct knote *kn, long hint);
4078 static int	filt_vfsvnode(struct knote *kn, long hint);
4079 static void	filt_vfsdetach(struct knote *kn);
4080 static struct filterops vfsread_filtops =
4081 	{ 1, NULL, filt_vfsdetach, filt_vfsread };
4082 static struct filterops vfswrite_filtops =
4083 	{ 1, NULL, filt_vfsdetach, filt_vfswrite };
4084 static struct filterops vfsvnode_filtops =
4085 	{ 1, NULL, filt_vfsdetach, filt_vfsvnode };
4086 
4087 static void
4088 vfs_knllock(void *arg)
4089 {
4090 	struct vnode *vp = arg;
4091 
4092 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
4093 }
4094 
4095 static void
4096 vfs_knlunlock(void *arg)
4097 {
4098 	struct vnode *vp = arg;
4099 
4100 	VOP_UNLOCK(vp, 0);
4101 }
4102 
4103 static int
4104 vfs_knllocked(void *arg)
4105 {
4106 	struct vnode *vp = arg;
4107 
4108 	return (VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
4109 }
4110 
4111 int
4112 vfs_kqfilter(struct vop_kqfilter_args *ap)
4113 {
4114 	struct vnode *vp = ap->a_vp;
4115 	struct knote *kn = ap->a_kn;
4116 	struct knlist *knl;
4117 
4118 	switch (kn->kn_filter) {
4119 	case EVFILT_READ:
4120 		kn->kn_fop = &vfsread_filtops;
4121 		break;
4122 	case EVFILT_WRITE:
4123 		kn->kn_fop = &vfswrite_filtops;
4124 		break;
4125 	case EVFILT_VNODE:
4126 		kn->kn_fop = &vfsvnode_filtops;
4127 		break;
4128 	default:
4129 		return (EINVAL);
4130 	}
4131 
4132 	kn->kn_hook = (caddr_t)vp;
4133 
4134 	v_addpollinfo(vp);
4135 	if (vp->v_pollinfo == NULL)
4136 		return (ENOMEM);
4137 	knl = &vp->v_pollinfo->vpi_selinfo.si_note;
4138 	knlist_add(knl, kn, 0);
4139 
4140 	return (0);
4141 }
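/*
 * Editor's note: an illustrative userland sketch, not part of this file,
 * of the kevent(2) usage that ends up registering a knote through
 * vfs_kqfilter() above.  Error handling is omitted and the path argument
 * is a placeholder.
 */
#if 0
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>

static void
watch_vnode(const char *path)
{
	struct kevent kev;
	int fd, kq;

	kq = kqueue();
	fd = open(path, O_RDONLY);
	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_WRITE | NOTE_DELETE | NOTE_RENAME | NOTE_ATTRIB, 0, NULL);
	(void)kevent(kq, &kev, 1, NULL, 0, NULL);	/* register the filter */
	(void)kevent(kq, NULL, 0, &kev, 1, NULL);	/* wait for one event */
}
#endif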
4142 
4143 /*
4144  * Detach knote from vnode
4145  */
4146 static void
4147 filt_vfsdetach(struct knote *kn)
4148 {
4149 	struct vnode *vp = (struct vnode *)kn->kn_hook;
4150 
4151 	KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
4152 	knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0);
4153 }
4154 
4155 /*ARGSUSED*/
4156 static int
4157 filt_vfsread(struct knote *kn, long hint)
4158 {
4159 	struct vnode *vp = (struct vnode *)kn->kn_hook;
4160 	struct vattr va;
4161 
4162 	/*
4163 	 * filesystem is gone, so set the EOF flag and schedule
4164 	 * the knote for deletion.
4165 	 */
4166 	if (hint == NOTE_REVOKE) {
4167 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4168 		return (1);
4169 	}
4170 
4171 	if (VOP_GETATTR(vp, &va, curthread->td_ucred))
4172 		return (0);
4173 
4174 	kn->kn_data = va.va_size - kn->kn_fp->f_offset;
4175 	return (kn->kn_data != 0);
4176 }
4177 
4178 /*ARGSUSED*/
4179 static int
4180 filt_vfswrite(struct knote *kn, long hint)
4181 {
4182 	/*
4183 	 * filesystem is gone, so set the EOF flag and schedule
4184 	 * the knote for deletion.
4185 	 */
4186 	if (hint == NOTE_REVOKE)
4187 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
4188 
4189 	kn->kn_data = 0;
4190 	return (1);
4191 }
4192 
4193 static int
4194 filt_vfsvnode(struct knote *kn, long hint)
4195 {
4196 	if (kn->kn_sfflags & hint)
4197 		kn->kn_fflags |= hint;
4198 	if (hint == NOTE_REVOKE) {
4199 		kn->kn_flags |= EV_EOF;
4200 		return (1);
4201 	}
4202 	return (kn->kn_fflags != 0);
4203 }
4204 
4205 int
4206 vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off)
4207 {
4208 	int error;
4209 
4210 	if (dp->d_reclen > ap->a_uio->uio_resid)
4211 		return (ENAMETOOLONG);
4212 	error = uiomove(dp, dp->d_reclen, ap->a_uio);
4213 	if (error) {
4214 		if (ap->a_ncookies != NULL) {
4215 			if (ap->a_cookies != NULL)
4216 				free(ap->a_cookies, M_TEMP);
4217 			ap->a_cookies = NULL;
4218 			*ap->a_ncookies = 0;
4219 		}
4220 		return (error);
4221 	}
4222 	if (ap->a_ncookies == NULL)
4223 		return (0);
4224 
4225 	KASSERT(ap->a_cookies,
4226 	    ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!"));
4227 
4228 	*ap->a_cookies = realloc(*ap->a_cookies,
4229 	    (*ap->a_ncookies + 1) * sizeof(u_long), M_TEMP, M_WAITOK | M_ZERO);
4230 	(*ap->a_cookies)[*ap->a_ncookies] = off;
4231 	return (0);
4232 }
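/*
 * Editor's note: an illustrative sketch, not part of this file, of how a
 * filesystem's VOP_READDIR loop might use vfs_read_dirent() to emit one
 * entry and record its seek cookie.  The entry contents are placeholders;
 * treating ENAMETOOLONG as "out of user buffer space" follows the
 * convention implied above.
 */
#if 0
	struct dirent de;
	int error;

	bzero(&de, sizeof(de));
	de.d_fileno = 2;			/* placeholder file number */
	de.d_type = DT_DIR;
	de.d_namlen = 1;
	strcpy(de.d_name, ".");
	de.d_reclen = GENERIC_DIRSIZ(&de);
	error = vfs_read_dirent(ap, &de, off);
	if (error != 0)
		return (error == ENAMETOOLONG ? 0 : error);
#endif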
4233 
4234 /*
4235  * Mark the access time of the file for update if the filesystem
4236  * supports VOP_MARKATIME.  This functionality is used by execve and
4237  * mmap, so we want to avoid the I/O implied by directly setting
4238  * va_atime for the sake of efficiency.
4239  */
4240 void
4241 vfs_mark_atime(struct vnode *vp, struct ucred *cred)
4242 {
4243 
4244 	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0)
4245 		(void)VOP_MARKATIME(vp);
4246 }
4247 
4248 /*
4249  * The purpose of this routine is to remove granularity from accmode_t,
4250  * reducing it to the standard UNIX access bits - VEXEC, VREAD, VWRITE,
4251  * VADMIN and VAPPEND.
4252  *
4253  * If it returns 0, the caller is supposed to continue with the usual
4254  * access checks using 'accmode' as modified by this routine.  If it
4255  * returns a nonzero value, the caller is supposed to return that value
4256  * as errno.
4257  *
4258  * Note that after this routine runs, accmode may be zero.
4259  */
4260 int
4261 vfs_unixify_accmode(accmode_t *accmode)
4262 {
4263 	/*
4264 	 * There is no way to specify an explicit "deny" rule using
4265 	 * file mode or POSIX.1e ACLs.
4266 	 */
4267 	if (*accmode & VEXPLICIT_DENY) {
4268 		*accmode = 0;
4269 		return (0);
4270 	}
4271 
4272 	/*
4273 	 * None of these can be translated into the usual access bits.
4274 	 * Also, the common case for NFSv4 ACLs is not to contain
4275 	 * either of these bits.  The caller should check for VWRITE
4276 	 * on the containing directory instead.
4277 	 */
4278 	if (*accmode & (VDELETE_CHILD | VDELETE))
4279 		return (EPERM);
4280 
4281 	if (*accmode & VADMIN_PERMS) {
4282 		*accmode &= ~VADMIN_PERMS;
4283 		*accmode |= VADMIN;
4284 	}
4285 
4286 	/*
4287 	 * There is no way to deny VREAD_ATTRIBUTES, VREAD_ACL
4288 	 * or VSYNCHRONIZE using file mode or POSIX.1e ACL.
4289 	 */
4290 	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
4291 
4292 	return (0);
4293 }
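/*
 * Editor's note: an illustrative sketch, not part of this file, of the
 * calling convention described above: strip the fine-grained NFSv4 bits
 * first, then fall through to the usual vaccess() check.  The inode
 * fields are hypothetical placeholders.
 */
#if 0
	error = vfs_unixify_accmode(&accmode);
	if (error != 0)
		return (error);
	if (accmode == 0)
		return (0);
	return (vaccess(vp->v_type, ip->i_mode, ip->i_uid, ip->i_gid,
	    accmode, cred, NULL));
#endif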
4294