Lines matching full:vnode (FreeBSD sys/kern/vfs_subr.c)
85 #include <sys/vnode.h>
104 static void delmntque(struct vnode *vp);
108 static int vtryrecycle(struct vnode *vp, bool isvnlru);
109 static void v_init_counters(struct vnode *);
110 static void vn_seqc_init(struct vnode *);
111 static void vn_seqc_write_end_free(struct vnode *vp);
112 static void vgonel(struct vnode *);
113 static bool vhold_recycle_free(struct vnode *);
114 static void vdropl_recycle(struct vnode *vp);
115 static void vdrop_recycle(struct vnode *vp);
120 static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo,
124 static SYSCTL_NODE(_vfs, OID_AUTO, vnode, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
125 "vnode configuration and statistics");
127 "vnode configuration");
129 "vnode statistics");
131 "vnode recycling");
135 * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode.
151 Conversion tables from vnode types to inode formats
166 static TAILQ_HEAD(freelst, vnode) vnode_list;
167 static struct vnode *vnode_list_free_marker;
168 static struct vnode *vnode_list_reclaim_marker;
171 * "Free" vnode target. Free vnodes are rarely completely free, but are
199 "Number of vnodes recycled to meet vnode cache targets (legacy)");
202 "Number of vnodes recycled to meet vnode cache targets");
207 "Number of free vnodes recycled to meet vnode cache targets (legacy)");
210 "Number of free vnodes recycled to meet vnode cache targets");
215 "Number of free vnodes recycled by vn_alloc callers to meet vnode cache targets");
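The counters behind these descriptions are exported twice: at the legacy vfs.* location and under the newer vfs.vnode.stats subtree (lines 124-131). A minimal sketch of that wiring using counter(9), with names assumed rather than copied from the source:

    /* Per-CPU counter, exported at both the old and new sysctl homes. */
    static COUNTER_U64_DEFINE_EARLY(recycles_count);
    SYSCTL_COUNTER_U64(_vfs, OID_AUTO, recycles, CTLFLAG_RD, &recycles_count,
        "Number of vnodes recycled to meet vnode cache targets (legacy)");
    SYSCTL_COUNTER_U64(_vfs_vnode_stats, OID_AUTO, recycles, CTLFLAG_RD,
        &recycles_count,
        "Number of vnodes recycled to meet vnode cache targets");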
248 MALLOC_DEFINE(M_VNODEPOLL, "VN POLL", "vnode poll");
315 struct vnode *tab[VDBATCH_SIZE];
319 static void vdbatch_dequeue(struct vnode *vp);
429 struct vnode *vp; in sysctl_try_reclaim_vnode()
455 * This vnode is being recycled. Return != 0 to let the caller in sysctl_try_reclaim_vnode()
458 * a new vnode if necessary) in sysctl_try_reclaim_vnode()
477 struct vnode *vp; in sysctl_ftry_reclaim_vnode()
506 sysctl_try_reclaim_vnode, "A", "Try to reclaim a vnode by its pathname");
510 "Try to reclaim a vnode by its file descriptor");
515 _Static_assert(sizeof(struct vnode) >= 1UL << vnsz2log &&
516 sizeof(struct vnode) < 1UL << (vnsz2log + 1),
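These two assertion lines pin sizeof(struct vnode) into a single power-of-two size class. A self-contained sketch of the same pattern, with a hypothetical vnsz2log value and a stand-in structure:

    #include <stddef.h>

    enum { vnsz2log = 9 };                  /* hypothetical: 512 <= size < 1024 */

    struct vnode_like { char pad[560]; };   /* stand-in for struct vnode */

    _Static_assert(sizeof(struct vnode_like) >= 1UL << vnsz2log &&
        sizeof(struct vnode_like) < 1UL << (vnsz2log + 1),
        "vnsz2log does not match the structure size");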
574 * Initialize the vnode management data structures.
584 static MALLOC_DEFINE(M_VNODE_MARKER, "vnodemarker", "vnode marker");
586 static struct vnode *
589 struct vnode *vp; in vn_alloc_marker()
591 vp = malloc(sizeof(struct vnode), M_VNODE_MARKER, M_WAITOK | M_ZERO); in vn_alloc_marker()
599 vn_free_marker(struct vnode *vp) in vn_free_marker()
619 _Static_assert(offsetof(struct vnode, v_vnodelist) < in vnode_dtor()
620 offsetof(struct vnode, v_dbatchcpu), in vnode_dtor()
623 off1 = offsetof(struct vnode, v_vnodelist); in vnode_dtor()
624 off2 = offsetof(struct vnode, v_dbatchcpu); in vnode_dtor()
625 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); in vnode_dtor()
626 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); in vnode_dtor()
630 * after the vnode has been freed. Try to get some KASAN coverage by in vnode_dtor()
655 * Initialize a vnode as it first enters the zone.
660 struct vnode *vp; in vnode_init()
668 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); in vnode_init()
672 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, in vnode_init()
703 * Free a vnode when it is cleared from the zone.
708 struct vnode *vp; in vnode_fini()
727 * vnode memory consumption. The size is specified directly to
767 sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ)); in vntblinit()
797 vnode_zone = uma_zcreate("VNODE", sizeof(struct vnode), ctor, dtor, in vntblinit()
840 * vnode belonging to mp.
844 /    vnode lock  A        /    vnode lock (/var)      D
845 /var vnode lock  B        /log vnode lock (/var/log)  E
863 * 7. vn_lock() obtains lock on deadfs vnode vp_crossmp instead of A.
871 * by setting VV_CROSSLOCK on the covered vnode so that lock B will
1170 * Set vnode attributes to VNOVAL
1212 * whether the vnode should be recycled
1217 * of a vnode blowout so we want to do this). Therefore, this operation
1220 A number of conditions may prevent a vnode from being reclaimed:
1221 * the buffer cache may have references on the vnode, a directory
1222 * vnode may still have references due to the namei cache representing
1223 * underlying files, or the vnode may be in active use. It is not
1238 struct vnode *vp, *mvp; in vlrureclaim()
1279 * Handle races against vnode allocation. Filesystems lock the in vlrureclaim()
1280 * vnode some time after it gets returned from getnewvnode, in vlrureclaim()
1353 "limit on vnode free requests per call to the vnlru_free routine (legacy)");
1356 "limit on vnode free requests per call to the vnlru_free routine");
1362 vnlru_free_impl(int count, struct vfsops *mnt_op, struct vnode *mvp, bool isvnlru) in vnlru_free_impl()
1364 struct vnode *vp; in vnlru_free_impl()
1383 * The free vnode marker can be past eligible vnodes: in vnlru_free_impl()
1410 * Don't recycle if our vnode is from different type in vnlru_free_impl()
1413 * vnode is reclaimed. in vnlru_free_impl()
1440 * The solution would be to pre-check if the vnode is likely to in vnlru_free_impl()
1494 vnlru_free_vfsops(int count, struct vfsops *mnt_op, struct vnode *mvp) in vnlru_free_vfsops()
1505 struct vnode *
1508 struct vnode *mvp; in vnlru_alloc_marker()
1518 vnlru_free_marker(struct vnode *mvp) in vnlru_free_marker()
1546 "Number of times vnlru awakened due to vnode shortage");
1686 * Sample testcase: vnode limit 8388608, 20 separate directory trees each with
1715 * vnode limit might have changed and now we may be at a significant in vnlru_proc_light_pick()
1809 * Sleep if the vnode cache is in a good state. This is in vnlru_proc()
1812 * reducing free vnode count). Otherwise, try to reclaim in vnlru_proc()
1894 * Routines having to do with the management of the vnode table.
1898 * Try to recycle a freed vnode.
1901 vtryrecycle(struct vnode *vp, bool isvnlru) in vtryrecycle()
1908 This vnode may be found and locked via some other list, if so we in vtryrecycle()
1931 * anyone picked up this vnode from another list. If not, we will in vtryrecycle()
1959 * Allocate a new vnode.
1970 * The routine can try to free a vnode or stall for up to 1 second waiting for
1977 "Number of times vnode allocation blocked waiting on vnlru");
1979 static struct vnode * __noinline
2012 * Grow the vnode cache if it will not be above its target max after in vn_alloc_hard()
2013 * growing. Otherwise, if there is at least one free vnode, try to in vn_alloc_hard()
2022 * Wait for space for a new vnode. in vn_alloc_hard()
2046 static struct vnode *
2062 vn_free(struct vnode *vp) in vn_free()
2070 * Allocate a new vnode.
2074 struct vnode **vpp) in getnewvnode()
2076 struct vnode *vp; in getnewvnode()
2098 * Locks are given the generic name "vnode" when created. in getnewvnode()
2106 * The change only needs to be made when the vnode moves in getnewvnode()
2126 * Finalize various vnode identity bits. in getnewvnode()
2153 * E.g., nullfs uses vfs_hash_index() on the lower vnode for in getnewvnode()
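From the filesystem side, the allocator above is paired with insmntque() (listed further down) in a standard pattern. A condensed sketch, not taken from this file; "myfs", myfs_vnodeops, and ip are hypothetical:

    struct vnode *vp;
    int error;

    error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
    if (error != 0)
        return (error);
    vp->v_data = ip;                        /* hypothetical in-memory inode */
    vp->v_type = VREG;
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);   /* insmntque expects vp locked */
    error = insmntque(vp, mp);              /* reclaims vp itself on failure */
    if (error != 0)
        return (error);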
2185 freevnode(struct vnode *vp) in freevnode()
2192 * The vnode has been marked for destruction, so free it. in freevnode()
2194 * The vnode will be returned to the zone where it will in freevnode()
2195 * normally remain until it is needed for another vnode. We in freevnode()
2198 * so as not to contaminate the freshly allocated vnode. in freevnode()
2200 CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); in freevnode()
2207 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); in freevnode()
2211 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); in freevnode()
2231 * here while having another vnode locked when trying to in freevnode()
2252 * Delete from old mount point vnode list, if on one.
2255 delmntque(struct vnode *vp) in delmntque()
2266 ("bad mount point vnode list size")); in delmntque()
2278 insmntque1_int(struct vnode *vp, struct mount *mp, bool dtr) in insmntque1_int()
2282 ("insmntque: vnode already on per mount vnode list")); in insmntque1_int()
2293 * We acquire the vnode interlock early to ensure that the in insmntque1_int()
2294 * vnode cannot be recycled by another process releasing a in insmntque1_int()
2295 * holdcnt on it before we get it on both the vnode list in insmntque1_int()
2296 * and the active vnode list. The mount mutex protects only in insmntque1_int()
2297 * manipulation of the vnode list and the vnode freelist in insmntque1_int()
2298 * mutex protects only manipulation of the active vnode list. in insmntque1_int()
2299 * Hence the need to hold the vnode interlock throughout. in insmntque1_int()
2321 ("neg mount point vnode list size")); in insmntque1_int()
2330 * insmntque() reclaims the vnode on insertion failure, insmntque1()
2331 * leaves handling of the vnode to the caller.
2334 insmntque(struct vnode *vp, struct mount *mp) in insmntque()
2340 insmntque1(struct vnode *vp, struct mount *mp) in insmntque1()
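The ordering described in the comment at 2293-2299 above amounts to holding both interlocks across the list manipulation, so a concurrent holdcnt release cannot recycle the vnode halfway through. A condensed sketch with the stock locking macros (unmount checks and error paths omitted; field names as in sys/mount.h):

    ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp");
    MNT_ILOCK(mp);
    VI_LOCK(vp);
    vp->v_mount = mp;
    TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
    mp->mnt_nvnodelistsize++;
    VI_UNLOCK(vp);
    MNT_IUNLOCK(mp);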
2393 * Wait for I/O to complete. XXX needs cleaning up. The vnode can in bufobj_invalbuf()
2433 * Flush out and invalidate all buffers associated with a vnode.
2437 vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo) in vinvalbuf()
2569 vtruncbuf(struct vnode *vp, off_t length, int blksize) in vtruncbuf()
2594 * Write out vnode metadata, e.g. indirect blocks. in vtruncbuf()
2601 * Since we hold the vnode lock this should only in vtruncbuf()
2631 v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn, in v_inval_buf_range()
2654 v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, in v_inval_buf_range_locked()
2838 * Associate a buffer with a vnode.
2841 bgetvp(struct vnode *vp, struct buf *bp) in bgetvp()
2855 * Add the buf to the vnode's clean list unless we lost a race and find in bgetvp()
2879 * Disassociate a buffer from a vnode.
2885 struct vnode *vp; in brelvp()
2891 * Delete from old vnode list, if on one. in brelvp()
2964 struct vnode *vp; in sync_vnode()
2974 * We use vhold in case the vnode does not in sync_vnode()
2975 * successfully sync. vhold prevents the vnode from in sync_vnode()
2977 * we can acquire the vnode interlock. in sync_vnode()
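The hold-then-lock pattern this comment describes, in isolation (a sketch; the syncer's own mutex handling is omitted):

    vhold(vp);                              /* keep vp from being recycled */
    if (vn_lock(vp, LK_EXCLUSIVE) == 0) {
        (void)VOP_FSYNC(vp, MNT_LAZY, curthread);
        VOP_UNLOCK(vp);
    }
    vdrop(vp);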
3211 * Move the buffer between the clean and dirty lists of its vnode.
3216 struct vnode *vp; in reassignbuf()
3295 v_init_counters(struct vnode *vp) in v_init_counters()
3299 vp, ("%s called for an initialized vnode", __FUNCTION__)); in v_init_counters()
3307 * Get a usecount on a vnode.
3309 * vget and vget_finish may fail to lock the vnode if they lose a race against
3312 * Consumers which don't guarantee liveness of the vnode can use SMR to
3313 * try to get a reference. Note this operation can fail since the vnode
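In practice the SMR variant listed next is a three-step dance: peek at the vnode inside an SMR section, publish intent with vget_prep_smr(), then lock outside the section with vget_finish(). A sketch; lookup_vnode_smr() is a hypothetical SMR-safe lookup:

    enum vgetstate vs;
    int error;

    vfs_smr_enter();
    vp = lookup_vnode_smr();
    vs = vget_prep_smr(vp);
    vfs_smr_exit();
    if (vs == VGET_NONE)
        return (EAGAIN);                    /* vp was already being freed */
    error = vget_finish(vp, LK_SHARED, vs);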
3317 vget_prep_smr(struct vnode *vp) in vget_prep_smr()
3335 vget_prep(struct vnode *vp) in vget_prep()
3349 vget_abort(struct vnode *vp, enum vgetstate vs) in vget_abort()
3374 vget(struct vnode *vp, int flags) in vget()
3383 vget_finish(struct vnode *vp, int flags, enum vgetstate vs) in vget_finish()
3398 CTR2(KTR_VFS, "%s: impossible to lock vnode %p", __func__, in vget_finish()
3408 vget_finish_ref(struct vnode *vp, enum vgetstate vs) in vget_finish_ref()
3420 * We hold the vnode. If the usecount is 0 it will be utilized to keep in vget_finish_ref()
3421 the vnode around. Otherwise someone else lent their hold count and in vget_finish_ref()
3437 vref(struct vnode *vp) in vref()
3447 vrefact(struct vnode *vp) in vrefact()
3457 vlazy(struct vnode *vp) in vlazy()
3461 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); in vlazy()
3466 * We may get here for inactive routines after the vnode got doomed. in vlazy()
3481 vunlazy(struct vnode *vp) in vunlazy()
3492 * Don't remove the vnode from the lazy list if another thread in vunlazy()
3494 * vnode to the lazy list and is now responsible for its in vunlazy()
3507 * the vnode.
3510 vunlazy_gone(struct vnode *vp) in vunlazy_gone()
3530 vdefer_inactive(struct vnode *vp) in vdefer_inactive()
3556 vdefer_inactive_unlocked(struct vnode *vp) in vdefer_inactive_unlocked()
3573 * provides liveness of the vnode, meaning we have to vdrop.
3576 * exclusive lock on the vnode, while it is legal to call here with only a
3577 * shared lock (or no locks). If locking the vnode in an expected manner fails,
3581 vput_final(struct vnode *vp, enum vput_op func) in vput_final()
3599 * If the vnode is doomed vgone already performed inactive processing in vput_final()
3613 * vnode lock, opening a window where the vnode can get doomed all the in vput_final()
3684 * Decrement ->v_usecount for a vnode.
3697 vrele(struct vnode *vp) in vrele()
3711 vput(struct vnode *vp) in vput()
3729 vunref(struct vnode *vp) in vunref()
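The three release flavors differ only in what they expect about the vnode lock: vrele() takes an unlocked vnode, vput() a locked one (and unlocks it), and vunref() drops the use count while leaving the lock held. For example (a sketch):

    struct vattr va;

    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    error = VOP_GETATTR(vp, &va, curthread->td_ucred);
    vput(vp);               /* equivalent to VOP_UNLOCK(vp) + vrele(vp) */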
3740 vhold(struct vnode *vp) in vhold()
3753 vholdnz(struct vnode *vp) in vholdnz()
3767 * Grab a hold count unless the vnode is freed.
3770 * freeing the vnode.
3773 * is not set. After the flag is set the vnode becomes immutable to anyone but
3786 vhold_smr(struct vnode *vp) in vhold_smr()
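The comment above describes a hold counter where one flag bit means "the freeing thread owns this object". A self-contained userspace analogue of that acquire-unless-freed CAS loop (all names invented):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define HOLD_NO_SMR (1u << 30)          /* "being freed" flag bit */

    static bool
    hold_smr(_Atomic unsigned *holdcnt)
    {
        unsigned count;

        count = atomic_load_explicit(holdcnt, memory_order_acquire);
        for (;;) {
            if ((count & HOLD_NO_SMR) != 0)
                return (false);             /* owned by the freeing thread */
            if (atomic_compare_exchange_weak_explicit(holdcnt, &count,
                count + 1, memory_order_acq_rel, memory_order_acquire))
                return (true);              /* hold taken, even from count 0 */
        }
    }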
3809 * Hold a free vnode for recycling.
3813 * Attempts to recycle only need the global vnode list lock and have no use for
3822 * Note: the vnode may gain more references after we transition the count 0->1.
3825 vhold_recycle_free(struct vnode *vp) in vhold_recycle_free()
3852 struct vnode *vp; in vdbatch_process()
3906 vdbatch_enqueue(struct vnode *vp) in vdbatch_enqueue()
3943 vdbatch_dequeue(struct vnode *vp) in vdbatch_dequeue()
3968 * Either we dequeued the vnode above or the target CPU beat us to it. in vdbatch_dequeue()
3974 * Drop the hold count of the vnode.
3978 * Because the vnode vm object keeps a hold reference on the vnode if
3979 * there is at least one resident non-cached page, the vnode cannot
3983 vdropl_final(struct vnode *vp) in vdropl_final()
4010 vdrop(struct vnode *vp) in vdrop()
4022 vdropl_impl(struct vnode *vp, bool enqueue) in vdropl_impl()
4050 * released our hold and by now the vnode might have been in vdropl_impl()
4057 vdropl(struct vnode *vp) in vdropl()
4064 * vdrop a vnode when recycling
4067 regular vdrop by not requeueing the vnode on LRU.
4075 vdropl_recycle(struct vnode *vp) in vdropl_recycle()
4082 vdrop_recycle(struct vnode *vp) in vdrop_recycle()
4090 * Call VOP_INACTIVE on the vnode and manage the DOINGINACT and OWEINACT
4094 vinactivef(struct vnode *vp) in vinactivef()
4108 * modified pages are converted into the vnode's dirty in vinactivef()
4110 * vnode is on the inactive list. in vinactivef()
4127 vinactive(struct vnode *vp) in vinactive()
4146 * Remove any vnodes in the vnode table belonging to mount point mp.
4158 * `rootrefs' specifies the base reference count for the root vnode
4159 * of this filesystem. The root vnode is considered busy if its
4161 * will call vrele() on the root vnode exactly rootrefs times.
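From the unmount side this typically looks like the following sketch (msdosfs-style: one base reference is held on the root vnode, hence rootrefs == 1; mntflags and td come from the surrounding unmount routine):

    error = vflush(mp, 1, (mntflags & MNT_FORCE) != 0 ? FORCECLOSE : 0, td);
    if (error != 0)
        return (error);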
4173 struct vnode *vp, *mvp, *rootvp = NULL; in vflush()
4183 * Get the filesystem root vnode. We can vput() it in vflush()
4240 * vnode data structures and we are done. in vflush()
4242 * If FORCECLOSE is set, forcibly close the vnode. in vflush()
4250 vn_printf(vp, "vflush: busy vnode "); in vflush()
4258 * If just the root vnode is busy, and if its refcount in vflush()
4285 * Recycle an unused vnode.
4288 vrecycle(struct vnode *vp) in vrecycle()
4302 vrecyclel(struct vnode *vp) in vrecyclel()
4318 * Eliminate all activity associated with a vnode
4322 vgone(struct vnode *vp) in vgone()
4330 * Notify upper mounts about reclaimed or unlinked vnode.
4333 vfs_notify_upper(struct vnode *vp, enum vfs_notify_upper_type event) in vfs_notify_upper()
4373 vgonel(struct vnode *vp) in vgonel()
4404 * Check to see if the vnode is in use. If so, we have to in vgonel()
4423 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); in vgonel()
4430 * If purging an active vnode, it must be closed and in vgonel()
4449 * Clean out any buffers associated with the vnode. in vgonel()
4480 * should not touch the object borrowed from the lower vnode in vgonel()
4488 * Reclaim the vnode. in vgonel()
4504 * Delete from old mount point vnode list. in vgonel()
4514 * the vnode. in vgonel()
4516 * FIXME: this is buggy for vnode ops with custom locking primitives. in vgonel()
4534 * Print out a description of a vnode.
4549 "vnode type name not added to vtypename");
4558 "vnode state name not added to vstatename");
4564 vn_printf(struct vnode *vp, const char *fmt, ...) in vn_printf()
4702 struct vnode *vp; in DB_SHOW_COMMAND_FLAGS()
4714 vn_printf(vp, "vnode "); in DB_SHOW_COMMAND_FLAGS()
4720 * Show details about the given vnode.
4722 DB_SHOW_COMMAND(vnode, db_show_vnode) in DB_SHOW_COMMAND() argument
4724 struct vnode *vp; in DB_SHOW_COMMAND()
4728 vp = (struct vnode *)addr; in DB_SHOW_COMMAND()
4729 vn_printf(vp, "vnode "); in DB_SHOW_COMMAND()
4740 struct vnode *vp; in DB_SHOW_COMMAND()
4913 vn_printf(vp, "vnode "); in DB_SHOW_COMMAND()
4921 vn_printf(vp, "vnode "); in DB_SHOW_COMMAND()
5130 vfs_deferred_inactive(struct vnode *vp, int lkflags) in vfs_deferred_inactive()
5150 vfs_periodic_inactive_filter(struct vnode *vp, void *arg) in vfs_periodic_inactive_filter()
5159 struct vnode *vp, *mvp; in vfs_periodic_inactive()
5177 vfs_want_msync(struct vnode *vp) in vfs_want_msync()
5192 vfs_periodic_msync_inactive_filter(struct vnode *vp, void *arg __unused) in vfs_periodic_msync_inactive_filter()
5205 struct vnode *vp, *mvp; in vfs_periodic_msync_inactive()
5275 * Initialize per-vnode helper structure to hold poll-related state.
5278 v_addpollinfo(struct vnode *vp) in v_addpollinfo()
5285 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); in v_addpollinfo()
5301 * a vnode. Because poll uses the historic select-style interface
5308 vn_pollrecord(struct vnode *vp, struct thread *td, int events) in vn_pollrecord()
5334 * Routine to create and manage a filesystem syncer vnode.
5358 * Create a new filesystem syncer vnode for the specified mount point.
5363 struct vnode *vp; in vfs_allocate_syncvnode()
5368 /* Allocate a new vnode */ in vfs_allocate_syncvnode()
5382 * Place the vnode onto the syncer worklist. We attempt to in vfs_allocate_syncvnode()
5419 struct vnode *vp; in vfs_deallocate_syncvnode()
5436 struct vnode *syncvp = ap->a_vp; in sync_fsync()
5476 The syncer vnode is no longer referenced.
5487 * The syncer vnode is no longer needed and is being decommissioned.
5494 struct vnode *vp = ap->a_vp; in sync_reclaim()
5515 vn_need_pageq_flush(struct vnode *vp) in vn_need_pageq_flush()
5525 * Check if vnode represents a disk device
5528 vn_isdisk_error(struct vnode *vp, int *errp) in vn_isdisk_error()
5551 vn_isdisk(struct vnode *vp) in vn_isdisk()
5616 * vnode's type, "mode", uid and gid, requested access mode, and credentials.
5732 extattr_check_cred(struct vnode *vp, int attrnamespace, struct ucred *cred, in extattr_check_cred()
5759 assert_vi_locked(struct vnode *vp, const char *str) in assert_vi_locked()
5762 ("%s: vnode interlock is not locked but should be", str)); in assert_vi_locked()
5766 assert_vi_unlocked(struct vnode *vp, const char *str) in assert_vi_unlocked()
5769 ("%s: vnode interlock is locked but should not be", str)); in assert_vi_unlocked()
5773 assert_vop_locked(struct vnode *vp, const char *str) in assert_vop_locked()
5787 VNASSERT(locked, vp, ("%s: vnode is not locked but should be", str)); in assert_vop_locked()
5791 assert_vop_unlocked(struct vnode *vp, const char *str) in assert_vop_unlocked()
5804 VNASSERT(!locked, vp, ("%s: vnode is locked but should not be", str)); in assert_vop_unlocked()
5808 assert_vop_elocked(struct vnode *vp, const char *str) in assert_vop_elocked()
5817 ("%s: vnode is not exclusively locked but should be", str)); in assert_vop_elocked()
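Filesystems normally reach these checks through the ASSERT_* wrapper macros, which compile away unless DEBUG_VFS_LOCKS is configured. For instance:

    ASSERT_VOP_ELOCKED(vp, "myfs_setattr");     /* needs the exclusive lock */
    ASSERT_VI_UNLOCKED(vp, __func__);           /* interlock must not be held */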
5895 struct vnode *vp; in vop_fplookup_vexec_debugpost()
5920 vop_fsync_debugprepost(struct vnode *vp, const char *name) in vop_fsync_debugprepost()
6032 struct vnode *vp = a->a_vp; in vop_unlock_debugpre()
6081 struct vnode *dvp; in vop_create_pre()
6092 struct vnode *dvp; in vop_create_post()
6117 struct vnode *dvp; in vop_whiteout_pre()
6128 struct vnode *dvp; in vop_whiteout_post()
6139 struct vnode *vp; in vop_deleteextattr_pre()
6150 struct vnode *vp; in vop_deleteextattr_post()
6165 struct vnode *vp, *tdvp; in vop_link_pre()
6178 struct vnode *vp, *tdvp; in vop_link_post()
6197 struct vnode *dvp; in vop_mkdir_pre()
6208 struct vnode *dvp; in vop_mkdir_post()
6235 struct vnode *dvp; in vop_mknod_pre()
6246 struct vnode *dvp; in vop_mknod_post()
6261 struct vnode *vp; in vop_reclaim_post()
6276 struct vnode *dvp, *vp; in vop_remove_pre()
6290 struct vnode *dvp, *vp; in vop_remove_post()
6349 struct vnode *dvp, *vp; in vop_rmdir_pre()
6363 struct vnode *dvp, *vp; in vop_rmdir_post()
6382 struct vnode *vp; in vop_setattr_pre()
6393 struct vnode *vp; in vop_setattr_post()
6408 struct vnode *vp; in vop_setacl_pre()
6419 struct vnode *vp; in vop_setacl_post()
6430 struct vnode *vp; in vop_setextattr_pre()
6441 struct vnode *vp; in vop_setextattr_post()
6456 struct vnode *dvp; in vop_symlink_pre()
6467 struct vnode *dvp; in vop_symlink_post()
6656 struct vnode *vp = arg; in vfs_knllock()
6664 struct vnode *vp = arg; in vfs_knlunlock()
6673 struct vnode *vp = arg; in vfs_knl_assert_lock()
6685 struct vnode *vp = ap->a_vp; in vfs_kqfilter()
6719 * Detach knote from vnode
6724 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsdetach()
6735 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsread()
6764 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfswrite()
6783 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsvnode()
6803 struct vnode *vp; in filt_vfsdump()
6837 struct vnode *vp; in filt_vfscopy()
6839 vp = (struct vnode *)kn->kn_hook; in filt_vfscopy()
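These filt_vfs* handlers are the kernel half of EVFILT_VNODE. For reference, the userspace half is plain kqueue(2) code; a minimal sketch with error handling omitted and an illustrative path:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <fcntl.h>

    /* Block until "/tmp/watched" is written to or deleted. */
    static int
    wait_for_change(void)
    {
        struct kevent ev;
        int kq = kqueue();
        int fd = open("/tmp/watched", O_RDONLY);

        EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
            NOTE_WRITE | NOTE_DELETE, 0, NULL);
        kevent(kq, &ev, 1, NULL, 0, NULL);          /* register the filter */
        return (kevent(kq, NULL, 0, &ev, 1, NULL)); /* wait for one event */
    }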
6922 * Clear out a doomed vnode (if any) and replace it with a new one as long
6923 * as the fs is not being unmounted. Return the root vnode to the caller.
6926 vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) in vfs_cache_root_fallback()
6928 struct vnode *vp; in vfs_cache_root_fallback()
6973 panic("%s: mismatch between vnode returned " in vfs_cache_root_fallback()
6986 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) in vfs_cache_root()
6989 struct vnode *vp; in vfs_cache_root()
7010 struct vnode *
7013 struct vnode *vp; in vfs_cache_root_clear()
7016 * ops > 0 guarantees there is nobody who can see this vnode in vfs_cache_root_clear()
7027 vfs_cache_root_set(struct mount *mp, struct vnode *vp) in vfs_cache_root_set()
7042 struct vnode *
7043 __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) in __mnt_vnode_next_all()
7045 struct vnode *vp; in __mnt_vnode_next_all()
7049 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in __mnt_vnode_next_all()
7074 struct vnode *
7075 __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) in __mnt_vnode_first_all()
7077 struct vnode *vp; in __mnt_vnode_first_all()
7107 __mnt_vnode_markerfree_all(struct vnode **mvp, struct mount *mp) in __mnt_vnode_markerfree_all()
7117 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in __mnt_vnode_markerfree_all()
7130 mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) in mnt_vnode_markerfree_lazy()
7133 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in mnt_vnode_markerfree_lazy()
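Callers never touch these markers directly; they use the iteration macros built on the functions above. Typical shape (the macro yields each vp with its interlock held, so the body must release it on every path):

    struct vnode *vp, *mvp;

    MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
        if (vp->v_type != VREG) {
            VI_UNLOCK(vp);
            continue;
        }
        /* ... inspect vp under its interlock ... */
        VI_UNLOCK(vp);
    }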
7143 * Relock the mp mount vnode list lock with the vp vnode interlock in the
7146 * On entry, the mount vnode list lock is held and the vnode interlock is not.
7148 * On failure, the mount vnode list lock is held but the vnode interlock is
7152 mnt_vnode_next_lazy_relock(struct vnode *mvp, struct mount *mp, in mnt_vnode_next_lazy_relock()
7153 struct vnode *vp) in mnt_vnode_next_lazy_relock()
7160 ("%s: inappropriate vnode", __func__)); in mnt_vnode_next_lazy_relock()
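The relock dance the comment describes: pin the vnode with vholdnz(), drop the list lock, take the interlock, then retake the list lock in the conventional order. A condensed sketch; the caller must still re-validate the vnode's position on the lazy list and undo the temporary hold:

    vholdnz(vp);                            /* pin vp across the lock dance */
    mtx_unlock(&mp->mnt_listmtx);
    VI_LOCK(vp);
    mtx_lock(&mp->mnt_listmtx);
    /* ... re-check vp on the lazy list, then drop the temporary hold ... */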
7195 static struct vnode *
7196 mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, in mnt_vnode_next_lazy()
7199 struct vnode *vp; in mnt_vnode_next_lazy()
7202 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in mnt_vnode_next_lazy()
7211 * See if we want to process the vnode. Note we may encounter a in mnt_vnode_next_lazy()
7238 ("alien vnode on the lazy list %p %p", vp, mp)); in mnt_vnode_next_lazy()
7257 struct vnode *
7258 __mnt_vnode_next_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, in __mnt_vnode_next_lazy()
7267 struct vnode *
7268 __mnt_vnode_first_lazy(struct vnode **mvp, struct mount *mp, mnt_lazy_cb_t *cb, in __mnt_vnode_first_lazy()
7271 struct vnode *vp; in __mnt_vnode_first_lazy()
7293 __mnt_vnode_markerfree_lazy(struct vnode **mvp, struct mount *mp) in __mnt_vnode_markerfree_lazy()
7306 vn_dir_check_exec(struct vnode *vp, struct componentname *cnp) in vn_dir_check_exec()
7319 * to prevent the vnode from getting freed.
7322 vn_seqc_write_begin_locked(struct vnode *vp) in vn_seqc_write_begin_locked()
7334 vn_seqc_write_begin(struct vnode *vp) in vn_seqc_write_begin()
7343 vn_seqc_write_end_locked(struct vnode *vp) in vn_seqc_write_end_locked()
7354 vn_seqc_write_end(struct vnode *vp) in vn_seqc_write_end()
7365 * The counter remains unchanged on free so that a doomed vnode will
7369 vn_seqc_init(struct vnode *vp) in vn_seqc_init()
7377 vn_seqc_write_end_free(struct vnode *vp) in vn_seqc_write_end_free()
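The vn_seqc_* functions implement a sequence counter: a write in progress leaves the counter odd, and lockless readers retry if they observed a modification in flight. A self-contained userspace analogue (all names invented):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned seqc;   /* even = stable, odd = write in progress */

    static void
    seqc_write_begin(void)
    {
        atomic_fetch_add_explicit(&seqc, 1, memory_order_acq_rel);
    }

    static void
    seqc_write_end(void)
    {
        atomic_fetch_add_explicit(&seqc, 1, memory_order_release);
    }

    static unsigned
    seqc_read(void)
    {
        unsigned s;

        while (((s = atomic_load_explicit(&seqc, memory_order_acquire)) & 1) != 0)
            ;                       /* writer in progress, retry */
        return (s);
    }

    static bool
    seqc_consistent(unsigned s)
    {
        return (atomic_load_explicit(&seqc, memory_order_acquire) == s);
    }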
7385 vn_irflag_set_locked(struct vnode *vp, short toset) in vn_irflag_set_locked()
7398 vn_irflag_set(struct vnode *vp, short toset) in vn_irflag_set()
7407 vn_irflag_set_cond_locked(struct vnode *vp, short toset) in vn_irflag_set_cond_locked()
7417 vn_irflag_set_cond(struct vnode *vp, short toset) in vn_irflag_set_cond()
7426 vn_irflag_unset_locked(struct vnode *vp, short tounset) in vn_irflag_unset_locked()
7439 vn_irflag_unset(struct vnode *vp, short tounset) in vn_irflag_unset()
7448 vn_getsize_locked(struct vnode *vp, off_t *size, struct ucred *cred) in vn_getsize_locked()
7465 vn_getsize(struct vnode *vp, off_t *size, struct ucred *cred) in vn_getsize()
7477 vn_set_state_validate(struct vnode *vp, __enum_uint8(vstate) state) in vn_set_state_validate()