Lines Matching full:vnode

77  * on the bypass routine, through which nearly all vnode operations
80 * The bypass routine accepts arbitrary vnode operations for
81 * handling by the lower layer. It begins by examining vnode
85 * in the arguments and, if a vnode is returned by the operation,
86 * stacks a null-node on top of the returned vnode.
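A minimal sketch of the mapping step those fragments describe, assuming an operation that carries exactly one vnode argument and returns no vnode. The descriptor machinery (vdesc_vp_offsets, VOPARG_OFFSETTO, VCALL) and the NULLVPTOLOWERVP macro come from sys/vnode.h and the nullfs headers; the function name is invented, and the multi-vnode mapping, vrele handling, and re-stacking of returned vnodes done by the real null_bypass are omitted.

#include <sys/param.h>
#include <sys/vnode.h>

#include <fs/nullfs/null.h>

static int
example_bypass_single_vp(struct vop_generic_args *ap)
{
	struct vnodeop_desc *descp = ap->a_desc;
	struct vnode **vpp, *old_vp;
	int error;

	/* Locate the first (and here, only) vnode argument of the operation. */
	vpp = VOPARG_OFFSETTO(struct vnode **, descp->vdesc_vp_offsets[0], ap);
	old_vp = *vpp;

	*vpp = NULLVPTOLOWERVP(old_vp);	/* substitute the lower vnode */
	error = VCALL(ap);		/* invoke the operation on the lower layer */
	*vpp = old_vp;			/* restore the null-node for the caller */

	return (error);
}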
92 * current vnode as well as pass the lock request down.
96 * Also, certain vnode operations change the locking state within
100 * function unlock them. Otherwise all intermediate vnode layers
105 * INSTANTIATING VNODE STACKS
108 * effect stacking two VFSes. Vnode stacks are instead
111 * The initial mount creates a single vnode stack for the
112 * root of the new null layer. All other vnode stacks
113 * are created as a result of vnode operations on
114 * this or other null vnode stacks.
116 * New vnode stacks come into existence as a result of
117 * an operation which returns a vnode.
119 * vnode before returning it to the caller.
127 * to the lower layer which would return a vnode representing
131 * process when constructing other vnode stacks.
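A hedged sketch of how such a stack typically comes into existence, loosely modeled on the lookup path: the operation is forwarded to the lower directory, and the vnode the lower layer hands back is wrapped by null_nodeget() before being returned to the caller. The function name is invented, the same headers as in the sketch above are assumed, and the locking, dot-dot handling, and error paths of the real null_lookup are left out.

static int
example_stack_from_lookup(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct vnode *ldvp, *lvp;
	int error;

	ldvp = NULLVPTOLOWERVP(dvp);		/* lower directory vnode */
	error = VOP_LOOKUP(ldvp, &lvp, cnp);	/* look the name up one layer down */
	if (error == 0) {
		/* Stack a null-node on top of the vnode the lower layer returned. */
		error = null_nodeget(dvp->v_mount, lvp, vpp);
	}
	return (error);
}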
152 * by mapping vnode arguments to the lower layer.
160 * A second approach is to directly invoke vnode operations on
164 * is that vnode arguments must be manually mapped.
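By contrast, a hedged sketch of this second approach: the upper-layer routine maps its vnode argument by hand and invokes the lower layer through the VOP_OPERATIONNAME interface. The open operation is used only as an illustration and the function name is invented; this is not the actual null_open.

static int
example_direct_open(struct vnode *vp, int mode, struct ucred *cred,
    struct thread *td)
{
	struct vnode *lvp;

	lvp = NULLVPTOLOWERVP(vp);	/* manual argument mapping */
	return (VOP_OPEN(lvp, mode, cred, td, NULL));
}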
180 #include <sys/vnode.h>
197 * Synchronize inotify flags with the lower vnode:
198 * - If the upper vnode has the flag set and the lower does not, then the lower
199 * vnode is unwatched and the upper vnode does not need to go through
201 * - If the lower vnode is watched, then the upper vnode should go through
205 null_copy_inotify(struct vnode *vp, struct vnode *lvp, short flag) in null_copy_inotify()
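A hedged sketch of the synchronization rule spelled out in the two bullets above, assuming the vn_irflag_read()/vn_irflag_set_cond()/vn_irflag_unset() accessors for the vnode flag word; the real null_copy_inotify may differ in detail.

static void
example_copy_inotify(struct vnode *vp, struct vnode *lvp, short flag)
{
	if ((vn_irflag_read(lvp) & flag) != 0) {
		/* Lower vnode is watched: notifications must also flow through the upper vnode. */
		vn_irflag_set_cond(vp, flag);
	} else if ((vn_irflag_read(vp) & flag) != 0) {
		/* Lower vnode is unwatched: the upper vnode need not go through notification delivery. */
		vn_irflag_unset(vp, flag);
	}
}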
226 * Also, some BSD vnode operations have the side effect of vrele'ing
235 * - the vnode operation vector of the first vnode should be used
237 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
243 struct vnode **this_vp_p; in null_bypass()
244 struct vnode *old_vps[VDESC_MAX_VPS]; in null_bypass()
245 struct vnode **vps_p[VDESC_MAX_VPS]; in null_bypass()
246 struct vnode ***vppp; in null_bypass()
247 struct vnode *lvp; in null_bypass()
266 * the first mapped vnode's operation vector. in null_bypass()
272 vps_p[i] = this_vp_p = VOPARG_OFFSETTO(struct vnode **, in null_bypass()
276 * We're not guaranteed that any but the first vnode in null_bypass()
288 * The upper vnode reference to the lower in null_bypass()
289 * vnode is the only reference that keeps our in null_bypass()
290 * pointer to the lower vnode alive. If lower in null_bypass()
291 * vnode is relocked during the VOP call, in null_bypass()
292 * upper vnode might become unlocked and in null_bypass()
337 * operation, nullfs upper vnode could have in null_bypass()
341 * upper (reclaimed) vnode. in null_bypass()
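The "only reference" comment a few lines above refers to a transient hold taken around the lower-layer call; a hedged sketch of that pattern with an invented helper name, assuming the same headers as the first sketch.

static int
example_call_with_hold(struct vop_generic_args *ap, struct vnode *lvp)
{
	int error;

	vhold(lvp);		/* pin the lower vnode across the lower-layer call */
	error = VCALL(ap);	/* a relock below can no longer invalidate our pointer */
	vdrop(lvp);
	return (error);
}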
379 vppp = VOPARG_OFFSETTO(struct vnode ***, in null_bypass()
392 struct vnode *lvp, *vp; in null_add_writecount()
418 struct vnode *dvp = ap->a_dvp; in null_lookup()
420 struct vnode *vp, *ldvp, *lvp; in null_lookup()
437 * configuration where lower vnode is moved out of the directory tree in null_lookup()
468 * VOP_LOOKUP() on lower vnode may unlock ldvp, which allows in null_lookup()
516 struct vnode *vp, *ldvp; in null_open()
539 struct vnode *vp = ap->a_vp; in null_setattr()
606 struct vnode *vp = ap->a_vp; in null_access()
632 struct vnode *vp = ap->a_vp; in null_accessx()
656 * Increasing refcount of lower vnode is needed at least for the case
667 struct vnode *lvp, *vp; in null_remove()
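A hedged sketch of the reference-count manipulation the comment above is about: the lower vnode is referenced across the bypassed operation so it cannot be recycled out from under us before the call completes. This is simplified; the real null_remove takes the reference only conditionally and also marks the null-node for dropping.

static int
example_remove(struct vop_remove_args *ap)
{
	struct vnode *lvp;
	int error;

	lvp = NULLVPTOLOWERVP(ap->a_vp);
	vref(lvp);			/* keep the lower vnode alive across the bypass */
	error = null_bypass(&ap->a_gen);
	vrele(lvp);
	return (error);
}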
691 struct vnode *fdvp, *fvp, *tdvp, *tvp; in null_rename()
692 struct vnode *lfdvp, *lfvp, *ltdvp, *ltvp; in null_rename()
775 * We need to process our own vnode lock and then clear the interlock flag as
776 * it applies only to our vnode, not the vnodes below us on the stack.
778 * We have to hold the vnode here to solve a potential reclaim race. If we're
783 * prevent it from being recycled by holding the vnode here.
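A hedged sketch of the hold-around-lock pattern those comments describe, modeled on the older nullfs locking code; the current implementation fetches the lower vnode under SMR or the interlock and uses dedicated lock-prep helpers. The interlock flag is stripped because it applies only to our own vnode.

static int
example_lock_lower(struct vnode *vp, int flags)
{
	struct vnode *lvp;
	int error;

	vhold(vp);	/* keep vp, and thus its lower vnode, from being recycled */
	lvp = NULLVPTOLOWERVP(vp);

	/* The interlock flag applies only to our vnode; do not pass it down. */
	error = VOP_LOCK(lvp, flags & ~LK_INTERLOCK);

	vdrop(vp);
	return (error);
}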
785 static struct vnode *
789 struct vnode *lvp; in null_lock_prep_with_smr()
806 static struct vnode *
810 struct vnode *lvp; in null_lock_prep_with_interlock()
831 struct vnode *lvp; in null_lock()
853 * cleaned our vnode already, switching vnode lock from one in in null_lock()
854 * lowervp to v_lock in our own vnode structure. Handle this in null_lock()
883 struct vnode *vp = ap->a_vp; in null_unlock()
885 struct vnode *lvp; in null_unlock()
889 * Contrary to null_lock, we don't need to hold the vnode around in null_unlock()
897 * vop_stdunlock for a doomed vnode matches doomed locking in null_lock. in null_unlock()
911 * since the reference count on the lower vnode is not related to
915 null_want_recycle(struct vnode *vp) in null_want_recycle()
917 struct vnode *lvp; in null_want_recycle()
931 * nullfs vnodes is not enabled, or the lower vnode is in null_want_recycle()
932 * deleted, then free up the vnode so as not to tie up in null_want_recycle()
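A hedged sketch of that decision, with the mount and vnode flag names restated from the nullfs headers from memory (possibly inexact): recycle when caching of nullfs vnodes is disabled for this mount or the lower file appears to have been deleted.

static bool
example_want_recycle(struct vnode *vp)
{
	struct null_mount *xmp;
	struct vnode *lvp;

	xmp = MOUNTTONULLMOUNT(vp->v_mount);
	lvp = NULLVPTOLOWERVP(vp);
	/* No caching of nullfs vnodes, or the lower file was unlinked. */
	return ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
	    (lvp->v_vflag & VV_NOSYNC) != 0);
}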
943 struct vnode *vp; in null_inactive()
961 * Now, the nullfs vnode and, due to the sharing lock, the lower
962 * vnode, are exclusively locked, and we shall destroy the null vnode.
967 struct vnode *vp; in null_reclaim()
969 struct vnode *lowervp; in null_reclaim()
976 ("Reclaiming incomplete null vnode %p", vp)); in null_reclaim()
991 * to the lower vnode. If this is a reclamation due to the in null_reclaim()
1013 struct vnode *vp = ap->a_vp; in null_print()
1024 struct vnode *lowervp; in null_getwritemount()
1025 struct vnode *vp; in null_getwritemount()
1045 struct vnode *lvp; in null_vptofh()
1054 struct vnode *vp = ap->a_vp; in null_vptocnp()
1055 struct vnode **dvp = ap->a_vpp; in null_vptocnp()
1056 struct vnode *lvp, *ldvp; in null_vptocnp()
1100 struct vnode *lvp, *vp; in null_read_pgcache()
1122 struct vnode *lvp, *vp; in null_advlock()
1149 struct vnode *dvp, *ldvp, *lvp, *vp, *vp1, **vpp; in null_vput_pair()
1209 struct vnode *vp, *vpl; in null_getlowvnode()