Lines Matching +full:lower +full:- +full:case

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
47 * a stackable-layers technique, and its "null-node"s stack above
48 * all lower-layer vnodes, not just over directory vnodes.
66 * of the lower vfs (target-pn) and the pathname where the null
67 * layer will appear in the namespace (alias-pn). After
69 * of target-pn subtree will be aliased under alias-pn.
75 * simply bypassing all possible operations to the lower layer
81 * handling by the lower layer. It begins by examining vnode
82 * operation arguments and replacing any null-nodes by their
83 * lower-layer equivalents. It then invokes the operation
84 * on the lower layer. Finally, it replaces the null-nodes
86 * stacks a null-node on top of the returned vnode.
94 * they can handle freeing null-layer specific data. Vop_print
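For a single operation, the bypass technique described above reduces to the pattern sketched here. This is only an illustration, not code from the file: example_getattr is a hypothetical name, while VTONULL() and null_lowervp are the nullfs identifiers used elsewhere in this listing and VOP_GETATTR() is the standard VFS interface.

static int
example_getattr(struct vop_getattr_args *ap)
{
        struct vnode *vp = ap->a_vp;                    /* upper (null) vnode */
        struct vnode *lvp = VTONULL(vp)->null_lowervp;  /* its lower-layer twin */

        /* Substitute the lower vnode and let the lower layer do the work. */
        return (VOP_GETATTR(lvp, ap->a_vap, ap->a_cred));
}

The generic null_bypass() routine further down performs this mapping for every vnode argument and re-wraps any vnode the lower layer returns, which is why most operations need no hand-written handler at all.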
107 * Mounting associates the null layer with a lower layer,
118 * The bypass routine stacks a null-node above the new
124 * the root null-node (which was created when the null layer was mounted).
126 * done on the root null-node. This operation would bypass through
127 * to the lower layer which would return a vnode representing
128 * the UFS "sys". Null_bypass then builds a null-node
130 * Later operations on the null-node "sys" will repeat this
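The wrapping step in that example, where null_bypass builds a null-node on top of the lower layer's "sys" vnode, can be sketched as below. wrap_lower_result is a hypothetical helper name used only for illustration; null_nodeget() is the real nullfs constructor, also visible in the null_bypass fragment later in this listing, and in the real code the lower vnode arrives referenced with that reference taken over by the new null-node.

static int
wrap_lower_result(struct mount *nullfs_mp, struct vnode *lvp, struct vnode **vpp)
{
        /* Stack a null-node above lvp so later operations re-enter nullfs. */
        return (null_nodeget(nullfs_mp, lvp, vpp));
}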
145 * INVOKING OPERATIONS ON LOWER LAYERS
147 * There are two techniques to invoke operations on a lower layer
151 * the operation arguments "correct" for the lower layer
152 * by mapping the vnode arguments to the lower layer.
156 * currently being handled on the lower layer. It has the advantage
161 * the lower layer with the VOP_OPERATIONNAME interface.
163 * arbitrary operations on the lower layer. The disadvantage
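The two techniques read roughly as follows; example_fsync and example_fsync_direct are hypothetical names, while null_bypass(), NULLVPTOLOWERVP() and VOP_FSYNC() are the real interfaces used in this file.

/* Technique 1: hand the whole argument block to the generic bypass. */
static int
example_fsync(struct vop_fsync_args *ap)
{
        return (null_bypass(&ap->a_gen));
}

/*
 * Technique 2: map the vnode argument by hand and call the lower layer
 * through the VOP_OPERATIONNAME interface.
 */
static int
example_fsync_direct(struct vop_fsync_args *ap)
{
        struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);

        return (VOP_FSYNC(lvp, ap->a_waitfor, ap->a_td));
}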
197 * Synchronize inotify flags with the lower vnode:
198 * - If the upper vnode has the flag set and the lower does not, then the lower
201 * - If the lower vnode is watched, then the upper vnode should go through
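A hedged sketch of the first rule, assuming the VIRF_INOTIFY irflag bit and the vn_irflag_read()/vn_irflag_set_cond() accessors that recent FreeBSD uses for this purpose (propagate_inotify_watch is a hypothetical helper name):

static void
propagate_inotify_watch(struct vnode *vp, struct vnode *lvp)
{
        /* Upper vnode is watched but the lower one is not: mark the lower. */
        if ((vn_irflag_read(vp) & VIRF_INOTIFY) != 0 &&
            (vn_irflag_read(lvp) & VIRF_INOTIFY) == 0)
                vn_irflag_set_cond(lvp, VIRF_INOTIFY);
}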
217 * This is the 10-Apr-92 bypass routine.
228 * by the upper node, not the lower one, so we must handle these
229 * side-effects here. This is not of concern in Sun-derived systems
230 * since there are no such side-effects.
233 * - only one returned vpp
234 * - no INOUT vpp's (Sun's vop_open has one of these)
235 * - the vnode operation vector of the first vnode should be used
237 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
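The argument mapping the routine performs is driven by the operation descriptor's offset table; the fragments below use exactly this pattern. The helper name nth_vnode_arg is hypothetical, but vdesc_vp_offsets, VDESC_NO_OFFSET and VOPARG_OFFSETTO() are the real VFS descriptor interfaces.

static struct vnode **
nth_vnode_arg(struct vnodeop_desc *descp, void *ap, int i)
{
        /* Not every operation has an i-th vnode argument. */
        if (descp->vdesc_vp_offsets == NULL ||
            descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
                return (NULL);
        /* Point into the caller's argument structure at the vnode slot. */
        return (VOPARG_OFFSETTO(struct vnode **,
            descp->vdesc_vp_offsets[i], ap));
}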
248 struct vnodeop_desc *descp = ap->a_desc; in null_bypass()
252 printf ("null_bypass: %s\n", descp->vdesc_name); in null_bypass()
258 if (descp->vdesc_vp_offsets == NULL || in null_bypass()
259 descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET) in null_bypass()
268 reles = descp->vdesc_flags; in null_bypass()
270 if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET) in null_bypass()
273 descp->vdesc_vp_offsets[i], ap); in null_bypass()
288 * The upper vnode reference to the lower in null_bypass()
290 * pointer to the lower vnode alive. If lower in null_bypass()
299 * XXX - Several operations have the side effect in null_bypass()
309 * Call the operation on the lower layer in null_bypass()
313 error = ap->a_desc->vdesc_call(ap); in null_bypass()
315 printf("null_bypass: no map for %s\n", descp->vdesc_name); in null_bypass()
320 * Maintain the illusion of call-by-value in null_bypass()
324 reles = descp->vdesc_flags; in null_bypass()
326 if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET) in null_bypass()
333 * flags up in case something is watching the lower in null_bypass()
339 * back to private v_lock. In this case we in null_bypass()
340 * must move lock ownership from lower to in null_bypass()
349 old_vps[i]->v_vnlock != lvp->v_vnlock) { in null_bypass()
368 * Map the possible out-going vpp in null_bypass()
369 * (Assumes that the lower layer always returns in null_bypass()
372 if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && error == 0) { in null_bypass()
374 * XXX - even though some ops have vpp returned vp's, in null_bypass()
380 descp->vdesc_vpp_offset, ap); in null_bypass()
382 error = null_nodeget(old_vps[0]->v_mount, **vppp, in null_bypass()
395 vp = ap->a_vp; in null_add_writecount()
399 VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount")); in null_add_writecount()
400 VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp, in null_add_writecount()
401 ("wrong writecount inc %d", ap->a_inc)); in null_add_writecount()
402 error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc); in null_add_writecount()
404 vp->v_writecount += ap->a_inc; in null_add_writecount()
411 * as we progress through the tree. We also have to enforce read-only
412 * if this layer is mounted read-only.
417 struct componentname *cnp = ap->a_cnp; in null_lookup()
418 struct vnode *dvp = ap->a_dvp; in null_lookup()
419 uint64_t flags = cnp->cn_flags; in null_lookup()
424 mp = dvp->v_mount; in null_lookup()
425 if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 && in null_lookup()
426 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) in null_lookup()
436 * Renames in the lower mounts might create an inconsistent in null_lookup()
437 * configuration where lower vnode is moved out of the directory tree in null_lookup()
446 if ((ldvp->v_vflag & VV_ROOT) != 0) { in null_lookup()
447 KASSERT((dvp->v_vflag & VV_ROOT) == 0, in null_lookup()
449 ldvp, ldvp->v_vflag, dvp, dvp->v_vflag, in null_lookup()
460 * case of dvp reclamation, and we need ldvp to move our lock in null_lookup()
468 * VOP_LOOKUP() on lower vnode may unlock ldvp, which allows in null_lookup()
482 * longer shared. In this case, relock of ldvp in in null_lookup()
483 * lower fs VOP_LOOKUP() does not restore the locking in null_lookup()
494 (mp->mnt_flag & MNT_RDONLY) != 0 && in null_lookup()
495 (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)) in null_lookup()
500 *ap->a_vpp = dvp; in null_lookup()
506 *ap->a_vpp = vp; in null_lookup()
518 vp = ap->a_vp; in null_open()
520 retval = null_bypass(&ap->a_gen); in null_open()
522 vp->v_object = ldvp->v_object; in null_open()
524 MPASS(vp->v_object != NULL); in null_open()
534 * Setattr call. Disallow write attempts if the layer is mounted read-only.
539 struct vnode *vp = ap->a_vp; in null_setattr()
540 struct vattr *vap = ap->a_vap; in null_setattr()
542 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL || in null_setattr()
543 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL || in null_setattr()
544 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) && in null_setattr()
545 (vp->v_mount->mnt_flag & MNT_RDONLY)) in null_setattr()
547 if (vap->va_size != VNOVAL) { in null_setattr()
548 switch (vp->v_type) { in null_setattr()
549 case VDIR: in null_setattr()
551 case VCHR: in null_setattr()
552 case VBLK: in null_setattr()
553 case VSOCK: in null_setattr()
554 case VFIFO: in null_setattr()
555 if (vap->va_flags != VNOVAL) in null_setattr()
558 case VREG: in null_setattr()
559 case VLNK: in null_setattr()
563 * mounted read-only. in null_setattr()
565 if (vp->v_mount->mnt_flag & MNT_RDONLY) in null_setattr()
570 return (null_bypass(&ap->a_gen)); in null_setattr()
581 if ((error = null_bypass(&ap->a_gen)) != 0) in null_stat()
584 ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; in null_stat()
593 if ((error = null_bypass(&ap->a_gen)) != 0) in null_getattr()
596 ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0]; in null_getattr()
601 * Handler to disallow write access if the layer is mounted read-only.
606 struct vnode *vp = ap->a_vp; in null_access()
607 accmode_t accmode = ap->a_accmode; in null_access()
610 * Disallow write attempts on read-only layers; in null_access()
615 switch (vp->v_type) { in null_access()
616 case VDIR: in null_access()
617 case VLNK: in null_access()
618 case VREG: in null_access()
619 if (vp->v_mount->mnt_flag & MNT_RDONLY) in null_access()
626 return (null_bypass(&ap->a_gen)); in null_access()
632 struct vnode *vp = ap->a_vp; in null_accessx()
633 accmode_t accmode = ap->a_accmode; in null_accessx()
636 * Disallow write attempts on read-only layers; in null_accessx()
641 switch (vp->v_type) { in null_accessx()
642 case VDIR: in null_accessx()
643 case VLNK: in null_accessx()
644 case VREG: in null_accessx()
645 if (vp->v_mount->mnt_flag & MNT_RDONLY) in null_accessx()
652 return (null_bypass(&ap->a_gen)); in null_accessx()
656 * Increasing the refcount of the lower vnode is needed at least for the
657 * case when the lower FS is NFS, to do a sillyrename if the file is in use.
669 vp = ap->a_vp; in null_remove()
676 VTONULL(vp)->null_flags |= NULLV_DROP; in null_remove()
677 retval = null_bypass(&ap->a_gen); in null_remove()
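A hedged sketch of the reference handling the comment above describes (the NULLV_DROP marking is already visible in the fragment): an extra reference on the lower vnode is held across the bypassed VOP_REMOVE() so that an NFS lower layer can sillyrename a file that is still in use. hold_lower_across_remove is a hypothetical name; the real code takes the extra reference only when the file is actually still referenced.

static int
hold_lower_across_remove(struct vop_remove_args *ap)
{
        struct vnode *lvp = NULLVPTOLOWERVP(ap->a_vp);
        int error;

        vref(lvp);                      /* keep the lower vnode alive */
        error = null_bypass(&ap->a_gen);
        vrele(lvp);                     /* drop the extra reference */
        return (error);
}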
684 * We handle this to eliminate null FS to lower FS
696 tdvp = ap->a_tdvp; in null_rename()
697 fvp = ap->a_fvp; in null_rename()
698 fdvp = ap->a_fdvp; in null_rename()
699 tvp = ap->a_tvp; in null_rename()
702 /* Check for cross-device rename. */ in null_rename()
703 if ((fvp->v_mount != tdvp->v_mount) || in null_rename()
704 (tvp != NULL && fvp->v_mount != tvp->v_mount)) { in null_rename()
716 lfdvp = fdnn->null_lowervp; in null_rename()
727 lfvp = fnn->null_lowervp; in null_rename()
732 ltdvp = tdnn->null_lowervp; in null_rename()
737 ltvp = tnn->null_lowervp; in null_rename()
739 tnn->null_flags |= NULLV_DROP; in null_rename()
744 error = VOP_RENAME(lfdvp, lfvp, ap->a_fcnp, ltdvp, ltvp, ap->a_tcnp); in null_rename()
770 VTONULL(ap->a_vp)->null_flags |= NULLV_DROP; in null_rmdir()
771 return (null_bypass(&ap->a_gen)); in null_rmdir()
782 * be recycled, in which case it is not legal to be sleeping in its VOP. We
795 nn = VTONULL_SMR(ap->a_vp); in null_lock_prep_with_smr()
797 lvp = nn->null_lowervp; in null_lock_prep_with_smr()
812 ASSERT_VI_LOCKED(ap->a_vp, __func__); in null_lock_prep_with_interlock()
814 ap->a_flags &= ~LK_INTERLOCK; in null_lock_prep_with_interlock()
818 nn = VTONULL(ap->a_vp); in null_lock_prep_with_interlock()
820 lvp = nn->null_lowervp; in null_lock_prep_with_interlock()
824 VI_UNLOCK(ap->a_vp); in null_lock_prep_with_interlock()
834 if (__predict_true((ap->a_flags & LK_INTERLOCK) == 0)) { in null_lock()
837 VI_LOCK(ap->a_vp); in null_lock()
844 ASSERT_VI_UNLOCKED(ap->a_vp, __func__); in null_lock()
849 VNPASS(lvp->v_holdcnt > 0, lvp); in null_lock()
850 error = VOP_LOCK(lvp, ap->a_flags); in null_lock()
855 * case by reacquiring correct lock in requested mode. in null_lock()
857 if (VTONULL(ap->a_vp) == NULL && error == 0) { in null_lock()
860 flags = ap->a_flags; in null_lock()
861 ap->a_flags &= ~LK_TYPE_MASK; in null_lock()
863 case LK_SHARED: in null_lock()
864 ap->a_flags |= LK_SHARED; in null_lock()
866 case LK_UPGRADE: in null_lock()
867 case LK_EXCLUSIVE: in null_lock()
868 ap->a_flags |= LK_EXCLUSIVE; in null_lock()
883 struct vnode *vp = ap->a_vp; in null_unlock()
910 * Do not allow the VOP_INACTIVE to be passed to the lower layer,
911 * since the reference count on the lower vnode is not related to
924 mp = vp->v_mount; in null_want_recycle()
926 if ((xmp->nullm_flags & NULLM_CACHE) == 0 || in null_want_recycle()
927 (xp->null_flags & NULLV_DROP) != 0 || in null_want_recycle()
928 (lvp->v_vflag & VV_NOSYNC) != 0) { in null_want_recycle()
931 * nullfs vnodes is not enabled, or the lower vnode is in null_want_recycle()
933 * the lower vnodes. in null_want_recycle()
945 vp = ap->a_vp; in null_inactive()
947 vp->v_object = NULL; in null_inactive()
957 return (null_want_recycle(ap->a_vp) || vn_need_pageq_flush(ap->a_vp)); in null_need_inactive()
961 * Now, the nullfs vnode and, due to the sharing lock, the lower
971 vp = ap->a_vp; in null_reclaim()
973 lowervp = xp->null_lowervp; in null_reclaim()
975 KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock, in null_reclaim()
983 lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL); in null_reclaim()
985 vp->v_data = NULL; in null_reclaim()
986 vp->v_object = NULL; in null_reclaim()
987 vp->v_vnlock = &vp->v_lock; in null_reclaim()
991 * to the lower vnode. If this is a reclamation due to the in null_reclaim()
994 if (vp->v_writecount > 0) in null_reclaim()
995 VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount); in null_reclaim()
996 else if (vp->v_writecount < 0) in null_reclaim()
997 vp->v_writecount = 0; in null_reclaim()
1001 if ((xp->null_flags & NULLV_NOUNLOCK) != 0) in null_reclaim()
1013 struct vnode *vp = ap->a_vp; in null_print()
1015 printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp); in null_print()
1027 vp = ap->a_vp; in null_getwritemount()
1030 if (xp && (lowervp = xp->null_lowervp)) { in null_getwritemount()
1033 VOP_GETWRITEMOUNT(lowervp, ap->a_mpp); in null_getwritemount()
1037 *(ap->a_mpp) = NULL; in null_getwritemount()
1047 lvp = NULLVPTOLOWERVP(ap->a_vp); in null_vptofh()
1048 return VOP_VPTOFH(lvp, ap->a_fhp); in null_vptofh()
1054 struct vnode *vp = ap->a_vp; in null_vptocnp()
1055 struct vnode **dvp = ap->a_vpp; in null_vptocnp()
1062 mp = vp->v_mount; in null_vptocnp()
1070 error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen); in null_vptocnp()
1104 vp = ap->a_vp; in null_read_pgcache()
1111 lvp = xp->null_lowervp; in null_read_pgcache()
1114 error = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred); in null_read_pgcache()
1126 vp = ap->a_vp; in null_advlock()
1133 lvp = xp->null_lowervp; in null_advlock()
1136 error = VOP_ADVLOCK(lvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags); in null_advlock()
1142 * Avoid standard bypass, since lower dvp and vp could be no longer
1152 dvp = ap->a_dvp; in null_vput_pair()
1156 vpp = ap->a_vpp; in null_vput_pair()
1165 if (!ap->a_unlock_vp) { in null_vput_pair()
1168 mp = vp->v_mount; in null_vput_pair()
1174 if (vp != NULL && ap->a_unlock_vp) in null_vput_pair()
1178 if (vp == NULL || ap->a_unlock_vp) in null_vput_pair()
1183 if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) { in null_vput_pair()
1211 vp = ap->a_vp; in null_getlowvnode()
1218 VOP_GETLOWVNODE(vpl, ap->a_vplp, ap->a_flags); in null_getlowvnode()