Lines matching the full-text query "vref - source" in sys/fs/nullfs/null_vnops.c (FreeBSD)

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
10 * Redistribution and use in source and binary forms, with or without
13 * 1. Redistributions of source code must retain the above copyright
47 * a stackable-layers technique, and its "null-node"s stack above
48 * all lower-layer vnodes, not just over directory vnodes.
66 * of the lower vfs (target-pn) and the pathname where the null
67 * layer will appear in the namespace (alias-pn). After
69 * of target-pn subtree will be aliased under alias-pn.
82 * operation arguments and replacing any null-nodes by their
83 * lower-layer equivalents. It then invokes the operation
84 * on the lower layer. Finally, it replaces the null-nodes
86 * stacks a null-node on top of the returned vnode.
94 * they can handle freeing null-layer specific data. Vop_print
118 * The bypass routine stacks a null-node above the new
124 * the root null-node (which was created when the null layer was mounted).
126 * done on the root null-node. This operation would bypass through
128 * the UFS "sys". Null_bypass then builds a null-node
130 * Later operations on the null-node "sys" will repeat this
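To make the aliasing concrete: below is a minimal user-space sketch of the mount step, roughly what mount_nullfs(8) performs through nmount(2). The option names "fstype", "fspath", and "target" follow the nmount(2) convention used by nullfs; the paths "/alias" and "/lower" are placeholders for alias-pn and target-pn.

	#include <sys/uio.h>
	#include <sys/mount.h>
	#include <err.h>
	#include <string.h>

	/* Fill one name/value option pair for nmount(2). */
	static void
	set_pair(struct iovec *iov, const char *name, const char *val)
	{
		iov[0].iov_base = __DECONST(char *, name);
		iov[0].iov_len = strlen(name) + 1;
		iov[1].iov_base = __DECONST(char *, val);
		iov[1].iov_len = strlen(val) + 1;
	}

	int
	main(void)
	{
		struct iovec iov[6];

		set_pair(&iov[0], "fstype", "nullfs");
		set_pair(&iov[2], "fspath", "/alias");	/* alias-pn */
		set_pair(&iov[4], "target", "/lower");	/* target-pn */
		if (nmount(iov, 6, 0) == -1)
			err(1, "nmount");
		return (0);
	}

After such a mount, a lookup of /alias/sys is forwarded to /lower/sys and the result comes back wrapped in a null-node, exactly as the comment above describes.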
193 * This is the 10-Apr-92 bypass routine.
205 * side-effects here. This is not of concern in Sun-derived systems
206 * since there are no such side-effects.
209 * - only one returned vpp
210 * - no INOUT vpp's (Sun's vop_open has one of these)
211 * - the vnode operation vector of the first vnode should be used
213 * - all mapped vnodes are of our vnode-type (NEEDSWORK:
224 struct vnodeop_desc *descp = ap->a_desc;
228 printf ("null_bypass: %s\n", descp->vdesc_name);
234 if (descp->vdesc_vp_offsets == NULL ||
235 descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
244 reles = descp->vdesc_flags;
246 if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
249 descp->vdesc_vp_offsets[i], ap);
257 (*this_vp_p)->v_op != &null_vnodeops)) {
275 * XXX - Several operations have the side effect
280 vref(*this_vp_p);
291 printf("null_bypass: no map for %s\n", descp->vdesc_name);
296 * Maintain the illusion of call-by-value
300 reles = descp->vdesc_flags;
302 if (descp->vdesc_vp_offsets[i] == VDESC_NO_OFFSET)
318 old_vps[i]->v_vnlock != lvp->v_vnlock) {
337 * Map the possible out-going vpp
339 * a VREF'ed vpp unless it gets an error.)
341 if (descp->vdesc_vpp_offset != VDESC_NO_OFFSET && error == 0) {
343 * XXX - even though some ops have vpp returned vp's,
349 descp->vdesc_vpp_offset, ap);
351 error = null_nodeget(old_vps[0]->v_mount, **vppp,
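The fragments above are the heart of null_bypass(): locate every vnode argument through descp->vdesc_vp_offsets, substitute its lower vnode (taking an extra reference when vdesc_flags says the operation consumes one), invoke the lower layer, restore the originals, and stack a null-node over any returned vnode. A stripped-down, self-contained illustration of that map/invoke/unmap cycle follows; struct vop_args, lower_op(), and null_bypass_sketch() are toy stand-ins, not kernel interfaces.

	#include <stdio.h>

	struct vnode { struct vnode *lower; const char *name; };
	struct vop_args { struct vnode *a_vp; };

	/* The "lower layer" operation. */
	static int
	lower_op(struct vop_args *ap)
	{
		printf("operation on lower vnode \"%s\"\n", ap->a_vp->name);
		return (0);
	}

	static int
	null_bypass_sketch(struct vop_args *ap)
	{
		struct vnode *old_vp = ap->a_vp;
		int error;

		ap->a_vp = old_vp->lower;	/* map the null-node down */
		error = lower_op(ap);		/* invoke the lower layer */
		ap->a_vp = old_vp;		/* restore call-by-value illusion */
		return (error);
	}

	int
	main(void)
	{
		struct vnode lvp = { NULL, "ufs:sys" };
		struct vnode vp = { &lvp, "null:sys" };
		struct vop_args a = { &vp };

		return (null_bypass_sketch(&a));
	}

The real routine differs in one essential way: it knows nothing about individual operations and walks the offset table instead of naming a_vp, which is what lets a single bypass serve nearly every VOP.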
364 vp = ap->a_vp;
368 VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
369 VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
370 ("wrong writecount inc %d", ap->a_inc));
371 error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
373 vp->v_writecount += ap->a_inc;
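Pieced together, the writecount wrapper is short, and the point is ordering: the change is applied to the lower vnode first and mirrored on the null vnode only on success, so the two counters cannot diverge. A condensed sketch (kernel context; asserts and interlock handling omitted, and null_add_writecount_sketch is a hypothetical name, though the VOPs and macros are the real interfaces):

	static int
	null_add_writecount_sketch(struct vop_add_writecount_args *ap)
	{
		struct vnode *vp = ap->a_vp;
		struct vnode *lvp = NULLVPTOLOWERVP(vp);
		int error;

		error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
		if (error == 0)
			vp->v_writecount += ap->a_inc;	/* mirror on success */
		return (error);
	}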
380 * as we progress through the tree. We also have to enforce read-only
381 * if this layer is mounted read-only.
386 struct componentname *cnp = ap->a_cnp;
387 struct vnode *dvp = ap->a_dvp;
388 int flags = cnp->cn_flags;
393 mp = dvp->v_mount;
394 if ((flags & ISLASTCN) != 0 && (mp->mnt_flag & MNT_RDONLY) != 0 &&
395 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
412 if ((ldvp->v_vflag & VV_ROOT) != 0 && (flags & ISDOTDOT) != 0) {
413 KASSERT((dvp->v_vflag & VV_ROOT) == 0,
415 ldvp, ldvp->v_vflag, dvp, dvp->v_vflag, flags));
455 (mp->mnt_flag & MNT_RDONLY) != 0 &&
456 (cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME))
461 *ap->a_vpp = dvp;
462 VREF(dvp);
467 *ap->a_vpp = vp;
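Condensed, the lookup flow reads as below (kernel context; the dotdot-at-root relocking shown above and several error paths are omitted, and null_lookup_sketch is a hypothetical name). The interesting case is the last one: a successful lower lookup yields a lower vnode, which must be wrapped in a null-node before it can be handed upward.

	static int
	null_lookup_sketch(struct vop_lookup_args *ap)
	{
		struct componentname *cnp = ap->a_cnp;
		struct vnode *dvp = ap->a_dvp, *ldvp, *lvp, *vp;
		int error;

		if ((cnp->cn_flags & ISLASTCN) != 0 &&
		    (dvp->v_mount->mnt_flag & MNT_RDONLY) != 0 &&
		    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
			return (EROFS);	/* refuse to modify a read-only layer */

		ldvp = NULLVPTOLOWERVP(dvp);
		error = VOP_LOOKUP(ldvp, &lvp, cnp);
		if ((error == 0 || error == EJUSTRETURN) && lvp != NULL) {
			if (ldvp == lvp) {
				/* "." - return the upper directory itself. */
				*ap->a_vpp = dvp;
				VREF(dvp);
				vrele(lvp);
			} else {
				/* Stack a null-node over the lower result. */
				error = null_nodeget(dvp->v_mount, lvp, &vp);
				if (error == 0)
					*ap->a_vpp = vp;
			}
		}
		return (error);
	}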
479 vp = ap->a_vp;
481 retval = null_bypass(&ap->a_gen);
483 vp->v_object = ldvp->v_object;
485 MPASS(vp->v_object != NULL);
495 * Setattr call. Disallow write attempts if the layer is mounted read-only.
500 struct vnode *vp = ap->a_vp;
501 struct vattr *vap = ap->a_vap;
503 if ((vap->va_flags != VNOVAL || vap->va_uid != (uid_t)VNOVAL ||
504 vap->va_gid != (gid_t)VNOVAL || vap->va_atime.tv_sec != VNOVAL ||
505 vap->va_mtime.tv_sec != VNOVAL || vap->va_mode != (mode_t)VNOVAL) &&
506 (vp->v_mount->mnt_flag & MNT_RDONLY))
508 if (vap->va_size != VNOVAL) {
509 switch (vp->v_type) {
516 if (vap->va_flags != VNOVAL)
524 * mounted read-only.
526 if (vp->v_mount->mnt_flag & MNT_RDONLY)
531 return (null_bypass(&ap->a_gen));
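The switch elided above distinguishes vnode types. Reconstructed in condensed form from the surrounding fragments (a sketch, not the verbatim source):

	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return (EISDIR);	/* directories cannot be truncated */
		case VCHR:
		case VBLK:
		case VSOCK:
		case VFIFO:
			if (vap->va_flags != VNOVAL)
				return (EOPNOTSUPP);
			return (0);		/* size is meaningless for these */
		default:
			/* VREG, VLNK: refuse writes on a read-only layer. */
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
		}
	}
	return (null_bypass(&ap->a_gen));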
542 if ((error = null_bypass(&ap->a_gen)) != 0)
545 ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
554 if ((error = null_bypass(&ap->a_gen)) != 0)
557 ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
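Both wrappers do the same thing after the bypass: overwrite the filesystem id in the result so userland sees the nullfs mount rather than the lower filesystem it mirrors. Filled out, the getattr variant is essentially this (null_getattr_sketch is a hypothetical name):

	static int
	null_getattr_sketch(struct vop_getattr_args *ap)
	{
		int error;

		if ((error = null_bypass(&ap->a_gen)) != 0)
			return (error);
		/* Report the null mount's fsid, not the lower one's. */
		ap->a_vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
		return (0);
	}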
562 * Hook to disallow write access if the layer is mounted read-only.
567 struct vnode *vp = ap->a_vp;
568 accmode_t accmode = ap->a_accmode;
571 * Disallow write attempts on read-only layers;
576 switch (vp->v_type) {
580 if (vp->v_mount->mnt_flag & MNT_RDONLY)
587 return (null_bypass(&ap->a_gen));
593 struct vnode *vp = ap->a_vp;
594 accmode_t accmode = ap->a_accmode;
597 * Disallow write attempts on read-only layers;
602 switch (vp->v_type) {
606 if (vp->v_mount->mnt_flag & MNT_RDONLY)
613 return (null_bypass(&ap->a_gen));
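null_access() and null_accessx() apply the same guard before falling through to the bypass. Condensed (a sketch assembled from the fragments):

	if (accmode & VWRITE) {
		switch (vp->v_type) {
		case VDIR:
		case VLNK:
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY)
				return (EROFS);
			break;
		default:
			break;
		}
	}
	return (null_bypass(&ap->a_gen));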
630 vp = ap->a_vp;
633 VREF(lvp);
637 VTONULL(vp)->null_flags |= NULLV_DROP;
638 retval = null_bypass(&ap->a_gen);
657 tdvp = ap->a_tdvp;
658 fvp = ap->a_fvp;
659 fdvp = ap->a_fdvp;
660 tvp = ap->a_tvp;
663 /* Check for cross-device rename. */
664 if ((fvp->v_mount != tdvp->v_mount) ||
665 (tvp != NULL && fvp->v_mount != tvp->v_mount)) {
677 lfdvp = fdnn->null_lowervp;
678 vref(lfdvp);
688 lfvp = fnn->null_lowervp;
689 vref(lfvp);
693 ltdvp = tdnn->null_lowervp;
694 vref(ltdvp);
698 ltvp = tnn->null_lowervp;
699 vref(ltvp);
700 tnn->null_flags |= NULLV_DROP;
705 error = VOP_RENAME(lfdvp, lfvp, ap->a_fcnp, ltdvp, ltvp, ap->a_tcnp);
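Rename cannot use the generic bypass because its four vnode arguments carry different reference and lock obligations. Each argument that is a null-node is unwrapped by hand, as in this hypothetical helper (the real code open-codes the step per vnode and passes NULLVP down when an argument is absent or not a null-node). Setting NULLV_DROP on the replaced target, as above, forces that null-node to be recycled once the lower rename has removed its file.

	static struct vnode *
	null_unwrap_ref(struct vnode *vp)
	{
		struct vnode *lvp;

		lvp = VTONULL(vp)->null_lowervp;
		vref(lvp);	/* the lower VOP_RENAME consumes one reference */
		return (lvp);
	}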
731 VTONULL(ap->a_vp)->null_flags |= NULLV_DROP;
732 return (null_bypass(&ap->a_gen));
743 struct vnode *vp = ap->a_vp;
749 if ((ap->a_flags & LK_INTERLOCK) == 0)
752 ap->a_flags &= ~LK_INTERLOCK;
753 flags = ap->a_flags;
783 ap->a_flags &= ~LK_TYPE_MASK;
786 ap->a_flags |= LK_SHARED;
790 ap->a_flags |= LK_EXCLUSIVE;
794 ap->a_flags);
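A null-node shares its lower vnode's lock: its v_vnlock points into the lower vnode, so locking the layer locks the whole stack with a single lockmgr lock. The flag surgery above upgrades shared requests to exclusive when the lower filesystem cannot share. Reduced to its core, the forwarding looks roughly like this (kernel context; the hold keeps the lower vnode from being freed while we sleep, and the real code additionally re-checks after VOP_LOCK that the vnode was not reclaimed meanwhile):

	static int
	null_lock_sketch(struct vop_lock1_args *ap)
	{
		struct vnode *vp = ap->a_vp;
		struct vnode *lvp = NULLVPTOLOWERVP(vp);
		int error;

		vhold(lvp);		/* survive a concurrent reclaim */
		VI_UNLOCK(vp);
		error = VOP_LOCK(lvp, ap->a_flags);
		vdrop(lvp);
		return (error);
	}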
816 struct vnode *vp = ap->a_vp;
848 mp = vp->v_mount;
850 if ((xmp->nullm_flags & NULLM_CACHE) == 0 ||
851 (xp->null_flags & NULLV_DROP) != 0 ||
852 (lvp->v_vflag & VV_NOSYNC) != 0) {
869 vp = ap->a_vp;
871 vp->v_object = NULL;
881 return (null_want_recycle(ap->a_vp) || vn_need_pageq_flush(ap->a_vp));
895 vp = ap->a_vp;
897 lowervp = xp->null_lowervp;
899 KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
907 lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
909 vp->v_data = NULL;
910 vp->v_object = NULL;
911 vp->v_vnlock = &vp->v_lock;
918 if (vp->v_writecount > 0)
919 VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
920 else if (vp->v_writecount < 0)
921 vp->v_writecount = 0;
925 if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
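Reclaim must undo everything the stacking set up. Condensed from the fragments (hash-table removal and interlock handling omitted): give the vnode back its private lock, sever v_data and the shared VM object, return the leased writecount to the lower vnode, and finally drop the lower vnode, leaving it locked or not as NULLV_NOUNLOCK directs.

	lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
	vp->v_data = NULL;
	vp->v_object = NULL;
	vp->v_vnlock = &vp->v_lock;	/* back to the private lock */

	if (vp->v_writecount > 0)	/* return the leased writecount */
		VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
	else if (vp->v_writecount < 0)
		vp->v_writecount = 0;

	if ((xp->null_flags & NULLV_NOUNLOCK) != 0)
		vunref(lowervp);
	else
		vput(lowervp);
	free(xp, M_NULLFSNODE);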
937 struct vnode *vp = ap->a_vp;
939 printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
951 vp = ap->a_vp;
954 if (xp && (lowervp = xp->null_lowervp)) {
957 VOP_GETWRITEMOUNT(lowervp, ap->a_mpp);
961 *(ap->a_mpp) = NULL;
971 lvp = NULLVPTOLOWERVP(ap->a_vp);
972 return (VOP_VPTOFH(lvp, ap->a_fhp));
978 struct vnode *vp = ap->a_vp;
979 struct vnode **dvp = ap->a_vpp;
986 mp = vp->v_mount;
993 vref(lvp);
994 error = vn_vptocnp(&ldvp, ap->a_buf, ap->a_buflen);
1028 vp = ap->a_vp;
1035 lvp = xp->null_lowervp;
1036 vref(lvp);
1038 error = VOP_READ_PGCACHE(lvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
1050 vp = ap->a_vp;
1057 lvp = xp->null_lowervp;
1058 vref(lvp);
1060 error = VOP_ADVLOCK(lvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags);
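null_read_pgcache() and null_advlock() share one pattern: both are entered without the vnode lock, so the null-node can be reclaimed underneath them. Each therefore pins the lower vnode under the interlock before using it; assembled from the fragments above:

	VI_LOCK(vp);
	xp = VTONULL(vp);
	if (xp == NULL) {
		VI_UNLOCK(vp);
		return (EBADF);	/* reclaimed under us */
	}
	lvp = xp->null_lowervp;
	vref(lvp);		/* pin lvp before dropping the interlock */
	VI_UNLOCK(vp);
	error = VOP_ADVLOCK(lvp, ap->a_id, ap->a_op, ap->a_fl, ap->a_flags);
	vrele(lvp);
	return (error);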
1076 dvp = ap->a_dvp;
1078 vref(ldvp);
1080 vpp = ap->a_vpp;
1088 vref(lvp);
1089 if (!ap->a_unlock_vp) {
1092 mp = vp->v_mount;
1098 if (vp != NULL && ap->a_unlock_vp)
1102 if (vp == NULL || ap->a_unlock_vp)
1107 if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) {
1135 vp = ap->a_vp;
1142 VOP_GETLOWVNODE(vpl, ap->a_vplp, ap->a_flags);