Lines Matching full:vp (FreeBSD sys/fs/nullfs/null_vnops.c)
232 * We require at least one vp.
236 panic ("null_bypass: no vp's in map");
254 * that aren't. (We must always map first vp or vclean fails.)
276 * of vrele'ing their vp's. We must account for
343 * XXX - even though some ops have vpp returned vp's,
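The comment fragments above (source lines 232-343) are from null_bypass(), the generic pass-through routine: it rewrites every nullfs vnode in the operation's argument block to the corresponding lower vnode, calls the lower layer, then restores the original pointers (and, per the XXX note, re-maps any vnode handed back through *vpp). The following is a simplified sketch of that mapping loop, assuming the standard vnodeop_desc machinery (vdesc_vp_offsets, VDESC_NO_OFFSET, VOPARG_OFFSETTO, VCALL); it omits the reference counting and *vpp handling that the real routine performs.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <fs/nullfs/null.h>

/* Simplified sketch of the null_bypass() vp-mapping loop; not the real code. */
static int
null_bypass_sketch(struct vop_generic_args *ap)
{
	struct vnodeop_desc *descp = ap->a_desc;
	struct vnode **this_vp_p;
	struct vnode *old_vps[VDESC_MAX_VPS];
	int error, i;

	if (descp->vdesc_vp_offsets == NULL ||
	    descp->vdesc_vp_offsets[0] == VDESC_NO_OFFSET)
		panic("null_bypass: no vp's in map");

	/* Replace each upper (nullfs) vnode argument with its lower vnode. */
	for (i = 0; i < VDESC_MAX_VPS &&
	    descp->vdesc_vp_offsets[i] != VDESC_NO_OFFSET; i++) {
		this_vp_p = VOPARG_OFFSETTO(struct vnode **,
		    descp->vdesc_vp_offsets[i], ap);
		old_vps[i] = *this_vp_p;
		if (old_vps[i] != NULLVP)
			*this_vp_p = NULLVPTOLOWERVP(old_vps[i]);
	}

	/* Run the operation on the lower layer. */
	error = VCALL(ap);

	/* Put the upper vnodes back so the caller sees its own arguments. */
	for (i = 0; i < VDESC_MAX_VPS &&
	    descp->vdesc_vp_offsets[i] != VDESC_NO_OFFSET; i++) {
		this_vp_p = VOPARG_OFFSETTO(struct vnode **,
		    descp->vdesc_vp_offsets[i], ap);
		*this_vp_p = old_vps[i];
	}
	return (error);
}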
361 struct vnode *lvp, *vp;
364 vp = ap->a_vp;
365 lvp = NULLVPTOLOWERVP(vp);
366 VI_LOCK(vp);
368 VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
369 VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
373 vp->v_writecount += ap->a_inc;
374 VI_UNLOCK(vp);
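Source lines 361-374 belong to null_add_writecount(). nullfs tracks v_writecount on the upper vnode but has to mirror every change onto the lower vnode so that the lower filesystem sees the file as open for writing. Below is a hedged reconstruction of the whole handler, following the pattern visible in the matched fragments; the exact error handling may differ between FreeBSD versions.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <fs/nullfs/null.h>

static int
null_add_writecount(struct vop_add_writecount_args *ap)
{
	struct vnode *lvp, *vp;
	int error;

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);
	VI_LOCK(vp);
	VNASSERT(vp->v_writecount >= 0, vp, ("wrong null writecount"));
	VNASSERT(vp->v_writecount + ap->a_inc >= 0, vp,
	    ("wrong writecount inc %d", ap->a_inc));
	/* Forward the change to the lower vnode first ... */
	error = VOP_ADD_WRITECOUNT(lvp, ap->a_inc);
	/* ... and only adjust the upper count if the lower layer accepted it. */
	if (error == 0)
		vp->v_writecount += ap->a_inc;
	VI_UNLOCK(vp);
	return (error);
}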
389 struct vnode *vp, *ldvp, *lvp;
402 vp = lvp = NULL;
465 error = null_nodeget(mp, lvp, &vp);
467 *ap->a_vpp = vp;
477 struct vnode *vp, *ldvp;
479 vp = ap->a_vp;
480 ldvp = NULLVPTOLOWERVP(vp);
483 vp->v_object = ldvp->v_object;
485 MPASS(vp->v_object != NULL);
486 if ((vn_irflag_read(vp) & VIRF_PGREAD) == 0) {
487 vn_irflag_set_cond(vp, VIRF_PGREAD);
500 struct vnode *vp = ap->a_vp;
506 (vp->v_mount->mnt_flag & MNT_RDONLY))
509 switch (vp->v_type) {
526 if (vp->v_mount->mnt_flag & MNT_RDONLY)
567 struct vnode *vp = ap->a_vp;
576 switch (vp->v_type) {
580 if (vp->v_mount->mnt_flag & MNT_RDONLY)
593 struct vnode *vp = ap->a_vp;
602 switch (vp->v_type) {
606 if (vp->v_mount->mnt_flag & MNT_RDONLY)
628 struct vnode *lvp, *vp;
630 vp = ap->a_vp;
631 if (vrefcnt(vp) > 1) {
632 lvp = NULLVPTOLOWERVP(vp);
637 VTONULL(vp)->null_flags |= NULLV_DROP;
743 struct vnode *vp = ap->a_vp;
750 VI_LOCK(vp);
754 nn = VTONULL(vp);
760 if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
773 VI_UNLOCK(vp);
782 if (VTONULL(vp) == NULL && error == 0) {
801 VI_UNLOCK(vp);
816 struct vnode *vp = ap->a_vp;
821 nn = VTONULL(vp);
822 if (nn != NULL && (lvp = NULLVPTOLOWERVP(vp)) != NULL) {
839 null_want_recycle(struct vnode *vp)
846 xp = VTONULL(vp);
847 lvp = NULLVPTOLOWERVP(vp);
848 mp = vp->v_mount;
867 struct vnode *vp;
869 vp = ap->a_vp;
870 if (null_want_recycle(vp)) {
871 vp->v_object = NULL;
872 vrecycle(vp);
891 struct vnode *vp;
895 vp = ap->a_vp;
896 xp = VTONULL(vp);
899 KASSERT(lowervp != NULL && vp->v_vnlock != &vp->v_lock,
900 ("Reclaiming incomplete null vnode %p", vp));
907 lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
908 VI_LOCK(vp);
909 vp->v_data = NULL;
910 vp->v_object = NULL;
911 vp->v_vnlock = &vp->v_lock;
918 if (vp->v_writecount > 0)
919 VOP_ADD_WRITECOUNT(lowervp, -vp->v_writecount);
920 else if (vp->v_writecount < 0)
921 vp->v_writecount = 0;
923 VI_UNLOCK(vp);
937 struct vnode *vp = ap->a_vp;
939 printf("\tvp=%p, lowervp=%p\n", vp, VTONULL(vp)->null_lowervp);
949 struct vnode *vp;
951 vp = ap->a_vp;
952 VI_LOCK(vp);
953 xp = VTONULL(vp);
956 VI_UNLOCK(vp);
960 VI_UNLOCK(vp);
978 struct vnode *vp = ap->a_vp;
984 locked = VOP_ISLOCKED(vp);
985 lvp = NULLVPTOLOWERVP(vp);
986 mp = vp->v_mount;
991 VOP_UNLOCK(vp); /* vp is held by vn_vptocnp_locked that called us */
997 vn_lock(vp, locked | LK_RETRY);
1005 vn_lock(vp, locked | LK_RETRY);
1016 vn_lock(vp, locked | LK_RETRY);
1024 struct vnode *lvp, *vp;
1028 vp = ap->a_vp;
1029 VI_LOCK(vp);
1030 xp = VTONULL(vp);
1032 VI_UNLOCK(vp);
1037 VI_UNLOCK(vp);
1046 struct vnode *lvp, *vp;
1050 vp = ap->a_vp;
1051 VI_LOCK(vp);
1052 xp = VTONULL(vp);
1054 VI_UNLOCK(vp);
1059 VI_UNLOCK(vp);
1066 * Avoid standard bypass, since lower dvp and vp could be no longer
1073 struct vnode *dvp, *ldvp, *lvp, *vp, *vp1, **vpp;
1081 vp = NULL;
1085 vp = *vpp;
1086 if (vp != NULL) {
1087 lvp = NULLVPTOLOWERVP(vp);
1090 vhold(vp);
1092 mp = vp->v_mount;
1098 if (vp != NULL && ap->a_unlock_vp)
1099 vrele(vp);
1102 if (vp == NULL || ap->a_unlock_vp)
1105 /* lvp has been unlocked and vp might be reclaimed */
1106 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
1107 if (vp->v_data == NULL && vfs_busy(mp, MBF_NOWAIT) == 0) {
1108 vput(vp);
1112 vget(vp, LK_EXCLUSIVE | LK_RETRY);
1118 vget(vp, LK_EXCLUSIVE | LK_RETRY);
1124 vdrop(vp);
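Source lines 1066-1124 are from null_vput_pair(), which cannot use the standard bypass because the lower dvp and vp may no longer be valid once the lower layer releases them. The recurring pattern in those lines is: hold the nullfs vnode, let the lower layer run with it unlocked, then relock it and re-check v_data (which null_reclaim() clears) before trusting it again. A small illustrative helper showing just that pattern; the helper name and shape are hypothetical and not part of null_vnops.c.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/vnode.h>

/* Hypothetical helper; illustrates the hold/unlock/relock/re-check pattern. */
static bool
null_vnode_still_valid(struct vnode *vp)
{
	bool valid;

	vhold(vp);		/* keep the vnode memory from being freed */
	VOP_UNLOCK(vp);
	/*
	 * ... lower-layer work runs here with vp unlocked; a concurrent
	 * recycle may reclaim vp in the meantime ...
	 */
	VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
	valid = vp->v_data != NULL;	/* cleared once null_reclaim() has run */
	vdrop(vp);
	return (valid);
}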
1133 struct vnode *vp, *vpl;
1135 vp = ap->a_vp;
1136 if (vn_lock(vp, LK_SHARED) != 0)
1139 vpl = NULLVPTOLOWERVP(vp);
1141 VOP_UNLOCK(vp);