Lines matching full:vp (FreeBSD kernel, default vnode operations; these functions live in sys/kern/vfs_default.c)
74 static int dirent_exists(struct vnode *vp, const char *dirname,
285 dirent_exists(struct vnode *vp, const char *dirname, struct thread *td) in dirent_exists() argument
294 ASSERT_VOP_LOCKED(vp, "vnode not locked"); in dirent_exists()
295 KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp)); in dirent_exists()
297 error = VOP_GETATTR(vp, &va, td->td_ucred); in dirent_exists()
311 error = vn_dir_next_dirent(vp, td, dirbuf, dirbuflen, in dirent_exists()
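
The dirent_exists() fragments above show the standard pattern for asking whether a locked directory vnode contains a given name: assert the lock and VDIR type, then iterate entries with vn_dir_next_dirent(). A minimal sketch of that pattern follows, assuming the vn_dir_next_dirent() iterator behaves as in recent FreeBSD; the buffer size and the boolean return convention here are illustrative, not lifted from the real function.

    /* Sketch: scan a locked directory vnode for an entry named "dirname".
     * Returns 1 if found, 0 otherwise (illustrative convention). */
    static int
    dirent_exists_sketch(struct vnode *vp, const char *dirname,
        struct thread *td)
    {
        char *dirbuf;
        struct dirent *dp = NULL;
        size_t dirbuflen, len;
        off_t off;
        int eofflag, error, found;

        ASSERT_VOP_LOCKED(vp, "vnode not locked");
        KASSERT(vp->v_type == VDIR, ("vp %p is not a directory", vp));

        found = 0;
        dirbuflen = 16 * 1024;        /* illustrative buffer size */
        dirbuf = malloc(dirbuflen, M_TEMP, M_WAITOK);
        len = 0;
        off = 0;
        eofflag = 0;
        do {
            /* Fetch the next entry, refilling dirbuf via VOP_READDIR
             * as needed. */
            error = vn_dir_next_dirent(vp, td, dirbuf, dirbuflen,
                &dp, &len, &off, &eofflag);
            if (error != 0)
                break;
            if (len > 0 && strcmp(dp->d_name, dirname) == 0) {
                found = 1;
                break;
            }
        } while (len > 0 || !eofflag);
        free(dirbuf, M_TEMP);
        return (found);
    }
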
363 struct vnode *vp; in vop_stdadvlock() local
368 vp = ap->a_vp; in vop_stdadvlock()
374 mp = vp->v_mount; in vop_stdadvlock()
377 VI_LOCK(vp); in vop_stdadvlock()
378 while ((vp->v_iflag & VI_FOPENING) != 0) in vop_stdadvlock()
379 msleep(vp, VI_MTX(vp), PLOCK, "lockfo", 0); in vop_stdadvlock()
380 VI_UNLOCK(vp); in vop_stdadvlock()
390 vn_lock(vp, LK_SHARED | LK_RETRY); in vop_stdadvlock()
391 error = VOP_GETATTR(vp, &vattr, curthread->td_ucred); in vop_stdadvlock()
392 VOP_UNLOCK(vp); in vop_stdadvlock()
398 return (lf_advlock(ap, &(vp->v_lockf), vattr.va_size)); in vop_stdadvlock()
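
vop_stdadvlock() needs the current file size only when the lock range is relative to EOF, which is why the excerpt takes a shared vnode lock around a single VOP_GETATTR() before delegating to lf_advlock(). A condensed sketch of that shape; the SEEK_END test is reconstructed from memory, and the VI_FOPENING wait in the excerpt (which closes a race with O_EXLOCK opens) is elided:

    /* Sketch of the vop_stdadvlock() shape. */
    static int
    stdadvlock_sketch(struct vop_advlock_args *ap)
    {
        struct vnode *vp = ap->a_vp;
        struct flock *fl = ap->a_fl;
        struct vattr vattr;
        int error;

        if (fl->l_whence == SEEK_END) {
            /* Only EOF-relative requests need the size. */
            vn_lock(vp, LK_SHARED | LK_RETRY);
            error = VOP_GETATTR(vp, &vattr, curthread->td_ucred);
            VOP_UNLOCK(vp);
            if (error != 0)
                return (error);
        } else
            vattr.va_size = 0;    /* unused for other whence values */

        return (lf_advlock(ap, &vp->v_lockf, vattr.va_size));
    }
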
404 struct vnode *vp; in vop_stdadvlockasync() local
408 vp = ap->a_vp; in vop_stdadvlockasync()
411 vn_lock(vp, LK_SHARED | LK_RETRY); in vop_stdadvlockasync()
412 error = VOP_GETATTR(vp, &vattr, curthread->td_ucred); in vop_stdadvlockasync()
413 VOP_UNLOCK(vp); in vop_stdadvlockasync()
419 return (lf_advlockasync(ap, &(vp->v_lockf), vattr.va_size)); in vop_stdadvlockasync()
425 struct vnode *vp; in vop_stdadvlockpurge() local
427 vp = ap->a_vp; in vop_stdadvlockpurge()
428 lf_purgelocks(vp, &vp->v_lockf); in vop_stdadvlockpurge()
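
vop_stdadvlockasync() repeats the same size lookup for the asynchronous byte-range lock path, and vop_stdadvlockpurge() simply hands v_lockf to lf_purgelocks(). Filesystems normally pick these defaults up implicitly; a hypothetical vector ("myfs" is an invented name) showing both the implicit and the explicit wiring:

    /* Hypothetical filesystem vector: anything not listed falls through
     * .vop_default to the std implementations in vfs_default.c. */
    static struct vop_vector myfs_vnodeops = {
        .vop_default      = &default_vnodeops,
        /* Explicit entries shown only for illustration; omitting them
         * has the same effect via vop_default. */
        .vop_advlock      = vop_stdadvlock,
        .vop_advlockasync = vop_stdadvlockasync,
        .vop_advlockpurge = vop_stdadvlockpurge,
    };
    VFS_VOP_VECTOR_REGISTER(myfs_vnodeops);
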
476 struct vnode *vp = ap->a_vp; in vop_stdlock() local
479 ilk = VI_MTX(vp); in vop_stdlock()
480 return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags, in vop_stdlock()
488 struct vnode *vp = ap->a_vp; in vop_stdunlock() local
490 return (lockmgr_unlock(vp->v_vnlock)); in vop_stdunlock()
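
vop_stdlock() and vop_stdunlock() are the default VOP_LOCK1/VOP_UNLOCK implementations. They operate on v_vnlock rather than on the embedded v_lock directly, because stacked filesystems (nullfs, unionfs) point v_vnlock at the lower vnode's lock so the whole stack shares one lock. A sketch, assuming the lockmgr_lock_flags() signature matches the excerpt's usage:

    /* Sketch of the default lock pair over v_vnlock, using the vnode
     * interlock as the lockmgr interlock. */
    static int
    stdlock_sketch(struct vop_lock1_args *ap)
    {
        struct vnode *vp = ap->a_vp;

        return (lockmgr_lock_flags(vp->v_vnlock, ap->a_flags,
            &VI_MTX(vp)->lock_object, ap->a_file, ap->a_line));
    }

    static int
    stdunlock_sketch(struct vop_unlock_args *ap)
    {

        return (lockmgr_unlock(ap->a_vp->v_vnlock));
    }
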
511 struct vnode *vp = ap->a_vp; in vop_lock() local
515 MPASS(vp->v_vnlock == &vp->v_lock); in vop_lock()
522 return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line)); in vop_lock()
524 return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line)); in vop_lock()
527 ilk = VI_MTX(vp); in vop_lock()
528 return (lockmgr_lock_flags(&vp->v_lock, flags, in vop_lock()
535 struct vnode *vp = ap->a_vp; in vop_unlock() local
537 MPASS(vp->v_vnlock == &vp->v_lock); in vop_unlock()
539 return (lockmgr_unlock(&vp->v_lock)); in vop_unlock()
545 struct vnode *vp = ap->a_vp; in vop_islocked() local
547 MPASS(vp->v_vnlock == &vp->v_lock); in vop_islocked()
549 return (lockstatus(&vp->v_lock)); in vop_islocked()
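
The vop_lock()/vop_unlock()/vop_islocked() trio (no "std" in the name) is for filesystems that guarantee v_vnlock is never redirected, hence the MPASS assertions; that guarantee lets the common shared and exclusive cases skip the interlock entirely. A sketch of the fast-path dispatch, mirroring the excerpted logic (the exact flag mask is from memory):

    /* Sketch of the vop_lock() fast path on the embedded v_lock. */
    static int
    lock_sketch(struct vop_lock1_args *ap)
    {
        struct vnode *vp = ap->a_vp;
        int flags = ap->a_flags;

        MPASS(vp->v_vnlock == &vp->v_lock);
        /* No interlock or other special flags: go straight to the
         * shared/exclusive lockmgr entry points. */
        if ((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) == 0) {
            if ((flags & LK_TYPE_MASK) == LK_SHARED)
                return (lockmgr_slock(&vp->v_lock, flags,
                    ap->a_file, ap->a_line));
            if ((flags & LK_TYPE_MASK) == LK_EXCLUSIVE)
                return (lockmgr_xlock(&vp->v_lock, flags,
                    ap->a_file, ap->a_line));
        }
        /* Slow path: let lockmgr handle interlocks and odd flags. */
        return (lockmgr_lock_flags(&vp->v_lock, flags,
            &VI_MTX(vp)->lock_object, ap->a_file, ap->a_line));
    }
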
582 struct vnode *vp; in vop_stdgetwritemount() local
593 vp = ap->a_vp; in vop_stdgetwritemount()
594 mp = vfs_ref_from_vp(vp); in vop_stdgetwritemount()
687 struct vnode *const vp = ap->a_vp; in vop_stdvptocnp() local
705 if (vp->v_type != VDIR) in vop_stdvptocnp()
708 error = VOP_GETATTR(vp, &va, cred); in vop_stdvptocnp()
712 vref(vp); in vop_stdvptocnp()
713 locked = VOP_ISLOCKED(vp); in vop_stdvptocnp()
714 VOP_UNLOCK(vp); in vop_stdvptocnp()
716 "..", vp); in vop_stdvptocnp()
720 vn_lock(vp, locked | LK_RETRY); in vop_stdvptocnp()
727 if (vp->v_mount != (*dvp)->v_mount && in vop_stdvptocnp()
810 vn_lock(vp, locked | LK_RETRY); in vop_stdvptocnp()
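
vop_stdvptocnp() reconstructs a vnode's name by looking up ".." and then scanning the parent directory for an entry whose inode number matches. The excerpt shows the delicate part: the lookup cannot run with vp locked, so the lock is dropped and later re-acquired in its original mode. A sketch fragment of that dance, from the middle of the function; the NDINIT_ATVP flag set is from memory and may not match the source exactly, and error handling is trimmed:

    /* Sketch: resolve ".." relative to vp without holding its lock. */
    struct nameidata nd;
    int locked, error;

    vref(vp);                       /* keep vp alive across the unlock */
    locked = VOP_ISLOCKED(vp);      /* remember shared vs. exclusive */
    VOP_UNLOCK(vp);
    NDINIT_ATVP(&nd, LOOKUP, FOLLOW | LOCKSHARED | LOCKLEAF, UIO_SYSSPACE,
        "..", vp);
    error = namei(&nd);
    if (error != 0) {
        vn_lock(vp, locked | LK_RETRY);     /* restore original mode */
        return (error);
    }
    *dvp = nd.ni_vp;                /* parent, handed back to the caller */
    /* ...scan *dvp for the entry naming vp, then relock vp as above... */
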
827 struct vnode *vp; in vop_stdallocate() local
835 vp = ap->a_vp; in vop_stdallocate()
839 error = VOP_GETATTR(vp, vap, ap->a_cred); in vop_stdallocate()
856 error = VFS_STATFS(vp->v_mount, sfs, td); in vop_stdallocate()
876 error = VOP_SETATTR(vp, vap, ap->a_cred); in vop_stdallocate()
881 error = VOP_SETATTR(vp, vap, ap->a_cred); in vop_stdallocate()
907 error = VOP_READ(vp, &auio, ap->a_ioflag, ap->a_cred); in vop_stdallocate()
928 error = VOP_WRITE(vp, &auio, ap->a_ioflag, ap->a_cred); in vop_stdallocate()
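
vop_stdallocate() emulates posix_fallocate() for filesystems without a native allocator: it first extends va_size with VOP_SETATTR() if the range reaches past EOF, then forces allocation by rewriting every block in the range. A simplified sketch of the read-then-write loop; the helper name and parameters are invented, and the real code also zero-fills whatever a short read near EOF did not cover and yields between iterations:

    /* Sketch: touch each block in [offset, offset+len) so the
     * filesystem must allocate backing store for it. */
    static int
    stdallocate_loop_sketch(struct vnode *vp, off_t offset, off_t len,
        int ioflag, struct ucred *cred, struct thread *td, char *buf,
        u_long iosize)
    {
        struct uio auio;
        struct iovec aiov;
        off_t cur;
        int error;

        error = 0;
        while (len > 0 && error == 0) {
            cur = omin(len, (off_t)iosize);

            /* Read what is already there. */
            aiov.iov_base = buf;
            aiov.iov_len = cur;
            auio.uio_iov = &aiov;
            auio.uio_iovcnt = 1;
            auio.uio_offset = offset;
            auio.uio_resid = cur;
            auio.uio_segflg = UIO_SYSSPACE;
            auio.uio_rw = UIO_READ;
            auio.uio_td = td;
            error = VOP_READ(vp, &auio, ioflag, cred);
            if (error != 0)
                break;

            /* Write the same bytes back to force allocation. */
            aiov.iov_base = buf;
            aiov.iov_len = cur;
            auio.uio_iov = &aiov;
            auio.uio_iovcnt = 1;
            auio.uio_offset = offset;
            auio.uio_resid = cur;
            auio.uio_rw = UIO_WRITE;
            error = VOP_WRITE(vp, &auio, ioflag, cred);

            offset += cur;
            len -= cur;
        }
        return (error);
    }
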
948 vp_zerofill(struct vnode *vp, struct vattr *vap, off_t *offsetp, off_t *lenp, in vp_zerofill() argument
985 error = VOP_WRITE(vp, &auio, ioflag, cred); in vp_zerofill()
1004 struct vnode *vp; in vop_stddeallocate() local
1011 vp = ap->a_vp; in vop_stddeallocate()
1015 error = VOP_GETATTR(vp, &va, cred); in vop_stddeallocate()
1022 error = vn_bmap_seekhole_locked(vp, FIOSEEKDATA, &noff, cred); in vop_stddeallocate()
1044 error = vn_bmap_seekhole_locked(vp, FIOSEEKHOLE, &noff, cred); in vop_stddeallocate()
1050 error = vp_zerofill(vp, &va, &offset, &rem, ap->a_ioflag, cred); in vop_stddeallocate()
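
vop_stddeallocate() emulates hole punching for filesystems without native support: it walks the range with FIOSEEKDATA/FIOSEEKHOLE and overwrites only the data regions with zeros via vp_zerofill(), since existing holes already read back as zeros. A sketch of the walk; the bookkeeping is simplified, and vp_zerofill() is assumed (per the excerpt's by-reference arguments) to advance *offsetp as it writes:

    /* Sketch: zero-fill every data region inside [offset, offset+rem). */
    static int
    stddeallocate_sketch(struct vnode *vp, struct vattr *va, off_t offset,
        off_t rem, int ioflag, struct ucred *cred)
    {
        off_t hole, noff, start, xfer;
        int error;

        error = 0;
        while (rem > 0 && error == 0) {
            /* Find the next data region at or after offset. */
            noff = offset;
            error = vn_bmap_seekhole_locked(vp, FIOSEEKDATA, &noff, cred);
            if (error != 0)
                break;              /* e.g. ENXIO: only holes remain */
            rem -= noff - offset;   /* the leading hole needs no work */
            offset = noff;
            if (rem <= 0)
                break;

            /* Bound this pass at the next hole. */
            hole = noff;
            error = vn_bmap_seekhole_locked(vp, FIOSEEKHOLE, &hole, cred);
            if (error != 0)
                break;
            start = offset;
            xfer = omin(hole - offset, rem);
            error = vp_zerofill(vp, va, &offset, &xfer, ioflag, cred);
            rem -= offset - start;  /* account for what was written */
        }
        return (error);
    }
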
1072 struct vnode *vp; in vop_stdadvise() local
1078 vp = ap->a_vp; in vop_stdadvise()
1090 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); in vop_stdadvise()
1091 if (VN_IS_DOOMED(vp)) { in vop_stdadvise()
1092 VOP_UNLOCK(vp); in vop_stdadvise()
1105 bsize = vp->v_bufobj.bo_bsize; in vop_stdadvise()
1116 if (vp->v_object != NULL) { in vop_stdadvise()
1117 VM_OBJECT_RLOCK(vp->v_object); in vop_stdadvise()
1118 vm_object_page_noreuse(vp->v_object, in vop_stdadvise()
1121 VM_OBJECT_RUNLOCK(vp->v_object); in vop_stdadvise()
1124 bo = &vp->v_bufobj; in vop_stdadvise()
1132 VOP_UNLOCK(vp); in vop_stdadvise()
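
The excerpt is the POSIX_FADV_DONTNEED branch of vop_stdadvise(): once the vnode is exclusively locked and confirmed not doomed (that is, not reclaimed while the lock was being awaited), cached pages in the range are marked for early reuse and the clean buffers covering it are released. A sketch, with the page rounding reconstructed from memory and the buffer-release step reduced to a comment:

    /* Sketch of the POSIX_FADV_DONTNEED handling. */
    static int
    stdadvise_dontneed_sketch(struct vop_advise_args *ap)
    {
        struct vnode *vp = ap->a_vp;

        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        if (VN_IS_DOOMED(vp)) {     /* reclaimed while we slept */
            VOP_UNLOCK(vp);
            return (0);
        }
        if (vp->v_object != NULL) {
            /* Hint the VM layer that these pages may be reclaimed
             * ahead of others. */
            VM_OBJECT_RLOCK(vp->v_object);
            vm_object_page_noreuse(vp->v_object,
                OFF_TO_IDX(trunc_page(ap->a_start)),
                OFF_TO_IDX(round_page(ap->a_end)));
            VM_OBJECT_RUNLOCK(vp->v_object);
        }
        /* The real code also walks vp->v_bufobj here and releases the
         * clean buffers whose block range falls inside the advice. */
        VOP_UNLOCK(vp);
        return (0);
    }
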
1175 struct vnode *vp; in vop_stdset_text() local
1179 vp = ap->a_vp; in vop_stdset_text()
1181 n = atomic_load_int(&vp->v_writecount); in vop_stdset_text()
1195 if ((vn_irflag_read(vp) & VIRF_TEXT_REF) != 0) { in vop_stdset_text()
1196 vref(vp); in vop_stdset_text()
1199 if (atomic_fcmpset_int(&vp->v_writecount, &n, -1)) { in vop_stdset_text()
1203 vunref(vp); in vop_stdset_text()
1209 if (atomic_fcmpset_int(&vp->v_writecount, &n, n - 1)) { in vop_stdset_text()
1219 struct vnode *vp; in vop_stdunset_text() local
1222 vp = ap->a_vp; in vop_stdunset_text()
1224 n = atomic_load_int(&vp->v_writecount); in vop_stdunset_text()
1234 if (atomic_fcmpset_int(&vp->v_writecount, &n, 0)) { in vop_stdunset_text()
1235 if ((vn_irflag_read(vp) & VIRF_TEXT_REF) != 0) { in vop_stdunset_text()
1236 vunref(vp); in vop_stdunset_text()
1244 if (atomic_fcmpset_int(&vp->v_writecount, &n, n + 1)) { in vop_stdunset_text()
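
vop_stdset_text() and vop_stdunset_text() implement the classic ETXTBSY exclusion on v_writecount: negative values count executable (text) mappings, positive values count writers, and lock-free fcmpset loops move the counter between states. On the 0 to -1 transition the vnode may also gain a use reference (VIRF_TEXT_REF) so a mapped binary cannot be recycled; the matching -1 to 0 transition gives it back, as the vunref() in the unset excerpt shows. A reduced sketch of the set side, with the reference juggling compressed to a comment:

    /* Sketch of the v_writecount text protocol (set side). */
    static int
    set_text_sketch(struct vnode *vp)
    {
        int n;

        n = atomic_load_int(&vp->v_writecount);
        for (;;) {
            if (n > 0)
                return (ETXTBSY);       /* writers present */
            /* The real 0 -> -1 transition may vref(vp) first
             * (VIRF_TEXT_REF) and vunref() if the fcmpset loses. */
            if (atomic_fcmpset_int(&vp->v_writecount, &n, n - 1))
                return (0);
            /* fcmpset reloaded n with the current value; retry. */
        }
    }
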
1254 struct vnode *vp; in vop_stdadd_writecount_impl() local
1258 vp = ap->a_vp; in vop_stdadd_writecount_impl()
1261 mp = vp->v_mount; in vop_stdadd_writecount_impl()
1264 VNPASS((mp->mnt_kern_flag & MNTK_NOMSYNC) == 0, vp); in vop_stdadd_writecount_impl()
1266 VNPASS((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0, vp); in vop_stdadd_writecount_impl()
1271 n = atomic_load_int(&vp->v_writecount); in vop_stdadd_writecount_impl()
1277 VNASSERT(n + ap->a_inc >= 0, vp, in vop_stdadd_writecount_impl()
1282 vlazy(vp); in vop_stdadd_writecount_impl()
1286 if (atomic_fcmpset_int(&vp->v_writecount, &n, n + ap->a_inc)) { in vop_stdadd_writecount_impl()
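
vop_stdadd_writecount_impl() is the writer-side counterpart: it refuses to take v_writecount positive while text mappings hold it negative, and on the first write reference it calls vlazy() so the vnode lands on the mount's lazy list for deferred msync processing (hence the MNTK_NOMSYNC assertions in the excerpt). A sketch:

    /* Sketch of the writer side of the v_writecount protocol. */
    static int
    add_writecount_sketch(struct vnode *vp, int inc)
    {
        int n;

        n = atomic_load_int(&vp->v_writecount);
        for (;;) {
            if (n < 0)
                return (ETXTBSY);   /* executable mappings present */
            VNASSERT(n + inc >= 0, vp,
                ("neg writecount increment %d", inc));
            if (n == 0 && inc > 0)
                vlazy(vp);          /* track for deferred msync */
            if (atomic_fcmpset_int(&vp->v_writecount, &n, n + inc))
                return (0);
        }
    }
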
1331 struct vnode *vp; in vop_stdioctl() local
1339 vp = ap->a_vp; in vop_stdioctl()
1340 error = vn_lock(vp, LK_SHARED); in vop_stdioctl()
1343 if (vp->v_type == VREG) in vop_stdioctl()
1344 error = VOP_GETATTR(vp, &va, ap->a_cred); in vop_stdioctl()
1354 VOP_UNLOCK(vp); in vop_stdioctl()
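
vop_stdioctl() gives every filesystem a fallback FIOSEEKDATA/FIOSEEKHOLE: under a shared lock it models a regular file as one solid data region, so seeking data returns the offset unchanged and seeking a hole returns va_size. A sketch of that branch; the a_data pointer handling is reconstructed from memory:

    /* Sketch of the default FIOSEEKDATA/FIOSEEKHOLE handling. */
    static int
    stdioctl_sketch(struct vop_ioctl_args *ap)
    {
        struct vnode *vp = ap->a_vp;
        struct vattr va;
        off_t *offp;
        int error;

        switch (ap->a_command) {
        case FIOSEEKDATA:
        case FIOSEEKHOLE:
            error = vn_lock(vp, LK_SHARED);
            if (error != 0)
                return (EBADF);
            if (vp->v_type == VREG)
                error = VOP_GETATTR(vp, &va, ap->a_cred);
            else
                error = ENOTTY;     /* no generic answer otherwise */
            if (error == 0) {
                offp = (off_t *)ap->a_data;
                if (*offp < 0 || *offp >= va.va_size)
                    error = ENXIO;
                else if (ap->a_command == FIOSEEKHOLE)
                    *offp = va.va_size; /* the only "hole" is at EOF */
                /* FIOSEEKDATA: *offp already points at data. */
            }
            VOP_UNLOCK(vp);
            return (error);
        default:
            return (ENOTTY);
        }
    }
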
1390 struct vnode *vp, *mvp; in vfs_stdsync() local
1402 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) { in vfs_stdsync()
1403 if (vp->v_bufobj.bo_dirty.bv_cnt == 0) { in vfs_stdsync()
1404 VI_UNLOCK(vp); in vfs_stdsync()
1407 if ((error = vget(vp, lockreq)) != 0) { in vfs_stdsync()
1414 error = VOP_FSYNC(vp, waitfor, td); in vfs_stdsync()
1417 vput(vp); in vfs_stdsync()
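
vfs_stdsync() is the default VFS_SYNC: it iterates every vnode on the mount with MNT_VNODE_FOREACH_ALL, skips the ones with no dirty buffers (dropping the interlock the iterator handed over), and fsyncs the rest. A sketch of the sweep, mirroring the excerpted lines:

    /* Sketch of the vfs_stdsync() sweep over a mount's vnodes. */
    static int
    stdsync_sketch(struct mount *mp, int waitfor)
    {
        struct vnode *vp, *mvp;
        struct thread *td = curthread;
        int error, lockreq, allerror = 0;

        lockreq = LK_EXCLUSIVE | LK_INTERLOCK;
        if (waitfor != MNT_WAIT)
            lockreq |= LK_NOWAIT;
    loop:
        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
            if (vp->v_bufobj.bo_dirty.bv_cnt == 0) {
                VI_UNLOCK(vp);      /* iterator returns vp interlocked */
                continue;
            }
            if ((error = vget(vp, lockreq)) != 0) {
                if (error == ENOENT) {      /* recycled under us */
                    MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                    goto loop;
                }
                continue;
            }
            error = VOP_FSYNC(vp, waitfor, td);
            if (error != 0)
                allerror = error;   /* remember, but keep sweeping */
            vput(vp);
        }
        return (allerror);
    }
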
1512 struct vnode *vp; in vop_stdstat() local
1517 vp = a->a_vp; in vop_stdstat()
1539 error = VOP_GETATTR(vp, vap, a->a_active_cred); in vop_stdstat()
1554 sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0]; in vop_stdstat()
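
vop_stdstat() is the generic VOP_STAT: a single VOP_GETATTR() followed by a field-by-field translation of struct vattr into struct stat. A sketch of the translation showing a few representative fields; the real code also fills timestamps, block counts, rdev, and flags:

    /* Sketch: vattr -> stat translation after VOP_GETATTR(). */
    static int
    stdstat_sketch(struct vop_stat_args *a)
    {
        struct vnode *vp = a->a_vp;
        struct stat *sb = a->a_sb;
        struct vattr vattr, *vap = &vattr;
        int error;

        error = VOP_GETATTR(vp, vap, a->a_active_cred);
        if (error != 0)
            return (error);
        sb->st_dev = vp->v_mount->mnt_stat.f_fsid.val[0];
        sb->st_ino = vap->va_fileid;
        sb->st_mode = vap->va_mode | VTTOIF(vap->va_type);
        sb->st_nlink = vap->va_nlink;
        sb->st_uid = vap->va_uid;
        sb->st_gid = vap->va_gid;
        sb->st_size = vap->va_size;
        return (0);
    }
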
1629 struct vnode *dvp, *vp, **vpp; in vop_stdvput_pair() local
1634 if (vpp != NULL && ap->a_unlock_vp && (vp = *vpp) != NULL) in vop_stdvput_pair()
1635 vput(vp); in vop_stdvput_pair()
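
vop_stdvput_pair() is the default VOP_VPUT_PAIR: it just releases the directory vnode and, when asked, the child as well. Filesystems override it when unlocking dvp has to be sequenced against pending child-vnode work (softupdates being the usual reason). The whole default fits in a few lines:

    /* Sketch of the default vput-pair: release dvp, optionally *vpp. */
    static int
    stdvput_pair_sketch(struct vop_vput_pair_args *ap)
    {
        struct vnode *dvp = ap->a_dvp, **vpp = ap->a_vpp, *vp;

        vput(dvp);
        if (vpp != NULL && ap->a_unlock_vp && (vp = *vpp) != NULL)
            vput(vp);
        return (0);
    }
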