Lines Matching "fine-granular"
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
175 * sub-cache consisting mostly of such files. The system balances the size
176 * of this sub-cache with its complement to try to prevent either from
181 * to recycling of free vnodes. In the best-operating case, the cache is
186 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
187 * ones) to reach one of these states. The watermarks are currently hard-
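/*
 * Hedged sketch of the watermark derivation the comment above refers to
 * (compare vnlru_recalc() further down in this listing; constants recalled
 * from that function, treat as illustrative rather than authoritative):
 */
static void
vnlru_recalc_sketch(void)
{
	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
	vhiwat = gapvnodes / 11;	/* ~9%, just under vlrureclaim()'s 10% */
	vlowat = vhiwat / 2;		/* start recycling here, stop at vhiwat */
}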
246 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
264 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
283 * bo->bo_synclist
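/*
 * Hedged sketch of the round-robin bucketing described above (not a
 * verbatim excerpt): an entry is inserted "delay" slots ahead of the bucket
 * the syncer is currently draining, and the table size is a power of two so
 * the wraparound is a simple mask. Compare vn_syncer_add_to_worklist()
 * below, which clamps delay to syncer_maxdelay - 2 first.
 */
static void
syncer_insert_sketch(struct bufobj *bo, int delay)
{
	int slot;

	slot = (syncer_delayno + delay) & syncer_mask;
	LIST_INSERT_HEAD(&syncer_workitem_pending[slot], bo, bo_synclist);
}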
336 static bool vstir; /* nonzero to stir non-free vnodes */
352 if (error != 0 || req->newptr == NULL) in sysctl_maxvnodes()
403 if (error != 0 || req->newptr == NULL) in sysctl_wantfreevnodes()
435 if (req->newptr == NULL) in sysctl_try_reclaim_vnode()
437 if (req->newlen >= PATH_MAX) in sysctl_try_reclaim_vnode()
441 error = SYSCTL_IN(req, buf, req->newlen); in sysctl_try_reclaim_vnode()
445 buf[req->newlen] = '\0'; in sysctl_try_reclaim_vnode()
482 if (req->newptr == NULL) in sysctl_ftry_reclaim_vnode()
491 vp = fp->f_vnode; in sysctl_ftry_reclaim_vnode()
512 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
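/*
 * Hedged sketch of how such a shift count can be derived (the helper name
 * is hypothetical): the low bits of a vnode address are constant across the
 * zone's allocation size, so they are shifted out before the address is
 * used as a hash.
 */
static u_int
vnsz2log_compute(void)
{
	u_int log;

	for (log = 0; (1U << log) < sizeof(struct vnode); log++)
		continue;
	return (log);
}
/* later: vp->v_hash = (uintptr_t)vp >> vnsz2log; (see getnewvnode() below) */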
546 bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, lblkno); in buf_lookup_ge()
548 bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, 0); in buf_lookup_ge()
549 if (bp != NULL && bp->b_lblkno < lblkno) in buf_lookup_ge()
563 error = BUF_PCTRIE_INSERT_LOOKUP_LE(&bv->bv_root, bp, n); in buf_insert_lookup_le()
565 if (*n == NULL && bp->b_lblkno >= 0) in buf_insert_lookup_le()
566 *n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, ~0L); in buf_insert_lookup_le()
567 if (*n != NULL && (*n)->b_lblkno >= bp->b_lblkno) in buf_insert_lookup_le()
592 vp->v_type = VMARKER; in vn_alloc_marker()
593 vp->v_mount = mp; in vn_alloc_marker()
602 MPASS(vp->v_type == VMARKER); in vn_free_marker()
625 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); in vnode_dtor()
626 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); in vnode_dtor()
632 * KASAN's tracking is not byte-granular, any preceding fields sharing in vnode_dtor()
633 * the same 8-byte aligned word must also be marked valid. in vnode_dtor()
644 kasan_mark((void *)((char *)mem + off1), off2 - off1, in vnode_dtor()
645 off2 - off1, KASAN_UMA_FREED); in vnode_dtor()
649 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, in vnode_dtor()
667 vp->v_vnlock = &vp->v_lock; in vnode_init()
668 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); in vnode_init()
670 * By default, don't allow shared locks unless filesystems opt-in. in vnode_init()
672 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, in vnode_init()
677 bufobj_init(&vp->v_bufobj, vp); in vnode_init()
685 rangelock_init(&vp->v_rl); in vnode_init()
687 vp->v_dbatchcpu = NOCPU; in vnode_init()
689 vp->v_state = VSTATE_DEAD; in vnode_init()
694 vp->v_holdcnt = VHOLD_NO_SMR; in vnode_init()
695 vp->v_type = VNON; in vnode_init()
716 rangelock_destroy(&vp->v_rl); in vnode_fini()
717 lockdestroy(vp->v_vnlock); in vnode_fini()
718 mtx_destroy(&vp->v_interlock); in vnode_fini()
719 bo = &vp->v_bufobj; in vnode_fini()
728 * eliminate dependency on NFS-private header.
731 * private inode data, but the NFS-based estimation is ample enough.
732 * Still, we care about differences in the size between 64- and 32-bit
759 * Currently, on 64-bit platforms, 'desiredvnodes' is set to in vntblinit()
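/*
 * Hedged sketch of the sizing estimate these comments refer to (constants
 * and names recalled from vntblinit(), treat as approximate): charge each
 * vnode for its VM object, its namecache entries and an NFS-sized inode,
 * and let the total consume about a tenth of the kernel heap.
 */
virtvnodes = vm_kmem_size / (10 * (sizeof(struct vm_object) +
    sizeof(struct vnode) + NC_SZ * ncsizefactor + NFS_NCLNODE_SZ));
desiredvnodes = min(physvnodes, virtvnodes);	/* physvnodes: RAM-based cap */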
771 printf("Reducing kern.maxvnodes %lu -> %lu\n", in vntblinit()
802 * Preallocate enough nodes to support one-per buf so that in vntblinit()
828 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); in vntblinit()
839 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
848 * Within each file system, the lock order is C->A->B and F->D->E.
852 * C->A->B
854 * +->F->D->E
883 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); in vfs_busy()
884 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); in vfs_busy()
885 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); in vfs_busy()
908 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { in vfs_busy()
909 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), in vfs_busy()
910 ("%s: non-empty upper mount list with pending unmount", in vfs_busy()
912 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { in vfs_busy()
921 mp->mnt_kern_flag |= MNTK_MWAIT; in vfs_busy()
929 mp->mnt_lockref++; in vfs_busy()
946 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); in vfs_unbusy()
956 c = --mp->mnt_lockref; in vfs_unbusy()
957 if (mp->mnt_vfs_ops == 0) { in vfs_unbusy()
958 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); in vfs_unbusy()
964 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { in vfs_unbusy()
965 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); in vfs_unbusy()
967 mp->mnt_kern_flag &= ~MNTK_DRAINING; in vfs_unbusy()
968 wakeup(&mp->mnt_lockref); in vfs_unbusy()
984 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { in vfs_getvfs()
999 * To avoid congestion on mountlist_mtx, implement simple direct-mapped
1016 hash = fsid->val[0] ^ fsid->val[1]; in vfs_busyfs()
1017 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); in vfs_busyfs()
1019 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) in vfs_busyfs()
1025 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) in vfs_busyfs()
1033 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { in vfs_busyfs()
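/*
 * Reader's sketch of the direct-mapped scheme above (declaration shape
 * assumed, not a verbatim excerpt): one mount pointer per slot, indexed by
 * the folded fsid hash. A hit is only trusted after re-comparing the full
 * fsid and busying the mount; a miss takes mountlist_mtx, scans the
 * mountlist, and refills the slot.
 */
static struct mount *fsid_cache[FSID_CACHE_SIZE];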
1057 if (jailed(td->td_ucred)) { in vfs_suser()
1062 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) in vfs_suser()
1069 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) in vfs_suser()
1075 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified in vfs_suser()
1080 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && in vfs_suser()
1081 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { in vfs_suser()
1092 * support 16-bit device numbers. We end up with unique val[0]'s for the
1097 * micro-optimization and a defense against returning the same fsid to
1110 mtype = mp->mnt_vfc->vfc_typenum; in vfs_getnewfsid()
1121 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; in vfs_getnewfsid()
1122 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; in vfs_getnewfsid()
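/*
 * Hedged reconstruction of the generation loop described above (bit layout
 * recalled from memory, treat as illustrative): pack the filesystem type
 * and a rolling 16-bit counter into val[0] via makedev(), then probe
 * vfs_getvfs() until the candidate fsid is not already in use.
 */
static void
vfs_getnewfsid_sketch(struct mount *mp)
{
	fsid_t tfsid;
	int mtype;

	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makedev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
}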
1152 tsp->tv_sec = time_second; in vfs_timestamp()
1153 tsp->tv_nsec = 0; in vfs_timestamp()
1176 vap->va_type = VNON; in vattr_null()
1177 vap->va_size = VNOVAL; in vattr_null()
1178 vap->va_bytes = VNOVAL; in vattr_null()
1179 vap->va_mode = VNOVAL; in vattr_null()
1180 vap->va_nlink = VNOVAL; in vattr_null()
1181 vap->va_uid = VNOVAL; in vattr_null()
1182 vap->va_gid = VNOVAL; in vattr_null()
1183 vap->va_fsid = VNOVAL; in vattr_null()
1184 vap->va_fileid = VNOVAL; in vattr_null()
1185 vap->va_blocksize = VNOVAL; in vattr_null()
1186 vap->va_rdev = VNOVAL; in vattr_null()
1187 vap->va_atime.tv_sec = VNOVAL; in vattr_null()
1188 vap->va_atime.tv_nsec = VNOVAL; in vattr_null()
1189 vap->va_mtime.tv_sec = VNOVAL; in vattr_null()
1190 vap->va_mtime.tv_nsec = VNOVAL; in vattr_null()
1191 vap->va_ctime.tv_sec = VNOVAL; in vattr_null()
1192 vap->va_ctime.tv_nsec = VNOVAL; in vattr_null()
1193 vap->va_birthtime.tv_sec = VNOVAL; in vattr_null()
1194 vap->va_birthtime.tv_nsec = VNOVAL; in vattr_null()
1195 vap->va_flags = VNOVAL; in vattr_null()
1196 vap->va_gen = VNOVAL; in vattr_null()
1197 vap->va_vaflags = 0; in vattr_null()
1198 vap->va_filerev = VNOVAL; in vattr_null()
1199 vap->va_bsdflags = 0; in vattr_null()
1206 * - all parameters were picked years ago when RAM sizes were significantly
1208 * - it can pick vnodes based on pages used by the vm object, but filesystems
1210 * - since ZFS has its own aging policy it gets partially combated by this one
1211 * - a dedicated method should be provided for filesystems to let them decide
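/*
 * Sketch of the page-count trigger behind the caveats above (formula
 * recalled from vnlru_proc(), treat as approximate): vnodes whose VM object
 * caches more resident pages than a per-vnode share of RAM are skipped.
 */
usevnodes = rnumvnodes - rfreevnodes;	/* vnodes doing real work */
if (usevnodes <= 0)
	usevnodes = 1;
trigger = vm_cnt.v_page_count * 2 / usevnodes;
/* vlrureclaim() then skips vnodes with resident_page_count > trigger */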
1257 if (__predict_false(vp->v_type == VMARKER)) in vlrureclaim()
1266 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || in vlrureclaim()
1267 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) in vlrureclaim()
1270 if (vp->v_type == VBAD || vp->v_type == VNON) in vlrureclaim()
1273 object = atomic_load_ptr(&vp->v_object); in vlrureclaim()
1274 if (object == NULL || object->resident_page_count > trigger) { in vlrureclaim()
1287 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { in vlrureclaim()
1291 if (vp->v_mount == NULL) { in vlrureclaim()
1312 if (vp->v_usecount > 0 || in vlrureclaim()
1313 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || in vlrureclaim()
1314 (vp->v_object != NULL && vp->v_object->handle == vp && in vlrureclaim()
1315 vp->v_object->resident_page_count > trigger)) { in vlrureclaim()
1332 MPASS(vp->v_type != VMARKER); in vlrureclaim()
1405 if (__predict_false(vp->v_type == VMARKER)) in vnlru_free_impl()
1407 if (vp->v_holdcnt > 0) in vnlru_free_impl()
1411 * of mount point. Note that mp is type-safe, the in vnlru_free_impl()
1415 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && in vnlru_free_impl()
1416 mp->mnt_op != mnt_op) { in vnlru_free_impl()
1419 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { in vnlru_free_impl()
1440 * The solution would be to pre-check if the vnode is likely to in vnlru_free_impl()
1441 * be recycle-able, but it needs to happen with the in vnlru_free_impl()
1449 count--; in vnlru_free_impl()
1457 return (ocount - count); in vnlru_free_impl()
1499 VNPASS(mvp->v_type == VMARKER, mvp); in vnlru_free_vfsops()
1531 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); in vnlru_recalc()
1532 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ in vnlru_recalc()
1590 (*lfreevnodes)--; in vfs_freevnodes_dec()
1591 if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP)) in vfs_freevnodes_dec()
1607 slop = rfreevnodes - rfreevnodes_old; in vnlru_read_freevnodes()
1609 slop = rfreevnodes_old - rfreevnodes; in vnlru_read_freevnodes()
1613 rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes; in vnlru_read_freevnodes()
1627 space = desiredvnodes - rnumvnodes; in vnlru_under()
1631 space += rfreevnodes - wantfreevnodes; in vnlru_under()
1699 * -1 -- fallback to regular vnlru loop
1700 * 0 -- do nothing, go to sleep
1701 * >0 -- recycle this many vnodes
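/*
 * Hedged sketch of the caller-side dispatch on that convention (compare
 * vnlru_proc_light(), whose "-1" check appears below; the freeing helper
 * name is assumed):
 */
freecount = vnlru_proc_light_pick();
if (freecount == -1)
	return (false);		/* fall back to the regular vnlru loop */
if (freecount != 0)
	vnlru_free_vnlru(freecount);	/* recycle that many vnodes */
return (true);			/* 0: nothing to do, go back to sleep */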
1709 return (-1); in vnlru_proc_light_pick()
1723 if (rnumvnodes - rfreevnodes >= desiredvnodes || in vnlru_proc_light_pick()
1725 return (-1); in vnlru_proc_light_pick()
1728 return (rnumvnodes - desiredvnodes); in vnlru_proc_light_pick()
1740 return (-1); in vnlru_proc_light_pick()
1754 if (freecount == -1) in vnlru_proc_light()
1804 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes); in vnlru_proc()
1810 * when it is not over-full and has space for about a 4% in vnlru_proc()
1834 usevnodes = rnumvnodes - rfreevnodes; in vnlru_proc()
1906 VNPASS(vp->v_holdcnt > 0, vp); in vtryrecycle()
1936 if (vp->v_usecount) { in vtryrecycle()
2020 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { in vn_alloc_hard()
2082 KASSERT(vops->registered, in getnewvnode()
2087 if (td->td_vp_reserved != NULL) { in getnewvnode()
2088 vp = td->td_vp_reserved; in getnewvnode()
2089 td->td_vp_reserved = NULL; in getnewvnode()
2111 lo = &vp->v_vnlock->lock_object; in getnewvnode()
2113 if (lo->lo_name != tag) { in getnewvnode()
2115 lo->lo_name = tag; in getnewvnode()
2122 * By default, don't allow shared locks unless filesystems opt-in. in getnewvnode()
2124 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; in getnewvnode()
2128 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); in getnewvnode()
2129 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); in getnewvnode()
2130 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); in getnewvnode()
2131 vp->v_type = VNON; in getnewvnode()
2132 vp->v_op = vops; in getnewvnode()
2133 vp->v_irflag = 0; in getnewvnode()
2136 vp->v_bufobj.bo_ops = &buf_ops_bio; in getnewvnode()
2143 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) in getnewvnode()
2147 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; in getnewvnode()
2156 vp->v_hash = (uintptr_t)vp >> vnsz2log; in getnewvnode()
2168 MPASS(td->td_vp_reserved == NULL); in getnewvnode_reserve()
2169 td->td_vp_reserved = vn_alloc(NULL); in getnewvnode_reserve()
2178 if (td->td_vp_reserved != NULL) { in getnewvnode_drop_reserve()
2179 vn_free(td->td_vp_reserved); in getnewvnode_drop_reserve()
2180 td->td_vp_reserved = NULL; in getnewvnode_drop_reserve()
2206 bo = &vp->v_bufobj; in freevnode()
2207 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); in freevnode()
2208 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); in freevnode()
2209 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); in freevnode()
2210 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); in freevnode()
2211 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); in freevnode()
2212 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); in freevnode()
2213 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, in freevnode()
2215 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); in freevnode()
2216 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, in freevnode()
2218 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, in freevnode()
2226 if (vp->v_pollinfo != NULL) { in freevnode()
2237 destroy_vpollinfo(vp->v_pollinfo); in freevnode()
2239 vp->v_pollinfo = NULL; in freevnode()
2241 vp->v_mountedhere = NULL; in freevnode()
2242 vp->v_unpcb = NULL; in freevnode()
2243 vp->v_rdev = NULL; in freevnode()
2244 vp->v_fifoinfo = NULL; in freevnode()
2245 vp->v_iflag = 0; in freevnode()
2246 vp->v_vflag = 0; in freevnode()
2247 bo->bo_flag = 0; in freevnode()
2259 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); in delmntque()
2261 mp = vp->v_mount; in delmntque()
2264 vp->v_mount = NULL; in delmntque()
2265 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, in delmntque()
2267 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); in delmntque()
2268 mp->mnt_nvnodelistsize--; in delmntque()
2281 KASSERT(vp->v_mount == NULL, in insmntque1_int()
2284 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { in insmntque1_int()
2285 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); in insmntque1_int()
2303 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && in insmntque1_int()
2304 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || in insmntque1_int()
2305 mp->mnt_nvnodelistsize == 0)) && in insmntque1_int()
2306 (vp->v_vflag & VV_FORCEINSMQ) == 0) { in insmntque1_int()
2310 vp->v_data = NULL; in insmntque1_int()
2311 vp->v_op = &dead_vnodeops; in insmntque1_int()
2317 vp->v_mount = mp; in insmntque1_int()
2319 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); in insmntque1_int()
2320 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, in insmntque1_int()
2322 mp->mnt_nvnodelistsize++; in insmntque1_int()
2361 if (bo->bo_dirty.bv_cnt > 0) { in bufobj_invalbuf()
2369 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { in bufobj_invalbuf()
2381 error = flushbuflist(&bo->bo_clean, in bufobj_invalbuf()
2384 error = flushbuflist(&bo->bo_dirty, in bufobj_invalbuf()
2394 * have write I/O in-progress but if there is a VM object then the in bufobj_invalbuf()
2395 * VM object can also have read-I/O in-progress. in bufobj_invalbuf()
2399 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { in bufobj_invalbuf()
2401 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); in bufobj_invalbuf()
2404 } while (bo->bo_numoutput > 0); in bufobj_invalbuf()
2410 if (bo->bo_object != NULL && in bufobj_invalbuf()
2412 VM_OBJECT_WLOCK(bo->bo_object); in bufobj_invalbuf()
2413 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? in bufobj_invalbuf()
2415 VM_OBJECT_WUNLOCK(bo->bo_object); in bufobj_invalbuf()
2421 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || in bufobj_invalbuf()
2422 bo->bo_clean.bv_cnt > 0)) in bufobj_invalbuf()
2425 bo->bo_dirty.bv_cnt > 0) in bufobj_invalbuf()
2442 if (vp->v_object != NULL && vp->v_object->handle != vp) in vinvalbuf()
2444 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); in vinvalbuf()
2463 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { in flushbuflist()
2472 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || in flushbuflist()
2473 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { in flushbuflist()
2477 lblkno = nbp->b_lblkno; in flushbuflist()
2478 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); in flushbuflist()
2488 KASSERT(bp->b_bufobj == bo, in flushbuflist()
2490 bp, bp->b_bufobj, bo)); in flushbuflist()
2497 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && in flushbuflist()
2500 bp->b_flags |= B_ASYNC; in flushbuflist()
2506 bp->b_flags |= (B_INVAL | B_RELBUF); in flushbuflist()
2507 bp->b_flags &= ~B_ASYNC; in flushbuflist()
2513 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) in flushbuflist()
2532 if (bp == NULL || bp->b_lblkno >= endn) in bnoreuselist()
2542 KASSERT(bp->b_bufobj == bo, in bnoreuselist()
2544 bp, bp->b_bufobj, bo)); in bnoreuselist()
2545 lblkno = bp->b_lblkno + 1; in bnoreuselist()
2546 if ((bp->b_flags & B_MANAGED) == 0) in bnoreuselist()
2548 bp->b_flags |= B_RELBUF; in bnoreuselist()
2555 if ((bp->b_flags & B_VMIO) != 0) in bnoreuselist()
2556 bp->b_flags |= B_NOREUSE; in bnoreuselist()
2585 bo = &vp->v_bufobj; in vtruncbuf()
2597 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { in vtruncbuf()
2598 if (bp->b_lblkno >= 0) in vtruncbuf()
2609 VNASSERT((bp->b_flags & B_DELWRI), vp, in vtruncbuf()
2642 bo = &vp->v_bufobj; in v_inval_buf_range()
2644 MPASS(blksize == bo->bo_bsize); in v_inval_buf_range()
2650 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); in v_inval_buf_range()
2668 bv = clean ? &bo->bo_clean : &bo->bo_dirty; in v_inval_buf_range_locked()
2672 TAILQ_FOREACH_FROM_SAFE(bp, &bv->bv_hd, b_bobufs, nbp) { in v_inval_buf_range_locked()
2673 if (bp->b_lblkno >= endlbn) in v_inval_buf_range_locked()
2683 bp->b_flags |= B_INVAL | B_RELBUF; in v_inval_buf_range_locked()
2684 bp->b_flags &= ~B_ASYNC; in v_inval_buf_range_locked()
2690 (((nbp->b_xflags & in v_inval_buf_range_locked()
2692 nbp->b_vp != vp || in v_inval_buf_range_locked()
2693 (nbp->b_flags & B_DELWRI) == (clean? B_DELWRI: 0))) in v_inval_buf_range_locked()
2696 } while (clean = !clean, anyfreed-- > 0); in v_inval_buf_range_locked()
2706 flags = bp->b_xflags; in buf_vlist_remove()
2708 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); in buf_vlist_remove()
2709 ASSERT_BO_WLOCKED(bp->b_bufobj); in buf_vlist_remove()
2715 bv = &bp->b_bufobj->bo_dirty; in buf_vlist_remove()
2717 bv = &bp->b_bufobj->bo_clean; in buf_vlist_remove()
2718 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); in buf_vlist_remove()
2719 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); in buf_vlist_remove()
2720 bv->bv_cnt--; in buf_vlist_remove()
2721 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); in buf_vlist_remove()
2737 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, in buf_vlist_find_or_add()
2739 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, in buf_vlist_find_or_add()
2741 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == xflags, in buf_vlist_find_or_add()
2745 bv = &bo->bo_dirty; in buf_vlist_find_or_add()
2747 bv = &bo->bo_clean; in buf_vlist_find_or_add()
2755 KASSERT(n->b_lblkno <= bp->b_lblkno, in buf_vlist_find_or_add()
2758 KASSERT((n->b_lblkno == bp->b_lblkno) == (error == EEXIST), in buf_vlist_find_or_add()
2767 KASSERT(TAILQ_EMPTY(&bv->bv_hd) || in buf_vlist_find_or_add()
2768 bp->b_lblkno < TAILQ_FIRST(&bv->bv_hd)->b_lblkno, in buf_vlist_find_or_add()
2771 bp, TAILQ_FIRST(&bv->bv_hd))); in buf_vlist_find_or_add()
2772 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); in buf_vlist_find_or_add()
2775 bp->b_lblkno < TAILQ_NEXT(n, b_bobufs)->b_lblkno, in buf_vlist_find_or_add()
2779 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); in buf_vlist_find_or_add()
2782 bv->bv_cnt++; in buf_vlist_find_or_add()
2796 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0, in buf_vlist_add()
2797 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); in buf_vlist_add()
2798 bp->b_xflags |= xflags; in buf_vlist_add()
2813 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); in gbincore()
2816 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); in gbincore()
2821 * on SMR for safe lookup, and bufs being in a no-free zone to provide type
2831 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); in gbincore_unlocked()
2834 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); in gbincore_unlocked()
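/*
 * Usage sketch (assumed caller pattern, not a verbatim excerpt): the
 * unlocked lookup can race buffer reassignment, so a NULL result is only a
 * hint; callers needing an exact answer retry under the bufobj lock.
 */
bp = gbincore_unlocked(bo, lblkno);
if (bp == NULL) {
	BO_RLOCK(bo);
	bp = gbincore(bo, lblkno);	/* authoritative, locked lookup */
	BO_RUNLOCK(bo);
}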
2846 bo = &vp->v_bufobj; in bgetvp()
2848 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); in bgetvp()
2850 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); in bgetvp()
2851 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, in bgetvp()
2858 bp->b_vp = vp; in bgetvp()
2859 bp->b_bufobj = bo; in bgetvp()
2860 bp->b_xflags |= BX_VNCLEAN; in bgetvp()
2863 if (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, bp->b_lblkno) == NULL) in bgetvp()
2872 bp->b_vp = NULL; in bgetvp()
2873 bp->b_bufobj = NULL; in bgetvp()
2874 bp->b_xflags &= ~BX_VNCLEAN; in bgetvp()
2887 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); in brelvp()
2888 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); in brelvp()
2893 vp = bp->b_vp; /* XXX */ in brelvp()
2894 bo = bp->b_bufobj; in brelvp()
2897 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { in brelvp()
2898 bo->bo_flag &= ~BO_ONWORKLST; in brelvp()
2901 syncer_worklist_len--; in brelvp()
2904 bp->b_vp = NULL; in brelvp()
2905 bp->b_bufobj = NULL; in brelvp()
2921 if (bo->bo_flag & BO_ONWORKLST) in vn_syncer_add_to_worklist()
2924 bo->bo_flag |= BO_ONWORKLST; in vn_syncer_add_to_worklist()
2928 if (delay > syncer_maxdelay - 2) in vn_syncer_add_to_worklist()
2929 delay = syncer_maxdelay - 2; in vn_syncer_add_to_worklist()
2942 len = syncer_worklist_len - sync_vnode_count; in sysctl_vfs_worklist_len()
2987 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || in sync_vnode()
2988 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, in sync_vnode()
2995 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { in sync_vnode()
3031 td->td_pflags |= TDP_NORUNNINGBUF; in sched_sync()
3033 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, in sched_sync()
3041 kproc_suspend_check(td->td_proc); in sched_sync()
3044 net_worklist_len = syncer_worklist_len - sync_vnode_count; in sched_sync()
3111 syncer_final_iter--; in sched_sync()
3123 rushjob -= 1; in sched_sync()
3223 vp = bp->b_vp; in reassignbuf()
3224 bo = bp->b_bufobj; in reassignbuf()
3226 KASSERT((bp->b_flags & B_PAGING) == 0, in reassignbuf()
3230 bp, bp->b_vp, bp->b_flags); in reassignbuf()
3233 if ((bo->bo_flag & BO_NONSTERILE) == 0) { in reassignbuf()
3240 bo->bo_flag |= BO_NONSTERILE; in reassignbuf()
3249 if (bp->b_flags & B_DELWRI) { in reassignbuf()
3250 if ((bo->bo_flag & BO_ONWORKLST) == 0) { in reassignbuf()
3251 switch (vp->v_type) { in reassignbuf()
3267 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { in reassignbuf()
3270 syncer_worklist_len--; in reassignbuf()
3272 bo->bo_flag &= ~BO_ONWORKLST; in reassignbuf()
3276 bv = &bo->bo_clean; in reassignbuf()
3277 bp = TAILQ_FIRST(&bv->bv_hd); in reassignbuf()
3278 KASSERT(bp == NULL || bp->b_bufobj == bo, in reassignbuf()
3279 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in reassignbuf()
3280 bp = TAILQ_LAST(&bv->bv_hd, buflists); in reassignbuf()
3281 KASSERT(bp == NULL || bp->b_bufobj == bo, in reassignbuf()
3282 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in reassignbuf()
3283 bv = &bo->bo_dirty; in reassignbuf()
3284 bp = TAILQ_FIRST(&bv->bv_hd); in reassignbuf()
3285 KASSERT(bp == NULL || bp->b_bufobj == bo, in reassignbuf()
3286 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in reassignbuf()
3287 bp = TAILQ_LAST(&bv->bv_hd, buflists); in reassignbuf()
3288 KASSERT(bp == NULL || bp->b_bufobj == bo, in reassignbuf()
3289 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in reassignbuf()
3298 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, in v_init_counters()
3302 refcount_init(&vp->v_holdcnt, 1); in v_init_counters()
3303 refcount_init(&vp->v_usecount, 1); in v_init_counters()
3323 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { in vget_prep_smr()
3339 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { in vget_prep()
3392 VNPASS(vp->v_holdcnt > 0, vp); in vget_finish()
3393 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); in vget_finish()
3413 VNPASS(vp->v_holdcnt > 0, vp); in vget_finish_ref()
3414 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); in vget_finish_ref()
3424 old = atomic_fetchadd_int(&vp->v_usecount, 1); in vget_finish_ref()
3428 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); in vget_finish_ref()
3431 refcount_release(&vp->v_holdcnt); in vget_finish_ref()
3452 old = refcount_acquire(&vp->v_usecount); in vrefact()
3461 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); in vlazy()
3463 if ((vp->v_mflag & VMP_LAZYLIST) != 0) in vlazy()
3470 mp = vp->v_mount; in vlazy()
3471 mtx_lock(&mp->mnt_listmtx); in vlazy()
3472 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { in vlazy()
3473 vp->v_mflag |= VMP_LAZYLIST; in vlazy()
3474 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); in vlazy()
3475 mp->mnt_lazyvnodelistsize++; in vlazy()
3477 mtx_unlock(&mp->mnt_listmtx); in vlazy()
3488 mp = vp->v_mount; in vunlazy()
3489 mtx_lock(&mp->mnt_listmtx); in vunlazy()
3490 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); in vunlazy()
3493 * has increased the hold count. It may have re-enqueued the in vunlazy()
3497 if (vp->v_holdcnt == 0) { in vunlazy()
3498 vp->v_mflag &= ~VMP_LAZYLIST; in vunlazy()
3499 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); in vunlazy()
3500 mp->mnt_lazyvnodelistsize--; in vunlazy()
3502 mtx_unlock(&mp->mnt_listmtx); in vunlazy()
3518 if (vp->v_mflag & VMP_LAZYLIST) { in vunlazy_gone()
3519 mp = vp->v_mount; in vunlazy_gone()
3520 mtx_lock(&mp->mnt_listmtx); in vunlazy_gone()
3521 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); in vunlazy_gone()
3522 vp->v_mflag &= ~VMP_LAZYLIST; in vunlazy_gone()
3523 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); in vunlazy_gone()
3524 mp->mnt_lazyvnodelistsize--; in vunlazy_gone()
3525 mtx_unlock(&mp->mnt_listmtx); in vunlazy_gone()
3534 VNPASS(vp->v_holdcnt > 0, vp); in vdefer_inactive()
3539 if (vp->v_iflag & VI_DEFINACT) { in vdefer_inactive()
3540 VNPASS(vp->v_holdcnt > 1, vp); in vdefer_inactive()
3544 if (vp->v_usecount > 0) { in vdefer_inactive()
3545 vp->v_iflag &= ~VI_OWEINACT; in vdefer_inactive()
3550 vp->v_iflag |= VI_DEFINACT; in vdefer_inactive()
3560 if ((vp->v_iflag & VI_OWEINACT) == 0) { in vdefer_inactive_unlocked()
3570 * Handle ->v_usecount transitioning to 0.
3587 VNPASS(vp->v_holdcnt > 0, vp); in vput_final()
3595 if (vp->v_usecount > 0) in vput_final()
3608 if (vp->v_iflag & VI_DOINGINACT) in vput_final()
3614 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to in vput_final()
3617 vp->v_iflag |= VI_OWEINACT; in vput_final()
3661 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, in vput_final()
3663 vp->v_vflag |= VV_UNREF; in vput_final()
3674 vp->v_vflag &= ~VV_UNREF; in vput_final()
3684 * Decrement ->v_usecount for a vnode.
3701 if (!refcount_release(&vp->v_usecount)) in vrele()
3716 if (refcount_release_if_last(&vp->v_usecount)) { in vput()
3734 if (!refcount_release(&vp->v_usecount)) in vunref()
3745 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); in vhold()
3758 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); in vholdnz()
3762 atomic_add_int(&vp->v_holdcnt, 1); in vholdnz()
3777 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3792 count = atomic_load_int(&vp->v_holdcnt); in vhold_smr()
3796 ("non-zero hold count with flags %d\n", count)); in vhold_smr()
3800 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { in vhold_smr()
3822 * Note: the vnode may gain more references after we transition the count 0->1.
3831 count = atomic_load_int(&vp->v_holdcnt); in vhold_recycle_free()
3835 ("non-zero hold count with flags %d\n", count)); in vhold_recycle_free()
3842 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { in vhold_recycle_free()
3855 mtx_assert(&vd->lock, MA_OWNED); in vdbatch_process()
3856 MPASS(curthread->td_pinned > 0); in vdbatch_process()
3857 MPASS(vd->index == VDBATCH_SIZE); in vdbatch_process()
3864 * if multiple CPUs get here (one real-world example is highly parallel in vdbatch_process()
3865 * do-nothing make, which will stat *tons* of vnodes). Since it is in vdbatch_process()
3866 * quasi-LRU (read: not that great even if fully honoured) provide an in vdbatch_process()
3875 vp = vd->tab[i]; in vdbatch_process()
3876 vd->tab[i] = NULL; in vdbatch_process()
3877 MPASS(vp->v_dbatchcpu != NOCPU); in vdbatch_process()
3878 vp->v_dbatchcpu = NOCPU; in vdbatch_process()
3880 vd->index = 0; in vdbatch_process()
3893 vp = vd->tab[i]; in vdbatch_process()
3894 vd->tab[i] = NULL; in vdbatch_process()
3897 MPASS(vp->v_dbatchcpu != NOCPU); in vdbatch_process()
3898 vp->v_dbatchcpu = NOCPU; in vdbatch_process()
3901 vd->index = 0; in vdbatch_process()
3913 if (vp->v_dbatchcpu != NOCPU) { in vdbatch_enqueue()
3920 mtx_lock(&vd->lock); in vdbatch_enqueue()
3921 MPASS(vd->index < VDBATCH_SIZE); in vdbatch_enqueue()
3922 MPASS(vd->tab[vd->index] == NULL); in vdbatch_enqueue()
3925 * ->v_dbatchcpu. in vdbatch_enqueue()
3927 vp->v_dbatchcpu = curcpu; in vdbatch_enqueue()
3928 vd->tab[vd->index] = vp; in vdbatch_enqueue()
3929 vd->index++; in vdbatch_enqueue()
3931 if (vd->index == VDBATCH_SIZE) in vdbatch_enqueue()
3933 mtx_unlock(&vd->lock); in vdbatch_enqueue()
3949 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); in vdbatch_dequeue()
3951 cpu = vp->v_dbatchcpu; in vdbatch_dequeue()
3956 mtx_lock(&vd->lock); in vdbatch_dequeue()
3957 for (i = 0; i < vd->index; i++) { in vdbatch_dequeue()
3958 if (vd->tab[i] != vp) in vdbatch_dequeue()
3960 vp->v_dbatchcpu = NOCPU; in vdbatch_dequeue()
3961 vd->index--; in vdbatch_dequeue()
3962 vd->tab[i] = vd->tab[vd->index]; in vdbatch_dequeue()
3963 vd->tab[vd->index] = NULL; in vdbatch_dequeue()
3966 mtx_unlock(&vd->lock); in vdbatch_dequeue()
3970 MPASS(vp->v_dbatchcpu == NOCPU); in vdbatch_dequeue()
3979 * there is at least one resident non-cached page, the vnode cannot
3994 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { in vdropl_final()
4015 if (refcount_release_if_not_last(&vp->v_holdcnt)) in vdrop()
4027 if (!refcount_release(&vp->v_holdcnt)) { in vdropl_impl()
4031 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); in vdropl_impl()
4032 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); in vdropl_impl()
4039 if (vp->v_mflag & VMP_LAZYLIST) { in vdropl_impl()
4100 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); in vinactivef()
4102 vp->v_iflag |= VI_DOINGINACT; in vinactivef()
4103 vp->v_iflag &= ~VI_OWEINACT; in vinactivef()
4112 * The write-out of the dirty pages is asynchronous. At the in vinactivef()
4116 if ((vp->v_vflag & VV_NOSYNC) == 0) in vinactivef()
4121 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); in vinactivef()
4122 vp->v_iflag &= ~VI_DOINGINACT; in vinactivef()
4134 if ((vp->v_iflag & VI_OWEINACT) == 0) in vinactive()
4136 if (vp->v_iflag & VI_DOINGINACT) in vinactive()
4138 if (vp->v_usecount > 0) { in vinactive()
4139 vp->v_iflag &= ~VI_OWEINACT; in vinactive()
4205 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { in vflush()
4226 error = VOP_GETATTR(vp, &vattr, td->td_ucred); in vflush()
4229 if ((vp->v_type == VNON || in vflush()
4231 (vp->v_writecount <= 0 || vp->v_type != VREG)) { in vflush()
4244 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { in vflush()
4263 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, in vflush()
4265 rootvp->v_usecount, rootrefs)); in vflush()
4266 if (busy == 1 && rootvp->v_usecount == rootrefs) { in vflush()
4279 for (; rootrefs > 0; rootrefs--) in vflush()
4310 if (vp->v_usecount == 0) { in vrecyclel()
4338 mp = atomic_load_ptr(&vp->v_mount); in vfs_notify_upper()
4341 if (TAILQ_EMPTY(&mp->mnt_notify)) in vfs_notify_upper()
4345 mp->mnt_upper_pending++; in vfs_notify_upper()
4346 KASSERT(mp->mnt_upper_pending > 0, in vfs_notify_upper()
4347 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); in vfs_notify_upper()
4348 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { in vfs_notify_upper()
4352 VFS_RECLAIM_LOWERVP(ump->mp, vp); in vfs_notify_upper()
4355 VFS_UNLINK_LOWERVP(ump->mp, vp); in vfs_notify_upper()
4360 mp->mnt_upper_pending--; in vfs_notify_upper()
4361 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && in vfs_notify_upper()
4362 mp->mnt_upper_pending == 0) { in vfs_notify_upper()
4363 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; in vfs_notify_upper()
4364 wakeup(&mp->mnt_uppers); in vfs_notify_upper()
4382 VNASSERT(vp->v_holdcnt, vp, in vgonel()
4411 active = vp->v_usecount > 0; in vgonel()
4412 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; in vgonel()
4413 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; in vgonel()
4418 if (vp->v_iflag & VI_DEFINACT) { in vgonel()
4419 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); in vgonel()
4420 vp->v_iflag &= ~VI_DEFINACT; in vgonel()
4423 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); in vgonel()
4440 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; in vgonel()
4445 if (vp->v_type == VSOCK) in vgonel()
4453 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) in vgonel()
4460 BO_LOCK(&vp->v_bufobj); in vgonel()
4461 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && in vgonel()
4462 vp->v_bufobj.bo_dirty.bv_cnt == 0 && in vgonel()
4463 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && in vgonel()
4464 vp->v_bufobj.bo_clean.bv_cnt == 0, in vgonel()
4472 object = vp->v_bufobj.bo_object; in vgonel()
4474 vp->v_bufobj.bo_flag |= BO_DEAD; in vgonel()
4475 BO_UNLOCK(&vp->v_bufobj); in vgonel()
4483 if (object != NULL && object->type == OBJT_VNODE && in vgonel()
4484 object->handle == vp) in vgonel()
4494 VNASSERT(vp->v_object == NULL, vp, in vgonel()
4499 if (vp->v_lockf != NULL) { in vgonel()
4501 vp->v_lockf = NULL; in vgonel()
4506 if (vp->v_mount == NULL) { in vgonel()
4519 * which got lost in the process of SMP-ifying the VFS layer. in vgonel()
4521 * Suppose a custom locking routine references ->v_data. in vgonel()
4524 * progressing, this very well may crash as ->v_data gets invalidated in vgonel()
4527 vp->v_vnlock = &vp->v_lock; in vgonel()
4528 vp->v_op = &dead_vnodeops; in vgonel()
4529 vp->v_type = VBAD; in vgonel()
4576 printf("type %s state %s op %p\n", vtypename[vp->v_type], in vn_printf()
4577 vstatename[vp->v_state], vp->v_op); in vn_printf()
4578 holdcnt = atomic_load_int(&vp->v_holdcnt); in vn_printf()
4580 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, in vn_printf()
4581 vp->v_seqc_users); in vn_printf()
4582 switch (vp->v_type) { in vn_printf()
4584 printf(" mountedhere %p\n", vp->v_mountedhere); in vn_printf()
4587 printf(" rdev %p\n", vp->v_rdev); in vn_printf()
4590 printf(" socket %p\n", vp->v_unpcb); in vn_printf()
4593 printf(" fifoinfo %p\n", vp->v_fifoinfo); in vn_printf()
4621 if (vp->v_vflag & VV_ROOT) in vn_printf()
4623 if (vp->v_vflag & VV_ISTTY) in vn_printf()
4625 if (vp->v_vflag & VV_NOSYNC) in vn_printf()
4627 if (vp->v_vflag & VV_ETERNALDEV) in vn_printf()
4629 if (vp->v_vflag & VV_CACHEDLABEL) in vn_printf()
4631 if (vp->v_vflag & VV_VMSIZEVNLOCK) in vn_printf()
4633 if (vp->v_vflag & VV_COPYONWRITE) in vn_printf()
4635 if (vp->v_vflag & VV_SYSTEM) in vn_printf()
4637 if (vp->v_vflag & VV_PROCDEP) in vn_printf()
4639 if (vp->v_vflag & VV_DELETED) in vn_printf()
4641 if (vp->v_vflag & VV_MD) in vn_printf()
4643 if (vp->v_vflag & VV_FORCEINSMQ) in vn_printf()
4645 if (vp->v_vflag & VV_READLINK) in vn_printf()
4647 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | in vn_printf()
4654 if (vp->v_iflag & VI_MOUNT) in vn_printf()
4656 if (vp->v_iflag & VI_DOINGINACT) in vn_printf()
4658 if (vp->v_iflag & VI_OWEINACT) in vn_printf()
4660 if (vp->v_iflag & VI_DEFINACT) in vn_printf()
4662 if (vp->v_iflag & VI_FOPENING) in vn_printf()
4664 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | in vn_printf()
4670 if (vp->v_mflag & VMP_LAZYLIST) in vn_printf()
4672 flags = vp->v_mflag & ~(VMP_LAZYLIST); in vn_printf()
4681 if (vp->v_object != NULL) in vn_printf()
4684 vp->v_object, vp->v_object->ref_count, in vn_printf()
4685 vp->v_object->resident_page_count, in vn_printf()
4686 vp->v_bufobj.bo_clean.bv_cnt, in vn_printf()
4687 vp->v_bufobj.bo_dirty.bv_cnt); in vn_printf()
4689 lockmgr_printinfo(vp->v_vnlock); in vn_printf()
4690 if (vp->v_data != NULL) in vn_printf()
4712 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { in DB_SHOW_COMMAND_FLAGS()
4713 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) in DB_SHOW_COMMAND_FLAGS()
4749 mp->mnt_stat.f_mntfromname, in DB_SHOW_COMMAND()
4750 mp->mnt_stat.f_mntonname, in DB_SHOW_COMMAND()
4751 mp->mnt_stat.f_fstypename); in DB_SHOW_COMMAND()
4760 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, in DB_SHOW_COMMAND()
4761 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); in DB_SHOW_COMMAND()
4764 mflags = mp->mnt_flag; in DB_SHOW_COMMAND()
4812 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), in DB_SHOW_COMMAND()
4818 flags = mp->mnt_kern_flag; in DB_SHOW_COMMAND()
4855 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), in DB_SHOW_COMMAND()
4861 opt = TAILQ_FIRST(mp->mnt_opt); in DB_SHOW_COMMAND()
4863 db_printf("%s", opt->name); in DB_SHOW_COMMAND()
4866 db_printf(", %s", opt->name); in DB_SHOW_COMMAND()
4872 sp = &mp->mnt_stat; in DB_SHOW_COMMAND()
4877 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, in DB_SHOW_COMMAND()
4878 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, in DB_SHOW_COMMAND()
4879 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, in DB_SHOW_COMMAND()
4880 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, in DB_SHOW_COMMAND()
4881 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, in DB_SHOW_COMMAND()
4882 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, in DB_SHOW_COMMAND()
4883 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, in DB_SHOW_COMMAND()
4884 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); in DB_SHOW_COMMAND()
4887 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); in DB_SHOW_COMMAND()
4888 if (jailed(mp->mnt_cred)) in DB_SHOW_COMMAND()
4889 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); in DB_SHOW_COMMAND()
4892 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); in DB_SHOW_COMMAND()
4893 db_printf(" mnt_gen = %d\n", mp->mnt_gen); in DB_SHOW_COMMAND()
4894 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); in DB_SHOW_COMMAND()
4896 mp->mnt_lazyvnodelistsize); in DB_SHOW_COMMAND()
4898 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); in DB_SHOW_COMMAND()
4899 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); in DB_SHOW_COMMAND()
4900 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); in DB_SHOW_COMMAND()
4902 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); in DB_SHOW_COMMAND()
4903 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); in DB_SHOW_COMMAND()
4905 mp->mnt_secondary_accwrites); in DB_SHOW_COMMAND()
4907 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); in DB_SHOW_COMMAND()
4908 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); in DB_SHOW_COMMAND()
4911 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { in DB_SHOW_COMMAND()
4912 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { in DB_SHOW_COMMAND()
4919 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { in DB_SHOW_COMMAND()
4920 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { in DB_SHOW_COMMAND()
4938 strcpy(xvfsp.vfc_name, vfsp->vfc_name); in vfsconf2x()
4939 xvfsp.vfc_typenum = vfsp->vfc_typenum; in vfsconf2x()
4940 xvfsp.vfc_refcount = vfsp->vfc_refcount; in vfsconf2x()
4941 xvfsp.vfc_flags = vfsp->vfc_flags; in vfsconf2x()
4967 strcpy(xvfsp.vfc_name, vfsp->vfc_name); in vfsconf2x32()
4968 xvfsp.vfc_typenum = vfsp->vfc_typenum; in vfsconf2x32()
4969 xvfsp.vfc_refcount = vfsp->vfc_refcount; in vfsconf2x32()
4970 xvfsp.vfc_flags = vfsp->vfc_flags; in vfsconf2x32()
4988 if (req->flags & SCTL_MASK32) in sysctl_vfs_conflist()
5010 int *name = (int *)arg1 - 1; /* XXX */ in vfs_sysctl()
5033 if (vfsp->vfc_typenum == name[2]) in vfs_sysctl()
5040 if (req->flags & SCTL_MASK32) in vfs_sysctl()
5065 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ in sysctl_ovfs_conf()
5066 strcpy(ovfs.vfc_name, vfsp->vfc_name); in sysctl_ovfs_conf()
5067 ovfs.vfc_index = vfsp->vfc_typenum; in sysctl_ovfs_conf()
5068 ovfs.vfc_refcount = vfsp->vfc_refcount; in sysctl_ovfs_conf()
5069 ovfs.vfc_flags = vfsp->vfc_flags; in sysctl_ovfs_conf()
5090 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); in unmount_or_warn()
5134 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); in vfs_deferred_inactive()
5135 if ((vp->v_iflag & VI_OWEINACT) == 0) { in vfs_deferred_inactive()
5153 return (vp->v_iflag & VI_DEFINACT); in vfs_periodic_inactive_filter()
5167 if ((vp->v_iflag & VI_DEFINACT) == 0) { in vfs_periodic_inactive()
5171 vp->v_iflag &= ~VI_DEFINACT; in vfs_periodic_inactive()
5185 if (vp->v_vflag & VV_NOSYNC) in vfs_want_msync()
5187 obj = vp->v_object; in vfs_want_msync()
5195 if (vp->v_vflag & VV_NOSYNC) in vfs_periodic_msync_inactive_filter()
5197 if (vp->v_iflag & VI_DEFINACT) in vfs_periodic_msync_inactive_filter()
5215 if (vp->v_iflag & VI_DEFINACT) { in vfs_periodic_msync_inactive()
5216 vp->v_iflag &= ~VI_DEFINACT; in vfs_periodic_msync_inactive()
5227 if ((vp->v_vflag & VV_NOSYNC) == 0) { in vfs_periodic_msync_inactive()
5249 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) in vfs_periodic()
5259 knlist_destroy(&vi->vpi_selinfo.si_note); in destroy_vpollinfo_free()
5260 mtx_destroy(&vi->vpi_lock); in destroy_vpollinfo_free()
5267 KASSERT(TAILQ_EMPTY(&vi->vpi_inotify), in destroy_vpollinfo()
5269 knlist_clear(&vi->vpi_selinfo.si_note, 1); in destroy_vpollinfo()
5270 seldrain(&vi->vpi_selinfo); in destroy_vpollinfo()
5275 * Initialize per-vnode helper structure to hold poll-related state.
5282 if (atomic_load_ptr(&vp->v_pollinfo) != NULL) in v_addpollinfo()
5285 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); in v_addpollinfo()
5286 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, in v_addpollinfo()
5288 TAILQ_INIT(&vi->vpi_inotify); in v_addpollinfo()
5290 if (vp->v_pollinfo != NULL) { in v_addpollinfo()
5295 vp->v_pollinfo = vi; in v_addpollinfo()
5301 * a vnode. Because poll uses the historic select-style interface
5312 mtx_lock(&vp->v_pollinfo->vpi_lock); in vn_pollrecord()
5313 if (vp->v_pollinfo->vpi_revents & events) { in vn_pollrecord()
5321 events &= vp->v_pollinfo->vpi_revents; in vn_pollrecord()
5322 vp->v_pollinfo->vpi_revents &= ~events; in vn_pollrecord()
5324 mtx_unlock(&vp->v_pollinfo->vpi_lock); in vn_pollrecord()
5327 vp->v_pollinfo->vpi_events |= events; in vn_pollrecord()
5328 selrecord(td, &vp->v_pollinfo->vpi_selinfo); in vn_pollrecord()
5329 mtx_unlock(&vp->v_pollinfo->vpi_lock); in vn_pollrecord()
5372 vp->v_type = VNON; in vfs_allocate_syncvnode()
5374 vp->v_vflag |= VV_FORCEINSMQ; in vfs_allocate_syncvnode()
5378 vp->v_vflag &= ~VV_FORCEINSMQ; in vfs_allocate_syncvnode()
5397 bo = &vp->v_bufobj; in vfs_allocate_syncvnode()
5400 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ in vfs_allocate_syncvnode()
5403 if (mp->mnt_syncer == NULL) { in vfs_allocate_syncvnode()
5404 mp->mnt_syncer = vp; in vfs_allocate_syncvnode()
5422 vp = mp->mnt_syncer; in vfs_deallocate_syncvnode()
5424 mp->mnt_syncer = NULL; in vfs_deallocate_syncvnode()
5436 struct vnode *syncvp = ap->a_vp; in sync_fsync()
5437 struct mount *mp = syncvp->v_mount; in sync_fsync()
5444 if (ap->a_waitfor != MNT_LAZY) in sync_fsync()
5450 bo = &syncvp->v_bufobj; in sync_fsync()
5482 vgone(ap->a_vp); in sync_inactive()
5494 struct vnode *vp = ap->a_vp; in sync_reclaim()
5497 bo = &vp->v_bufobj; in sync_reclaim()
5500 if (vp->v_mount->mnt_syncer == vp) in sync_reclaim()
5501 vp->v_mount->mnt_syncer = NULL; in sync_reclaim()
5502 if (bo->bo_flag & BO_ONWORKLST) { in sync_reclaim()
5504 syncer_worklist_len--; in sync_reclaim()
5505 sync_vnode_count--; in sync_reclaim()
5506 bo->bo_flag &= ~BO_ONWORKLST; in sync_reclaim()
5519 obj = vp->v_object; in vn_need_pageq_flush()
5520 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && in vn_need_pageq_flush()
5532 if (vp->v_type != VCHR) { in vn_isdisk_error()
5538 if (vp->v_rdev == NULL) in vn_isdisk_error()
5540 else if (vp->v_rdev->si_devsw == NULL) in vn_isdisk_error()
5542 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) in vn_isdisk_error()
5570 if (cred->cr_uid == file_uid) { in vaccess_vexec_smr()
5632 * Look for a normal, non-privileged way to access the file/directory in vaccess()
5639 if (cred->cr_uid == file_uid) { in vaccess()
5728 * Credential check based on process requesting service, and per-attribute
5737 * Kernel-invoked always succeeds. in extattr_check_cred()
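/*
 * Hedged sketch of the namespace policy this implements (signature
 * simplified, details recalled from memory): the system namespace demands a
 * privilege check, the user namespace reduces to a regular VOP_ACCESS()
 * check, and anything else is rejected.
 */
static int
extattr_check_cred_sketch(struct vnode *vp, int attrnamespace,
    struct ucred *cred, accmode_t accmode)
{
	if (cred == NOCRED)
		return (0);	/* kernel-invoked always succeeds */
	switch (attrnamespace) {
	case EXTATTR_NAMESPACE_SYSTEM:
		return (priv_check_cred(cred, PRIV_VFS_EXTATTR_SYSTEM));
	case EXTATTR_NAMESPACE_USER:
		return (VOP_ACCESS(vp, accmode, cred, curthread));
	default:
		return (EPERM);
	}
}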
5781 locked = !((vp->v_irflag & VIRF_CROSSMP) == 0 && in assert_vop_locked()
5782 witness_is_owned(&vp->v_vnlock->lock_object) == -1); in assert_vop_locked()
5799 locked = (vp->v_irflag & VIRF_CROSSMP) == 0 && in assert_vop_unlocked()
5800 witness_is_owned(&vp->v_vnlock->lock_object) == 1; in assert_vop_unlocked()
5825 if (ap->a_tvp != NULL) in vop_rename_fail()
5826 vput(ap->a_tvp); in vop_rename_fail()
5827 if (ap->a_tdvp == ap->a_tvp) in vop_rename_fail()
5828 vrele(ap->a_tdvp); in vop_rename_fail()
5830 vput(ap->a_tdvp); in vop_rename_fail()
5831 vrele(ap->a_fdvp); in vop_rename_fail()
5832 vrele(ap->a_fvp); in vop_rename_fail()
5843 if (a->a_tvp) in vop_rename_pre()
5844 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); in vop_rename_pre()
5845 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); in vop_rename_pre()
5846 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); in vop_rename_pre()
5847 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); in vop_rename_pre()
5850 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && in vop_rename_pre()
5851 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) in vop_rename_pre()
5852 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); in vop_rename_pre()
5853 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) in vop_rename_pre()
5854 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); in vop_rename_pre()
5857 if (a->a_tvp) in vop_rename_pre()
5858 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); in vop_rename_pre()
5859 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); in vop_rename_pre()
5862 VOP_GETWRITEMOUNT(a->a_tdvp, &tmp); in vop_rename_pre()
5863 lockmgr_assert(&tmp->mnt_renamelock, KA_XLOCKED); in vop_rename_pre()
5869 * filesystems relookup vnodes mid-rename. This is probably a bug. in vop_rename_pre()
5874 if (a->a_tdvp != a->a_fdvp) in vop_rename_pre()
5875 vhold(a->a_fdvp); in vop_rename_pre()
5876 if (a->a_tvp != a->a_fvp) in vop_rename_pre()
5877 vhold(a->a_fvp); in vop_rename_pre()
5878 vhold(a->a_tdvp); in vop_rename_pre()
5879 if (a->a_tvp) in vop_rename_pre()
5880 vhold(a->a_tvp); in vop_rename_pre()
5898 vp = a->a_vp; in vop_fplookup_vexec_debugpost()
5924 if (vp->v_type == VCHR) in vop_fsync_debugprepost()
5930 * may not be the same as vp->v_mount. However, if the in vop_fsync_debugprepost()
5959 vop_fsync_debugprepost(ap->a_vp, "fsync"); in vop_fsync_debugpre()
5968 vop_fsync_debugprepost(ap->a_vp, "fsync"); in vop_fsync_debugpost()
5977 vop_fsync_debugprepost(ap->a_vp, "fsync"); in vop_fdatasync_debugpre()
5986 vop_fsync_debugprepost(ap->a_vp, "fsync"); in vop_fdatasync_debugpost()
5996 bp = a->a_bp; in vop_strategy_debugpre()
6001 if ((bp->b_flags & B_CLUSTER) != 0) in vop_strategy_debugpre()
6012 if ((a->a_flags & LK_INTERLOCK) == 0) in vop_lock_debugpre()
6013 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); in vop_lock_debugpre()
6015 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); in vop_lock_debugpre()
6023 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); in vop_lock_debugpost()
6024 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) in vop_lock_debugpost()
6025 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); in vop_lock_debugpost()
6032 struct vnode *vp = a->a_vp; in vop_unlock_debugpre()
6043 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); in vop_need_inactive_debugpre()
6051 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); in vop_need_inactive_debugpost()
6062 INOTIFY(a->a_vp, IN_MODIFY); in vop_allocate_post()
6072 INOTIFY(a->a_invp, IN_ACCESS); in vop_copy_file_range_post()
6073 INOTIFY(a->a_outvp, IN_MODIFY); in vop_copy_file_range_post()
6084 dvp = a->a_dvp; in vop_create_pre()
6095 dvp = a->a_dvp; in vop_create_post()
6099 INOTIFY_NAME(*a->a_vpp, dvp, a->a_cnp, IN_CREATE); in vop_create_post()
6110 INOTIFY(a->a_vp, IN_MODIFY); in vop_deallocate_post()
6120 dvp = a->a_dvp; in vop_whiteout_pre()
6131 dvp = a->a_dvp; in vop_whiteout_post()
6142 vp = a->a_vp; in vop_deleteextattr_pre()
6153 vp = a->a_vp; in vop_deleteextattr_post()
6156 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); in vop_deleteextattr_post()
6168 vp = a->a_vp; in vop_link_pre()
6169 tdvp = a->a_tdvp; in vop_link_pre()
6181 vp = a->a_vp; in vop_link_post()
6182 tdvp = a->a_tdvp; in vop_link_post()
6188 INOTIFY_NAME(vp, tdvp, a->a_cnp, _IN_ATTRIB_LINKCOUNT); in vop_link_post()
6189 INOTIFY_NAME(vp, tdvp, a->a_cnp, IN_CREATE); in vop_link_post()
6200 dvp = a->a_dvp; in vop_mkdir_pre()
6211 dvp = a->a_dvp; in vop_mkdir_post()
6215 INOTIFY_NAME(*a->a_vpp, dvp, a->a_cnp, IN_CREATE); in vop_mkdir_post()
6227 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); in vop_mkdir_debugpost()
6238 dvp = a->a_dvp; in vop_mknod_pre()
6249 dvp = a->a_dvp; in vop_mknod_post()
6253 INOTIFY_NAME(*a->a_vpp, dvp, a->a_cnp, IN_CREATE); in vop_mknod_post()
6264 vp = a->a_vp; in vop_reclaim_post()
6279 dvp = a->a_dvp; in vop_remove_pre()
6280 vp = a->a_vp; in vop_remove_pre()
6293 dvp = a->a_dvp; in vop_remove_post()
6294 vp = a->a_vp; in vop_remove_post()
6300 INOTIFY_NAME(vp, dvp, a->a_cnp, _IN_ATTRIB_LINKCOUNT); in vop_remove_post()
6301 INOTIFY_NAME(vp, dvp, a->a_cnp, IN_DELETE); in vop_remove_post()
6313 if (a->a_fdvp == a->a_tdvp) { in vop_rename_post()
6314 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) in vop_rename_post()
6316 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); in vop_rename_post()
6317 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); in vop_rename_post()
6320 if (a->a_fvp->v_type == VDIR) in vop_rename_post()
6322 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); in vop_rename_post()
6324 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && in vop_rename_post()
6325 a->a_tvp->v_type == VDIR) in vop_rename_post()
6327 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); in vop_rename_post()
6330 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); in vop_rename_post()
6331 if (a->a_tvp) in vop_rename_post()
6332 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); in vop_rename_post()
6333 INOTIFY_MOVE(a->a_fvp, a->a_fdvp, a->a_fcnp, a->a_tvp, in vop_rename_post()
6334 a->a_tdvp, a->a_tcnp); in vop_rename_post()
6336 if (a->a_tdvp != a->a_fdvp) in vop_rename_post()
6337 vdrop(a->a_fdvp); in vop_rename_post()
6338 if (a->a_tvp != a->a_fvp) in vop_rename_post()
6339 vdrop(a->a_fvp); in vop_rename_post()
6340 vdrop(a->a_tdvp); in vop_rename_post()
6341 if (a->a_tvp) in vop_rename_post()
6342 vdrop(a->a_tvp); in vop_rename_post()
6352 dvp = a->a_dvp; in vop_rmdir_pre()
6353 vp = a->a_vp; in vop_rmdir_pre()
6366 dvp = a->a_dvp; in vop_rmdir_post()
6367 vp = a->a_vp; in vop_rmdir_post()
6371 vp->v_vflag |= VV_UNLINKED; in vop_rmdir_post()
6374 INOTIFY_NAME(vp, dvp, a->a_cnp, IN_DELETE); in vop_rmdir_post()
6385 vp = a->a_vp; in vop_setattr_pre()
6396 vp = a->a_vp; in vop_setattr_post()
6411 vp = a->a_vp; in vop_setacl_pre()
6422 vp = a->a_vp; in vop_setacl_post()
6433 vp = a->a_vp; in vop_setextattr_pre()
6444 vp = a->a_vp; in vop_setextattr_post()
6459 dvp = a->a_dvp; in vop_symlink_pre()
6470 dvp = a->a_dvp; in vop_symlink_post()
6474 INOTIFY_NAME(*a->a_vpp, dvp, a->a_cnp, IN_CREATE); in vop_symlink_post()
6484 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); in vop_open_post()
6485 INOTIFY(a->a_vp, IN_OPEN); in vop_open_post()
6494 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ in vop_close_post()
6495 !VN_IS_DOOMED(a->a_vp))) { in vop_close_post()
6496 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? in vop_close_post()
6498 INOTIFY(a->a_vp, (a->a_fflag & FWRITE) != 0 ? in vop_close_post()
6509 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); in vop_read_post()
6510 INOTIFY(a->a_vp, IN_ACCESS); in vop_read_post()
6520 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); in vop_read_pgcache_post()
6530 /* XXX - correct order? */
6556 kn->kn_flags |= EV_CLEAR; in filt_fsattach()
6572 kn->kn_fflags |= kn->kn_sfflags & hint; in filt_fsevent()
6574 return (kn->kn_fflags != 0); in filt_fsevent()
6584 if (req->newptr == NULL) in sysctl_vfs_ctl()
6596 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { in sysctl_vfs_ctl()
6685 struct vnode *vp = ap->a_vp; in vfs_kqfilter()
6686 struct knote *kn = ap->a_kn; in vfs_kqfilter()
6689 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && in vfs_kqfilter()
6690 kn->kn_filter != EVFILT_WRITE), in vfs_kqfilter()
6692 switch (kn->kn_filter) { in vfs_kqfilter()
6694 kn->kn_fop = &vfsread_filtops; in vfs_kqfilter()
6697 kn->kn_fop = &vfswrite_filtops; in vfs_kqfilter()
6700 kn->kn_fop = &vfsvnode_filtops; in vfs_kqfilter()
6706 kn->kn_hook = (caddr_t)vp; in vfs_kqfilter()
6709 if (vp->v_pollinfo == NULL) in vfs_kqfilter()
6711 knl = &vp->v_pollinfo->vpi_selinfo.si_note; in vfs_kqfilter()
6724 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsdetach()
6726 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); in filt_vfsdetach()
6727 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); in filt_vfsdetach()
6735 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsread()
6743 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { in filt_vfsread()
6745 kn->kn_flags |= (EV_EOF | EV_ONESHOT); in filt_vfsread()
6750 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) in filt_vfsread()
6754 kn->kn_data = size - kn->kn_fp->f_offset; in filt_vfsread()
6755 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; in filt_vfsread()
6764 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfswrite()
6772 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) in filt_vfswrite()
6773 kn->kn_flags |= (EV_EOF | EV_ONESHOT); in filt_vfswrite()
6775 kn->kn_data = 0; in filt_vfswrite()
6783 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsvnode()
6787 if (kn->kn_sfflags & hint) in filt_vfsvnode()
6788 kn->kn_fflags |= hint; in filt_vfsvnode()
6789 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { in filt_vfsvnode()
6790 kn->kn_flags |= EV_EOF; in filt_vfsvnode()
6794 res = (kn->kn_fflags != 0); in filt_vfsvnode()
6807 kin->knt_extdata = KNOTE_EXTDATA_VNODE; in filt_vfsdump()
6809 vp = kn->kn_fp->f_vnode; in filt_vfsdump()
6810 kin->knt_vnode.knt_vnode_type = vntype_to_kinfo(vp->v_type); in filt_vfsdump()
6814 error = VOP_GETATTR(vp, &va, curthread->td_ucred); in filt_vfsdump()
6818 kin->knt_vnode.knt_vnode_fsid = va.va_fsid; in filt_vfsdump()
6819 kin->knt_vnode.knt_vnode_fileid = va.va_fileid; in filt_vfsdump()
6822 fullpath = "-"; in filt_vfsdump()
6825 strlcpy(kin->knt_vnode.knt_vnode_fullpath, fullpath, in filt_vfsdump()
6826 sizeof(kin->knt_vnode.knt_vnode_fullpath)); in filt_vfsdump()
6839 vp = (struct vnode *)kn->kn_hook; in filt_vfscopy()
6849 if (dp->d_reclen > ap->a_uio->uio_resid) in vfs_read_dirent()
6851 error = uiomove(dp, dp->d_reclen, ap->a_uio); in vfs_read_dirent()
6853 if (ap->a_ncookies != NULL) { in vfs_read_dirent()
6854 if (ap->a_cookies != NULL) in vfs_read_dirent()
6855 free(ap->a_cookies, M_TEMP); in vfs_read_dirent()
6856 ap->a_cookies = NULL; in vfs_read_dirent()
6857 *ap->a_ncookies = 0; in vfs_read_dirent()
6861 if (ap->a_ncookies == NULL) in vfs_read_dirent()
6864 KASSERT(ap->a_cookies, in vfs_read_dirent()
6865 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); in vfs_read_dirent()
6867 *ap->a_cookies = realloc(*ap->a_cookies, in vfs_read_dirent()
6868 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); in vfs_read_dirent()
6869 (*ap->a_cookies)[*ap->a_ncookies] = off; in vfs_read_dirent()
6870 *ap->a_ncookies += 1; in vfs_read_dirent()
6876 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
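/*
 * Hedged sketch of that reduction (recalled from vfs_unixify_accmode(),
 * treat details as approximate): explicit-deny rules cannot be expressed
 * with mode bits, delete-style bits have no unix equivalent, and
 * VADMIN_PERMS collapses to plain VADMIN.
 */
static int
vfs_unixify_accmode_sketch(accmode_t *accmode)
{
	if (*accmode & VEXPLICIT_DENY) {
		*accmode = 0;
		return (0);
	}
	if (*accmode & (VDELETE_CHILD | VDELETE))
		return (EPERM);
	if (*accmode & VADMIN_PERMS) {
		*accmode &= ~VADMIN_PERMS;
		*accmode |= VADMIN;
	}
	*accmode &= ~(VSTAT_PERMS | VSYNCHRONIZE);
	return (0);
}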
6932 if (mp->mnt_rootvnode != NULL) { in vfs_cache_root_fallback()
6934 vp = mp->mnt_rootvnode; in vfs_cache_root_fallback()
6950 mp->mnt_rootvnode = NULL; in vfs_cache_root_fallback()
6961 if (mp->mnt_vfs_ops == 0) { in vfs_cache_root_fallback()
6963 if (mp->mnt_vfs_ops != 0) { in vfs_cache_root_fallback()
6967 if (mp->mnt_rootvnode == NULL) { in vfs_cache_root_fallback()
6969 mp->mnt_rootvnode = *vpp; in vfs_cache_root_fallback()
6971 if (mp->mnt_rootvnode != *vpp) { in vfs_cache_root_fallback()
6972 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { in vfs_cache_root_fallback()
6976 __func__, *vpp, mp->mnt_rootvnode); in vfs_cache_root_fallback()
6994 vp = atomic_load_ptr(&mp->mnt_rootvnode); in vfs_cache_root()
7018 MPASS(mp->mnt_vfs_ops > 0); in vfs_cache_root_clear()
7019 vp = mp->mnt_rootvnode; in vfs_cache_root_clear()
7022 mp->mnt_rootvnode = NULL; in vfs_cache_root_clear()
7030 MPASS(mp->mnt_vfs_ops > 0); in vfs_cache_root_set()
7032 mp->mnt_rootvnode = vp; in vfs_cache_root_set()
7049 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in __mnt_vnode_next_all()
7053 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) in __mnt_vnode_next_all()
7064 /* MNT_IUNLOCK(mp); -- done in above function */ in __mnt_vnode_next_all()
7068 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); in __mnt_vnode_next_all()
7069 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); in __mnt_vnode_next_all()
7083 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { in __mnt_vnode_first_all()
7085 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) in __mnt_vnode_first_all()
7101 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); in __mnt_vnode_first_all()
7117 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in __mnt_vnode_markerfree_all()
7118 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); in __mnt_vnode_markerfree_all()
7133 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in mnt_vnode_markerfree_lazy()
7156 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && in mnt_vnode_next_lazy_relock()
7159 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, in mnt_vnode_next_lazy_relock()
7162 mtx_assert(&mp->mnt_listmtx, MA_OWNED); in mnt_vnode_next_lazy_relock()
7164 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); in mnt_vnode_next_lazy_relock()
7169 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, in mnt_vnode_next_lazy_relock()
7174 mtx_unlock(&mp->mnt_listmtx); in mnt_vnode_next_lazy_relock()
7177 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); in mnt_vnode_next_lazy_relock()
7180 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); in mnt_vnode_next_lazy_relock()
7184 if (!refcount_release_if_not_last(&vp->v_holdcnt)) in mnt_vnode_next_lazy_relock()
7186 mtx_lock(&mp->mnt_listmtx); in mnt_vnode_next_lazy_relock()
7191 mtx_lock(&mp->mnt_listmtx); in mnt_vnode_next_lazy_relock()
7201 mtx_assert(&mp->mnt_listmtx, MA_OWNED); in mnt_vnode_next_lazy()
7202 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in mnt_vnode_next_lazy()
7206 if (vp->v_type == VMARKER) { in mnt_vnode_next_lazy()
7221 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, in mnt_vnode_next_lazy()
7223 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, in mnt_vnode_next_lazy()
7225 mtx_unlock(&mp->mnt_listmtx); in mnt_vnode_next_lazy()
7227 mtx_lock(&mp->mnt_listmtx); in mnt_vnode_next_lazy()
7231 * Try-lock because this is the wrong lock order. in mnt_vnode_next_lazy()
7236 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); in mnt_vnode_next_lazy()
7237 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, in mnt_vnode_next_lazy()
7239 VNPASS(vp->v_mount == mp, vp); in mnt_vnode_next_lazy()
7243 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); in mnt_vnode_next_lazy()
7247 mtx_unlock(&mp->mnt_listmtx); in mnt_vnode_next_lazy()
7251 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); in mnt_vnode_next_lazy()
7252 mtx_unlock(&mp->mnt_listmtx); in mnt_vnode_next_lazy()
7263 mtx_lock(&mp->mnt_listmtx); in __mnt_vnode_next_lazy()
7273 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) in __mnt_vnode_first_lazy()
7281 mtx_lock(&mp->mnt_listmtx); in __mnt_vnode_first_lazy()
7282 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); in __mnt_vnode_first_lazy()
7284 mtx_unlock(&mp->mnt_listmtx); in __mnt_vnode_first_lazy()
7299 mtx_lock(&mp->mnt_listmtx); in __mnt_vnode_markerfree_lazy()
7300 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); in __mnt_vnode_markerfree_lazy()
7301 mtx_unlock(&mp->mnt_listmtx); in __mnt_vnode_markerfree_lazy()
7309 if ((cnp->cn_flags & NOEXECCHECK) != 0) { in vn_dir_check_exec()
7310 cnp->cn_flags &= ~NOEXECCHECK; in vn_dir_check_exec()
7314 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); in vn_dir_check_exec()
7326 VNPASS(vp->v_holdcnt > 0, vp); in vn_seqc_write_begin_locked()
7327 VNPASS(vp->v_seqc_users >= 0, vp); in vn_seqc_write_begin_locked()
7328 vp->v_seqc_users++; in vn_seqc_write_begin_locked()
7329 if (vp->v_seqc_users == 1) in vn_seqc_write_begin_locked()
7330 seqc_sleepable_write_begin(&vp->v_seqc); in vn_seqc_write_begin_locked()
7347 VNPASS(vp->v_seqc_users > 0, vp); in vn_seqc_write_end_locked()
7348 vp->v_seqc_users--; in vn_seqc_write_end_locked()
7349 if (vp->v_seqc_users == 0) in vn_seqc_write_end_locked()
7350 seqc_sleepable_write_end(&vp->v_seqc); in vn_seqc_write_end_locked()
7372 vp->v_seqc = 0; in vn_seqc_init()
7373 vp->v_seqc_users = 0; in vn_seqc_init()
7380 VNPASS(seqc_in_modify(vp->v_seqc), vp); in vn_seqc_write_end_free()
7381 VNPASS(vp->v_seqc_users == 1, vp); in vn_seqc_write_end_free()
7394 atomic_store_short(&vp->v_irflag, flags | toset); in vn_irflag_set_locked()
7413 atomic_store_short(&vp->v_irflag, flags | toset); in vn_irflag_set_cond_locked()
7435 atomic_store_short(&vp->v_irflag, flags & ~tounset); in vn_irflag_unset_locked()
7480 switch (vp->v_state) { in vn_set_state_validate()
7518 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state); in vn_set_state_validate()
7519 panic("invalid state transition %d -> %d\n", vp->v_state, state); in vn_set_state_validate()