
1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
179 * sub-cache consisting mostly of such files. The system balances the size
180 * of this sub-cache with its complement to try to prevent either from
185 * to recycling of free vnodes. In the best-operating case, the cache is
190 * Otherwise, vnlru_proc() runs to reclaim enough vnodes (usually non-free
191 * ones) to reach one of these states. The watermarks are currently hard-
233 /* To keep more than one thread at a time from running vfs_getnewfsid */
250 /* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
265 * are delayed only about half the time that file data is delayed.
267 * about a third of the time that file data is delayed. Thus, there are
268 * SYNCER_MAXDELAY queues that are processed round-robin at a rate of
287 * bo->bo_synclist
299 static int syncer_maxdelay = SYNCER_MAXDELAY; /* maximum delay time */
300 static int syncdelay = 30; /* max time to delay syncing data */
301 static int filedelay = 30; /* time to delay syncing files */
303 "Time to delay syncing files (in seconds)");
304 static int dirdelay = 29; /* time to delay syncing directories */
306 "Time to delay syncing directories (in seconds)");
307 static int metadelay = 28; /* time to delay syncing metadata */
309 "Time to delay syncing metadata (in seconds)");
327 * we probably don't want to pause for the whole second each time.
340 static bool vstir; /* nonzero to stir non-free vnodes */
356 if (error != 0 || req->newptr == NULL) in sysctl_maxvnodes()
368 * desiredvnodes at the same time. Locking above only helps vnlru and in sysctl_maxvnodes()
407 if (error != 0 || req->newptr == NULL) in sysctl_wantfreevnodes()
439 if (req->newptr == NULL) in sysctl_try_reclaim_vnode()
441 if (req->newlen >= PATH_MAX) in sysctl_try_reclaim_vnode()
445 error = SYSCTL_IN(req, buf, req->newlen); in sysctl_try_reclaim_vnode()
449 buf[req->newlen] = '\0'; in sysctl_try_reclaim_vnode()
486 if (req->newptr == NULL) in sysctl_ftry_reclaim_vnode()
495 vp = fp->f_vnode; in sysctl_ftry_reclaim_vnode()
516 /* Shift count for (uintptr_t)vp to initialize vp->v_hash. */
550 bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, lblkno); in buf_lookup_ge()
552 bp = BUF_PCTRIE_LOOKUP_GE(&bv->bv_root, 0); in buf_lookup_ge()
553 if (bp != NULL && bp->b_lblkno < lblkno) in buf_lookup_ge()
567 error = BUF_PCTRIE_INSERT_LOOKUP_LE(&bv->bv_root, bp, n); in buf_insert_lookup_le()
569 if (*n == NULL && bp->b_lblkno >= 0) in buf_insert_lookup_le()
570 *n = BUF_PCTRIE_LOOKUP_LE(&bv->bv_root, ~0L); in buf_insert_lookup_le()
571 if (*n != NULL && (*n)->b_lblkno >= bp->b_lblkno) in buf_insert_lookup_le()
596 vp->v_type = VMARKER; in vn_alloc_marker()
597 vp->v_mount = mp; in vn_alloc_marker()
606 MPASS(vp->v_type == VMARKER); in vn_free_marker()
629 end1 = off1 + sizeof(((struct vnode *)NULL)->v_vnodelist); in vnode_dtor()
630 end2 = off2 + sizeof(((struct vnode *)NULL)->v_dbatchcpu); in vnode_dtor()
636 * KASAN's tracking is not byte-granular, any preceding fields sharing in vnode_dtor()
637 * the same 8-byte aligned word must also be marked valid. in vnode_dtor()
648 kasan_mark((void *)((char *)mem + off1), off2 - off1, in vnode_dtor()
649 off2 - off1, KASAN_UMA_FREED); in vnode_dtor()
653 kasan_mark((void *)((char *)mem + off2), size - off2, size - off2, in vnode_dtor()
671 vp->v_vnlock = &vp->v_lock; in vnode_init()
672 mtx_init(&vp->v_interlock, "vnode interlock", NULL, MTX_DEF); in vnode_init()
674 * By default, don't allow shared locks unless filesystems opt-in. in vnode_init()
676 lockinit(vp->v_vnlock, PVFS, "vnode", VLKTIMEOUT, in vnode_init()
681 bufobj_init(&vp->v_bufobj, vp); in vnode_init()
689 rangelock_init(&vp->v_rl); in vnode_init()
691 vp->v_dbatchcpu = NOCPU; in vnode_init()
693 vp->v_state = VSTATE_DEAD; in vnode_init()
698 vp->v_holdcnt = VHOLD_NO_SMR; in vnode_init()
699 vp->v_type = VNON; in vnode_init()
720 rangelock_destroy(&vp->v_rl); in vnode_fini()
721 lockdestroy(vp->v_vnlock); in vnode_fini()
722 mtx_destroy(&vp->v_interlock); in vnode_fini()
723 bo = &vp->v_bufobj; in vnode_fini()
732 * eliminate dependency on NFS-private header.
735 * private inode data, but the NFS-based estimation is ample enough.
736 * Still, we care about differences in the size between 64- and 32-bit
776 printf("Reducing kern.maxvnodes %lu -> %lu\n", in vntblinit()
807 * Preallocate enough nodes to support one-per buf so that in vntblinit()
833 mtx_init(&vd->lock, "vdbatch", NULL, MTX_DEF); in vntblinit()
844 * For a mountpoint mp, vfs_busy-enforced lock is before lock of any
853 * Within each file system, the lock order is C->A->B and F->D->E.
857 * C->A->B
859 * +->F->D->E
888 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); in vfs_busy()
889 MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0); in vfs_busy()
890 MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0); in vfs_busy()
913 while (mp->mnt_kern_flag & MNTK_UNMOUNT) { in vfs_busy()
914 KASSERT(TAILQ_EMPTY(&mp->mnt_uppers), in vfs_busy()
915 ("%s: non-empty upper mount list with pending unmount", in vfs_busy()
917 if (flags & MBF_NOWAIT || mp->mnt_kern_flag & MNTK_REFEXPIRE) { in vfs_busy()
926 mp->mnt_kern_flag |= MNTK_MWAIT; in vfs_busy()
934 mp->mnt_lockref++; in vfs_busy()
951 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); in vfs_unbusy()
961 c = --mp->mnt_lockref; in vfs_unbusy()
962 if (mp->mnt_vfs_ops == 0) { in vfs_unbusy()
963 MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0); in vfs_unbusy()
969 if (c == 0 && (mp->mnt_kern_flag & MNTK_DRAINING) != 0) { in vfs_unbusy()
970 MPASS(mp->mnt_kern_flag & MNTK_UNMOUNT); in vfs_unbusy()
972 mp->mnt_kern_flag &= ~MNTK_DRAINING; in vfs_unbusy()
973 wakeup(&mp->mnt_lockref); in vfs_unbusy()
989 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { in vfs_getvfs()
1004 * To avoid congestion on mountlist_mtx, implement a simple direct-mapped
1021 hash = fsid->val[0] ^ fsid->val[1]; in vfs_busyfs()
1022 hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1); in vfs_busyfs()
1024 if (mp == NULL || fsidcmp(&mp->mnt_stat.f_fsid, fsid) != 0) in vfs_busyfs()
1030 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) in vfs_busyfs()
1038 if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0) { in vfs_busyfs()
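vfs_busyfs() tries that small direct-mapped cache before the mountlist walk: the fsid hash indexes an array of mount pointers, a hit is trusted only after fsidcmp() and vfs_busy() both succeed, and a miss (or a stale slot) falls into the locked scan, which then refreshes the slot. A condensed sketch of the fast path, using a hypothetical array name fsid_cache:

	hash = fsid->val[0] ^ fsid->val[1];
	hash = (hash >> 16 ^ hash) & (FSID_CACHE_SIZE - 1);
	mp = fsid_cache[hash];			/* racy, unsynchronized read */
	if (mp != NULL && fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0 &&
	    vfs_busy(mp, 0) == 0) {
		/* Re-check: the slot may have been reused while we busied it. */
		if (fsidcmp(&mp->mnt_stat.f_fsid, fsid) == 0)
			return (mp);
		vfs_unbusy(mp);
	}
	/* Slow path: scan mountlist under mountlist_mtx, then fsid_cache[hash] = mp. */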
1062 if (jailed(td->td_ucred)) { in vfs_suser()
1067 if (!prison_allow(td->td_ucred, mp->mnt_vfc->vfc_prison_flag)) in vfs_suser()
1074 if (prison_check(td->td_ucred, mp->mnt_cred) != 0) in vfs_suser()
1080 * for the PRIV_VFS_MOUNT_OWNER privilege - it will be better verified in vfs_suser()
1085 if (!(mp->mnt_vfc->vfc_flags & VFCF_DELEGADMIN) && in vfs_suser()
1086 mp->mnt_cred->cr_uid != td->td_ucred->cr_uid) { in vfs_suser()
1097 * support 16-bit device numbers. We end up with unique val[0]'s for the
1102 * micro-optimization and a defense against returning the same fsid to
1115 mtype = mp->mnt_vfc->vfc_typenum; in vfs_getnewfsid()
1126 mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; in vfs_getnewfsid()
1127 mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; in vfs_getnewfsid()
1144 "1: sec + ns accurate to 1/HZ, 2: sec + ns truncated to us, "
1145 "3+: sec + ns (max. precision))");
1157 tsp->tv_sec = time_second; in vfs_timestamp()
1158 tsp->tv_nsec = 0; in vfs_timestamp()
1181 vap->va_type = VNON; in vattr_null()
1182 vap->va_size = VNOVAL; in vattr_null()
1183 vap->va_bytes = VNOVAL; in vattr_null()
1184 vap->va_mode = VNOVAL; in vattr_null()
1185 vap->va_nlink = VNOVAL; in vattr_null()
1186 vap->va_uid = VNOVAL; in vattr_null()
1187 vap->va_gid = VNOVAL; in vattr_null()
1188 vap->va_fsid = VNOVAL; in vattr_null()
1189 vap->va_fileid = VNOVAL; in vattr_null()
1190 vap->va_blocksize = VNOVAL; in vattr_null()
1191 vap->va_rdev = VNOVAL; in vattr_null()
1192 vap->va_atime.tv_sec = VNOVAL; in vattr_null()
1193 vap->va_atime.tv_nsec = VNOVAL; in vattr_null()
1194 vap->va_mtime.tv_sec = VNOVAL; in vattr_null()
1195 vap->va_mtime.tv_nsec = VNOVAL; in vattr_null()
1196 vap->va_ctime.tv_sec = VNOVAL; in vattr_null()
1197 vap->va_ctime.tv_nsec = VNOVAL; in vattr_null()
1198 vap->va_birthtime.tv_sec = VNOVAL; in vattr_null()
1199 vap->va_birthtime.tv_nsec = VNOVAL; in vattr_null()
1200 vap->va_flags = VNOVAL; in vattr_null()
1201 vap->va_gen = VNOVAL; in vattr_null()
1202 vap->va_vaflags = 0; in vattr_null()
1209 * - all parameters were picked years ago when RAM sizes were significantly
1211 * - it can pick vnodes based on pages used by the vm object, but filesystems
1213 * - since ZFS has its own aging policy it gets partially combated by this one
1214 * - a dedicated method should be provided for filesystems to let them decide
1260 if (__predict_false(vp->v_type == VMARKER)) in vlrureclaim()
1269 if (vp->v_usecount > 0 || vp->v_holdcnt == 0 || in vlrureclaim()
1270 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src))) in vlrureclaim()
1273 if (vp->v_type == VBAD || vp->v_type == VNON) in vlrureclaim()
1276 object = atomic_load_ptr(&vp->v_object); in vlrureclaim()
1277 if (object == NULL || object->resident_page_count > trigger) { in vlrureclaim()
1283 * vnode some time after it gets returned from getnewvnode, in vlrureclaim()
1290 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { in vlrureclaim()
1294 if (vp->v_mount == NULL) { in vlrureclaim()
1315 if (vp->v_usecount > 0 || in vlrureclaim()
1316 (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || in vlrureclaim()
1317 (vp->v_object != NULL && vp->v_object->handle == vp && in vlrureclaim()
1318 vp->v_object->resident_page_count > trigger)) { in vlrureclaim()
1335 MPASS(vp->v_type != VMARKER); in vlrureclaim()
1408 if (__predict_false(vp->v_type == VMARKER)) in vnlru_free_impl()
1410 if (vp->v_holdcnt > 0) in vnlru_free_impl()
1414 * of mount point. Note that mp is type-safe, the in vnlru_free_impl()
1418 if (mnt_op != NULL && (mp = vp->v_mount) != NULL && in vnlru_free_impl()
1419 mp->mnt_op != mnt_op) { in vnlru_free_impl()
1422 if (__predict_false(vp->v_type == VBAD || vp->v_type == VNON)) { in vnlru_free_impl()
1443 * The solution would be to pre-check if the vnode is likely to in vnlru_free_impl()
1444 * be recycle-able, but it needs to happen with the in vnlru_free_impl()
1452 count--; in vnlru_free_impl()
1460 return (ocount - count); in vnlru_free_impl()
1502 VNPASS(mvp->v_type == VMARKER, mvp); in vnlru_free_vfsops()
1534 gapvnodes = imax(desiredvnodes - wantfreevnodes, 100); in vnlru_recalc()
1535 vhiwat = gapvnodes / 11; /* 9% -- just under the 10% in vlrureclaim() */ in vnlru_recalc()
1593 (*lfreevnodes)--; in vfs_freevnodes_dec()
1594 if (__predict_false(*lfreevnodes == -VNLRU_FREEVNODES_SLOP)) in vfs_freevnodes_dec()
1610 slop = rfreevnodes - rfreevnodes_old; in vnlru_read_freevnodes()
1612 slop = rfreevnodes_old - rfreevnodes; in vnlru_read_freevnodes()
1616 rfreevnodes += cpuid_to_pcpu[cpu]->pc_vfs_freevnodes; in vnlru_read_freevnodes()
1630 space = desiredvnodes - rnumvnodes; in vnlru_under()
1634 space += rfreevnodes - wantfreevnodes; in vnlru_under()
1683 * vnodes, but without having to rewrite the machinery at this time. Said
1694 * seconds to execute (time varies *wildly* between runs). With the workaround
1702 * -1 -- fallback to regular vnlru loop
1703 * 0 -- do nothing, go to sleep
1704 * >0 -- recycle this many vnodes
1712 return (-1); in vnlru_proc_light_pick()
1726 if (rnumvnodes - rfreevnodes >= desiredvnodes || in vnlru_proc_light_pick()
1728 return (-1); in vnlru_proc_light_pick()
1731 return (rnumvnodes - desiredvnodes); in vnlru_proc_light_pick()
1743 return (-1); in vnlru_proc_light_pick()
1757 if (freecount == -1) in vnlru_proc_light()
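The return convention documented above (-1: fall back, 0: just sleep, >0: recycle that many) is consumed by vnlru_proc_light(); a hedged sketch of the caller, with the locking asserts and the sleep call elided (vnlru_free_vnlru() here stands for whatever helper frees N vnodes from the free list):

	freecount = vnlru_proc_light_pick();
	if (freecount == -1)
		return (false);		/* punt to the regular vnlru loop */
	if (freecount != 0) {
		/* recycle exactly this many vnodes off the free list */
		vnlru_free_vnlru(freecount);
	}
	/* 0, or done freeing: nothing else to do until woken again */
	return (true);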
1807 vnlru_free_locked_vnlru(rnumvnodes - desiredvnodes); in vnlru_proc()
1813 * when it is not over-full and has space for about a 4% in vnlru_proc()
1837 usevnodes = rnumvnodes - rfreevnodes; in vnlru_proc()
1909 VNPASS(vp->v_holdcnt > 0, vp); in vtryrecycle()
1939 if (vp->v_usecount) { in vtryrecycle()
1969 * Given the age of this commit (almost 15 years at the time of writing this
2010 if (mp == NULL || (mp->mnt_kern_flag & MNTK_SUSPEND) == 0) { in vn_alloc_hard()
2072 KASSERT(vops->registered, in getnewvnode()
2077 if (td->td_vp_reserved != NULL) { in getnewvnode()
2078 vp = td->td_vp_reserved; in getnewvnode()
2079 td->td_vp_reserved = NULL; in getnewvnode()
2101 lo = &vp->v_vnlock->lock_object; in getnewvnode()
2103 if (lo->lo_name != tag) { in getnewvnode()
2105 lo->lo_name = tag; in getnewvnode()
2112 * By default, don't allow shared locks unless filesystems opt-in. in getnewvnode()
2114 vp->v_vnlock->lock_object.lo_flags |= LK_NOSHARE; in getnewvnode()
2118 KASSERT(vp->v_object == NULL, ("stale v_object %p", vp)); in getnewvnode()
2119 KASSERT(vp->v_lockf == NULL, ("stale v_lockf %p", vp)); in getnewvnode()
2120 KASSERT(vp->v_pollinfo == NULL, ("stale v_pollinfo %p", vp)); in getnewvnode()
2121 vp->v_type = VNON; in getnewvnode()
2122 vp->v_op = vops; in getnewvnode()
2123 vp->v_irflag = 0; in getnewvnode()
2126 vp->v_bufobj.bo_ops = &buf_ops_bio; in getnewvnode()
2133 if (mp != NULL && (mp->mnt_flag & MNT_MULTILABEL) == 0) in getnewvnode()
2137 vp->v_bufobj.bo_bsize = mp->mnt_stat.f_iosize; in getnewvnode()
2146 vp->v_hash = (uintptr_t)vp >> vnsz2log; in getnewvnode()
2158 MPASS(td->td_vp_reserved == NULL); in getnewvnode_reserve()
2159 td->td_vp_reserved = vn_alloc(NULL); in getnewvnode_reserve()
2168 if (td->td_vp_reserved != NULL) { in getnewvnode_drop_reserve()
2169 vn_free(td->td_vp_reserved); in getnewvnode_drop_reserve()
2170 td->td_vp_reserved = NULL; in getnewvnode_drop_reserve()
2194 bo = &vp->v_bufobj; in freevnode()
2195 VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); in freevnode()
2196 VNPASS(vp->v_holdcnt == VHOLD_NO_SMR, vp); in freevnode()
2197 VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); in freevnode()
2198 VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); in freevnode()
2199 VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); in freevnode()
2200 VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); in freevnode()
2201 VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, in freevnode()
2203 VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); in freevnode()
2204 VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, in freevnode()
2206 VNASSERT((vp->v_iflag & (VI_DOINGINACT | VI_OWEINACT)) == 0, vp, in freevnode()
2214 if (vp->v_pollinfo != NULL) { in freevnode()
2221 destroy_vpollinfo(vp->v_pollinfo); in freevnode()
2223 vp->v_pollinfo = NULL; in freevnode()
2225 vp->v_mountedhere = NULL; in freevnode()
2226 vp->v_unpcb = NULL; in freevnode()
2227 vp->v_rdev = NULL; in freevnode()
2228 vp->v_fifoinfo = NULL; in freevnode()
2229 vp->v_iflag = 0; in freevnode()
2230 vp->v_vflag = 0; in freevnode()
2231 bo->bo_flag = 0; in freevnode()
2243 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); in delmntque()
2245 mp = vp->v_mount; in delmntque()
2248 vp->v_mount = NULL; in delmntque()
2249 VNASSERT(mp->mnt_nvnodelistsize > 0, vp, in delmntque()
2251 TAILQ_REMOVE(&mp->mnt_nvnodelist, vp, v_nmntvnodes); in delmntque()
2252 mp->mnt_nvnodelistsize--; in delmntque()
2265 KASSERT(vp->v_mount == NULL, in insmntque1_int()
2268 if ((mp->mnt_kern_flag & MNTK_UNLOCKED_INSMNTQUE) == 0) { in insmntque1_int()
2269 ASSERT_VOP_ELOCKED(vp, "insmntque: non-locked vp"); in insmntque1_int()
2287 if (((mp->mnt_kern_flag & MNTK_UNMOUNT) != 0 && in insmntque1_int()
2288 ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0 || in insmntque1_int()
2289 mp->mnt_nvnodelistsize == 0)) && in insmntque1_int()
2290 (vp->v_vflag & VV_FORCEINSMQ) == 0) { in insmntque1_int()
2294 vp->v_data = NULL; in insmntque1_int()
2295 vp->v_op = &dead_vnodeops; in insmntque1_int()
2301 vp->v_mount = mp; in insmntque1_int()
2303 TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes); in insmntque1_int()
2304 VNASSERT(mp->mnt_nvnodelistsize >= 0, vp, in insmntque1_int()
2306 mp->mnt_nvnodelistsize++; in insmntque1_int()
2345 if (bo->bo_dirty.bv_cnt > 0) { in bufobj_invalbuf()
2353 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) { in bufobj_invalbuf()
2365 error = flushbuflist(&bo->bo_clean, in bufobj_invalbuf()
2368 error = flushbuflist(&bo->bo_dirty, in bufobj_invalbuf()
2378 * have write I/O in-progress but if there is a VM object then the in bufobj_invalbuf()
2379 * VM object can also have read-I/O in-progress. in bufobj_invalbuf()
2383 if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) { in bufobj_invalbuf()
2385 vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx"); in bufobj_invalbuf()
2388 } while (bo->bo_numoutput > 0); in bufobj_invalbuf()
2394 if (bo->bo_object != NULL && in bufobj_invalbuf()
2396 VM_OBJECT_WLOCK(bo->bo_object); in bufobj_invalbuf()
2397 vm_object_page_remove(bo->bo_object, 0, 0, (flags & V_SAVE) ? in bufobj_invalbuf()
2399 VM_OBJECT_WUNLOCK(bo->bo_object); in bufobj_invalbuf()
2405 V_ALLOWCLEAN)) == 0 && (bo->bo_dirty.bv_cnt > 0 || in bufobj_invalbuf()
2406 bo->bo_clean.bv_cnt > 0)) in bufobj_invalbuf()
2409 bo->bo_dirty.bv_cnt > 0) in bufobj_invalbuf()
2426 if (vp->v_object != NULL && vp->v_object->handle != vp) in vinvalbuf()
2428 return (bufobj_invalbuf(&vp->v_bufobj, flags, slpflag, slptimeo)); in vinvalbuf()
2447 TAILQ_FOREACH_SAFE(bp, &bufv->bv_hd, b_bobufs, nbp) { in flushbuflist()
2456 (((flags & V_NORMAL) && (bp->b_xflags & BX_ALTDATA) != 0) || in flushbuflist()
2457 ((flags & V_ALT) && (bp->b_xflags & BX_ALTDATA) == 0))) { in flushbuflist()
2461 lblkno = nbp->b_lblkno; in flushbuflist()
2462 xflags = nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN); in flushbuflist()
2472 KASSERT(bp->b_bufobj == bo, in flushbuflist()
2474 bp, bp->b_bufobj, bo)); in flushbuflist()
2481 if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) && in flushbuflist()
2484 bp->b_flags |= B_ASYNC; in flushbuflist()
2490 bp->b_flags |= (B_INVAL | B_RELBUF); in flushbuflist()
2491 bp->b_flags &= ~B_ASYNC; in flushbuflist()
2497 if (nbp == NULL || (nbp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) in flushbuflist()
2516 if (bp == NULL || bp->b_lblkno >= endn) in bnoreuselist()
2526 KASSERT(bp->b_bufobj == bo, in bnoreuselist()
2528 bp, bp->b_bufobj, bo)); in bnoreuselist()
2529 lblkno = bp->b_lblkno + 1; in bnoreuselist()
2530 if ((bp->b_flags & B_MANAGED) == 0) in bnoreuselist()
2532 bp->b_flags |= B_RELBUF; in bnoreuselist()
2539 if ((bp->b_flags & B_VMIO) != 0) in bnoreuselist()
2540 bp->b_flags |= B_NOREUSE; in bnoreuselist()
2569 bo = &vp->v_bufobj; in vtruncbuf()
2581 TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) { in vtruncbuf()
2582 if (bp->b_lblkno >= 0) in vtruncbuf()
2593 VNASSERT((bp->b_flags & B_DELWRI), vp, in vtruncbuf()
2626 bo = &vp->v_bufobj; in v_inval_buf_range()
2628 MPASS(blksize == bo->bo_bsize); in v_inval_buf_range()
2634 vn_pages_remove(vp, OFF_TO_IDX(start), OFF_TO_IDX(end + PAGE_SIZE - 1)); in v_inval_buf_range()
2652 bv = clean ? &bo->bo_clean : &bo->bo_dirty; in v_inval_buf_range_locked()
2656 TAILQ_FOREACH_FROM_SAFE(bp, &bv->bv_hd, b_bobufs, nbp) { in v_inval_buf_range_locked()
2657 if (bp->b_lblkno >= endlbn) in v_inval_buf_range_locked()
2667 bp->b_flags |= B_INVAL | B_RELBUF; in v_inval_buf_range_locked()
2668 bp->b_flags &= ~B_ASYNC; in v_inval_buf_range_locked()
2674 (((nbp->b_xflags & in v_inval_buf_range_locked()
2676 nbp->b_vp != vp || in v_inval_buf_range_locked()
2677 (nbp->b_flags & B_DELWRI) == (clean? B_DELWRI: 0))) in v_inval_buf_range_locked()
2680 } while (clean = !clean, anyfreed-- > 0); in v_inval_buf_range_locked()
2690 flags = bp->b_xflags; in buf_vlist_remove()
2692 KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp)); in buf_vlist_remove()
2693 ASSERT_BO_WLOCKED(bp->b_bufobj); in buf_vlist_remove()
2699 bv = &bp->b_bufobj->bo_dirty; in buf_vlist_remove()
2701 bv = &bp->b_bufobj->bo_clean; in buf_vlist_remove()
2702 BUF_PCTRIE_REMOVE(&bv->bv_root, bp->b_lblkno); in buf_vlist_remove()
2703 TAILQ_REMOVE(&bv->bv_hd, bp, b_bobufs); in buf_vlist_remove()
2704 bv->bv_cnt--; in buf_vlist_remove()
2705 bp->b_xflags &= ~(BX_VNDIRTY | BX_VNCLEAN); in buf_vlist_remove()
2721 KASSERT((bo->bo_flag & BO_NOBUFS) == 0, in buf_vlist_find_or_add()
2723 KASSERT((xflags & BX_VNDIRTY) == 0 || (bo->bo_flag & BO_DEAD) == 0, in buf_vlist_find_or_add()
2725 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == xflags, in buf_vlist_find_or_add()
2729 bv = &bo->bo_dirty; in buf_vlist_find_or_add()
2731 bv = &bo->bo_clean; in buf_vlist_find_or_add()
2739 KASSERT(n->b_lblkno <= bp->b_lblkno, in buf_vlist_find_or_add()
2742 KASSERT((n->b_lblkno == bp->b_lblkno) == (error == EEXIST), in buf_vlist_find_or_add()
2751 KASSERT(TAILQ_EMPTY(&bv->bv_hd) || in buf_vlist_find_or_add()
2752 bp->b_lblkno < TAILQ_FIRST(&bv->bv_hd)->b_lblkno, in buf_vlist_find_or_add()
2755 bp, TAILQ_FIRST(&bv->bv_hd))); in buf_vlist_find_or_add()
2756 TAILQ_INSERT_HEAD(&bv->bv_hd, bp, b_bobufs); in buf_vlist_find_or_add()
2759 bp->b_lblkno < TAILQ_NEXT(n, b_bobufs)->b_lblkno, in buf_vlist_find_or_add()
2763 TAILQ_INSERT_AFTER(&bv->bv_hd, n, bp, b_bobufs); in buf_vlist_find_or_add()
2766 bv->bv_cnt++; in buf_vlist_find_or_add()
2780 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0, in buf_vlist_add()
2781 ("buf_vlist_add: Buf %p has existing xflags %d", bp, bp->b_xflags)); in buf_vlist_add()
2782 bp->b_xflags |= xflags; in buf_vlist_add()
2797 bp = BUF_PCTRIE_LOOKUP(&bo->bo_clean.bv_root, lblkno); in gbincore()
2800 return (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, lblkno)); in gbincore()
2805 * on SMR for safe lookup, and bufs being in a no-free zone to provide type
2807 * already be invalid by the time this function returns.
2815 bp = BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_clean.bv_root, lblkno); in gbincore_unlocked()
2818 return (BUF_PCTRIE_LOOKUP_UNLOCKED(&bo->bo_dirty.bv_root, lblkno)); in gbincore_unlocked()
2830 bo = &vp->v_bufobj; in bgetvp()
2832 VNASSERT(bp->b_vp == NULL, bp->b_vp, ("bgetvp: not free")); in bgetvp()
2834 CTR3(KTR_BUF, "bgetvp(%p) vp %p flags %X", bp, vp, bp->b_flags); in bgetvp()
2835 VNASSERT((bp->b_xflags & (BX_VNDIRTY|BX_VNCLEAN)) == 0, vp, in bgetvp()
2842 bp->b_vp = vp; in bgetvp()
2843 bp->b_bufobj = bo; in bgetvp()
2844 bp->b_xflags |= BX_VNCLEAN; in bgetvp()
2847 if (BUF_PCTRIE_LOOKUP(&bo->bo_dirty.bv_root, bp->b_lblkno) == NULL) in bgetvp()
2856 bp->b_vp = NULL; in bgetvp()
2857 bp->b_bufobj = NULL; in bgetvp()
2858 bp->b_xflags &= ~BX_VNCLEAN; in bgetvp()
2871 CTR3(KTR_BUF, "brelvp(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags); in brelvp()
2872 KASSERT(bp->b_vp != NULL, ("brelvp: NULL")); in brelvp()
2877 vp = bp->b_vp; /* XXX */ in brelvp()
2878 bo = bp->b_bufobj; in brelvp()
2881 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { in brelvp()
2882 bo->bo_flag &= ~BO_ONWORKLST; in brelvp()
2885 syncer_worklist_len--; in brelvp()
2888 bp->b_vp = NULL; in brelvp()
2889 bp->b_bufobj = NULL; in brelvp()
2905 if (bo->bo_flag & BO_ONWORKLST) in vn_syncer_add_to_worklist()
2908 bo->bo_flag |= BO_ONWORKLST; in vn_syncer_add_to_worklist()
2912 if (delay > syncer_maxdelay - 2) in vn_syncer_add_to_worklist()
2913 delay = syncer_maxdelay - 2; in vn_syncer_add_to_worklist()
2926 len = syncer_worklist_len - sync_vnode_count; in sysctl_vfs_worklist_len()
2971 MPASSERT(mp == NULL || (curthread->td_pflags & TDP_IGNSUSP) != 0 || in sync_vnode()
2972 (mp->mnt_kern_flag & MNTK_SUSPENDED) == 0, mp, in sync_vnode()
2979 if (((*bo)->bo_flag & BO_ONWORKLST) != 0) { in sync_vnode()
3015 td->td_pflags |= TDP_NORUNNINGBUF; in sched_sync()
3017 EVENTHANDLER_REGISTER(shutdown_pre_sync, syncer_shutdown, td->td_proc, in sched_sync()
3025 kproc_suspend_check(td->td_proc); in sched_sync()
3028 net_worklist_len = syncer_worklist_len - sync_vnode_count; in sched_sync()
3040 * Push files whose dirty time has expired. Be careful in sched_sync()
3067 * Keep track of the last time there was anything in sched_sync()
3095 syncer_final_iter--; in sched_sync()
3107 rushjob -= 1; in sched_sync()
3111 * Just sleep for a short period of time between in sched_sync()
3117 * again. We can still lose time if any single round in sched_sync()
3139 * normal turn time, otherwise it could take over the cpu.
3207 vp = bp->b_vp; in reassignbuf()
3208 bo = bp->b_bufobj; in reassignbuf()
3210 KASSERT((bp->b_flags & B_PAGING) == 0, in reassignbuf()
3214 bp, bp->b_vp, bp->b_flags); in reassignbuf()
3217 if ((bo->bo_flag & BO_NONSTERILE) == 0) { in reassignbuf()
3224 bo->bo_flag |= BO_NONSTERILE; in reassignbuf()
3233 if (bp->b_flags & B_DELWRI) { in reassignbuf()
3234 if ((bo->bo_flag & BO_ONWORKLST) == 0) { in reassignbuf()
3235 switch (vp->v_type) { in reassignbuf()
3251 if ((bo->bo_flag & BO_ONWORKLST) && bo->bo_dirty.bv_cnt == 0) { in reassignbuf()
3254 syncer_worklist_len--; in reassignbuf()
3256 bo->bo_flag &= ~BO_ONWORKLST; in reassignbuf()
3260 bv = &bo->bo_clean; in reassignbuf()
3261 bp = TAILQ_FIRST(&bv->bv_hd); in reassignbuf()
3262 KASSERT(bp == NULL || bp->b_bufobj == bo, in reassignbuf()
3263 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in reassignbuf()
3264 bp = TAILQ_LAST(&bv->bv_hd, buflists); in reassignbuf()
3265 KASSERT(bp == NULL || bp->b_bufobj == bo, in reassignbuf()
3266 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in reassignbuf()
3267 bv = &bo->bo_dirty; in reassignbuf()
3268 bp = TAILQ_FIRST(&bv->bv_hd); in reassignbuf()
3269 KASSERT(bp == NULL || bp->b_bufobj == bo, in reassignbuf()
3270 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in reassignbuf()
3271 bp = TAILQ_LAST(&bv->bv_hd, buflists); in reassignbuf()
3272 KASSERT(bp == NULL || bp->b_bufobj == bo, in reassignbuf()
3273 ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo)); in reassignbuf()
3282 VNASSERT(vp->v_type == VNON && vp->v_data == NULL && vp->v_iflag == 0, in v_init_counters()
3286 refcount_init(&vp->v_holdcnt, 1); in v_init_counters()
3287 refcount_init(&vp->v_usecount, 1); in v_init_counters()
3298 * may already be in the process of being freed by the time they get to it.
3307 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { in vget_prep_smr()
3323 if (refcount_acquire_if_not_zero(&vp->v_usecount)) { in vget_prep()
3367 VNPASS(vp->v_holdcnt > 0, vp); in vget_finish()
3368 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); in vget_finish()
3388 VNPASS(vp->v_holdcnt > 0, vp); in vget_finish_ref()
3389 VNPASS(vs == VGET_HOLDCNT || vp->v_usecount > 0, vp); in vget_finish_ref()
3399 old = atomic_fetchadd_int(&vp->v_usecount, 1); in vget_finish_ref()
3403 old = atomic_fetchadd_int(&vp->v_holdcnt, -1); in vget_finish_ref()
3406 refcount_release(&vp->v_holdcnt); in vget_finish_ref()
3427 old = refcount_acquire(&vp->v_usecount); in vrefact()
3436 VNASSERT(vp->v_holdcnt > 0, vp, ("%s: vnode not held", __func__)); in vlazy()
3438 if ((vp->v_mflag & VMP_LAZYLIST) != 0) in vlazy()
3445 mp = vp->v_mount; in vlazy()
3446 mtx_lock(&mp->mnt_listmtx); in vlazy()
3447 if ((vp->v_mflag & VMP_LAZYLIST) == 0) { in vlazy()
3448 vp->v_mflag |= VMP_LAZYLIST; in vlazy()
3449 TAILQ_INSERT_TAIL(&mp->mnt_lazyvnodelist, vp, v_lazylist); in vlazy()
3450 mp->mnt_lazyvnodelistsize++; in vlazy()
3452 mtx_unlock(&mp->mnt_listmtx); in vlazy()
3463 mp = vp->v_mount; in vunlazy()
3464 mtx_lock(&mp->mnt_listmtx); in vunlazy()
3465 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); in vunlazy()
3468 * has increased the hold count. It may have re-enqueued the in vunlazy()
3472 if (vp->v_holdcnt == 0) { in vunlazy()
3473 vp->v_mflag &= ~VMP_LAZYLIST; in vunlazy()
3474 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); in vunlazy()
3475 mp->mnt_lazyvnodelistsize--; in vunlazy()
3477 mtx_unlock(&mp->mnt_listmtx); in vunlazy()
3493 if (vp->v_mflag & VMP_LAZYLIST) { in vunlazy_gone()
3494 mp = vp->v_mount; in vunlazy_gone()
3495 mtx_lock(&mp->mnt_listmtx); in vunlazy_gone()
3496 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); in vunlazy_gone()
3497 vp->v_mflag &= ~VMP_LAZYLIST; in vunlazy_gone()
3498 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, vp, v_lazylist); in vunlazy_gone()
3499 mp->mnt_lazyvnodelistsize--; in vunlazy_gone()
3500 mtx_unlock(&mp->mnt_listmtx); in vunlazy_gone()
3509 VNPASS(vp->v_holdcnt > 0, vp); in vdefer_inactive()
3514 if (vp->v_iflag & VI_DEFINACT) { in vdefer_inactive()
3515 VNPASS(vp->v_holdcnt > 1, vp); in vdefer_inactive()
3519 if (vp->v_usecount > 0) { in vdefer_inactive()
3520 vp->v_iflag &= ~VI_OWEINACT; in vdefer_inactive()
3525 vp->v_iflag |= VI_DEFINACT; in vdefer_inactive()
3535 if ((vp->v_iflag & VI_OWEINACT) == 0) { in vdefer_inactive_unlocked()
3545 * Handle ->v_usecount transitioning to 0.
3557 * happens with UFS which adds half-constructed vnodes to the hash, where they
3567 VNPASS(vp->v_holdcnt > 0, vp); in vput_final()
3572 * By the time we got here someone else might have transitioned in vput_final()
3575 if (vp->v_usecount > 0) in vput_final()
3588 if (vp->v_iflag & VI_DOINGINACT) in vput_final()
3594 * while ->v_usecount is 0. Set VI_OWEINACT to let vgone know to in vput_final()
3597 vp->v_iflag |= VI_OWEINACT; in vput_final()
3638 VNASSERT((vp->v_vflag & VV_UNREF) == 0, vp, in vput_final()
3640 vp->v_vflag |= VV_UNREF; in vput_final()
3651 vp->v_vflag &= ~VV_UNREF; in vput_final()
3664 * Decrement ->v_usecount for a vnode.
3681 if (!refcount_release(&vp->v_usecount)) in vrele()
3696 if (!refcount_release(&vp->v_usecount)) { in vput()
3713 if (!refcount_release(&vp->v_usecount)) in vunref()
3724 old = atomic_fetchadd_int(&vp->v_holdcnt, 1); in vhold()
3737 int old = atomic_fetchadd_int(&vp->v_holdcnt, 1); in vholdnz()
3741 atomic_add_int(&vp->v_holdcnt, 1); in vholdnz()
3756 * count = atomic_fetchadd_int(&vp->v_holdcnt, 1);
3771 count = atomic_load_int(&vp->v_holdcnt); in vhold_smr()
3775 ("non-zero hold count with flags %d\n", count)); in vhold_smr()
3779 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { in vhold_smr()
3801 * Note: the vnode may gain more references after we transition the count 0->1.
3810 count = atomic_load_int(&vp->v_holdcnt); in vhold_recycle_free()
3814 ("non-zero hold count with flags %d\n", count)); in vhold_recycle_free()
3821 if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1)) { in vhold_recycle_free()
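Both vhold_smr() and vhold_recycle_free() rely on the same lock-free pattern visible in the fragments above: read v_holdcnt, refuse if the VHOLD_NO_SMR flag shows the vnode is not (or no longer) available to SMR-only consumers, otherwise bump the count with fcmpset, which reloads the value on failure. A condensed sketch; the free-vnode accounting the real functions perform on a 0->1 transition is omitted:

	count = atomic_load_int(&vp->v_holdcnt);
	for (;;) {
		if ((count & VHOLD_NO_SMR) != 0)
			return (false);	/* being freed; caller must redo its lookup */
		if (atomic_fcmpset_int(&vp->v_holdcnt, &count, count + 1))
			return (true);	/* hold acquired */
	}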
3834 mtx_assert(&vd->lock, MA_OWNED); in vdbatch_process()
3835 MPASS(curthread->td_pinned > 0); in vdbatch_process()
3836 MPASS(vd->index == VDBATCH_SIZE); in vdbatch_process()
3843 * if multiple CPUs get here (one real-world example is highly parallel in vdbatch_process()
3844 * do-nothing make, which will stat *tons* of vnodes). Since it is in vdbatch_process()
3845 * quasi-LRU (read: not that great even if fully honoured) provide an in vdbatch_process()
3854 vp = vd->tab[i]; in vdbatch_process()
3855 vd->tab[i] = NULL; in vdbatch_process()
3856 MPASS(vp->v_dbatchcpu != NOCPU); in vdbatch_process()
3857 vp->v_dbatchcpu = NOCPU; in vdbatch_process()
3859 vd->index = 0; in vdbatch_process()
3872 vp = vd->tab[i]; in vdbatch_process()
3873 vd->tab[i] = NULL; in vdbatch_process()
3876 MPASS(vp->v_dbatchcpu != NOCPU); in vdbatch_process()
3877 vp->v_dbatchcpu = NOCPU; in vdbatch_process()
3880 vd->index = 0; in vdbatch_process()
3892 if (vp->v_dbatchcpu != NOCPU) { in vdbatch_enqueue()
3899 mtx_lock(&vd->lock); in vdbatch_enqueue()
3900 MPASS(vd->index < VDBATCH_SIZE); in vdbatch_enqueue()
3901 MPASS(vd->tab[vd->index] == NULL); in vdbatch_enqueue()
3904 * ->v_dbatchcpu. in vdbatch_enqueue()
3906 vp->v_dbatchcpu = curcpu; in vdbatch_enqueue()
3907 vd->tab[vd->index] = vp; in vdbatch_enqueue()
3908 vd->index++; in vdbatch_enqueue()
3910 if (vd->index == VDBATCH_SIZE) in vdbatch_enqueue()
3912 mtx_unlock(&vd->lock); in vdbatch_enqueue()
3928 VNPASS(vp->v_type == VBAD || vp->v_type == VNON, vp); in vdbatch_dequeue()
3930 cpu = vp->v_dbatchcpu; in vdbatch_dequeue()
3935 mtx_lock(&vd->lock); in vdbatch_dequeue()
3936 for (i = 0; i < vd->index; i++) { in vdbatch_dequeue()
3937 if (vd->tab[i] != vp) in vdbatch_dequeue()
3939 vp->v_dbatchcpu = NOCPU; in vdbatch_dequeue()
3940 vd->index--; in vdbatch_dequeue()
3941 vd->tab[i] = vd->tab[vd->index]; in vdbatch_dequeue()
3942 vd->tab[vd->index] = NULL; in vdbatch_dequeue()
3945 mtx_unlock(&vd->lock); in vdbatch_dequeue()
3949 MPASS(vp->v_dbatchcpu == NOCPU); in vdbatch_dequeue()
3958 * there is at least one resident non-cached page, the vnode cannot
3973 if (__predict_false(!atomic_cmpset_int(&vp->v_holdcnt, 0, VHOLD_NO_SMR))) { in vdropl_final()
3994 if (refcount_release_if_not_last(&vp->v_holdcnt)) in vdrop()
4006 if (!refcount_release(&vp->v_holdcnt)) { in vdropl_impl()
4010 VNPASS((vp->v_iflag & VI_OWEINACT) == 0, vp); in vdropl_impl()
4011 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); in vdropl_impl()
4018 if (vp->v_mflag & VMP_LAZYLIST) { in vdropl_impl()
4079 VNPASS((vp->v_iflag & VI_DOINGINACT) == 0, vp); in vinactivef()
4081 vp->v_iflag |= VI_DOINGINACT; in vinactivef()
4082 vp->v_iflag &= ~VI_OWEINACT; in vinactivef()
4091 * The write-out of the dirty pages is asynchronous. At the in vinactivef()
4095 if ((vp->v_vflag & VV_NOSYNC) == 0) in vinactivef()
4100 VNPASS(vp->v_iflag & VI_DOINGINACT, vp); in vinactivef()
4101 vp->v_iflag &= ~VI_DOINGINACT; in vinactivef()
4113 if ((vp->v_iflag & VI_OWEINACT) == 0) in vinactive()
4115 if (vp->v_iflag & VI_DOINGINACT) in vinactive()
4117 if (vp->v_usecount > 0) { in vinactive()
4118 vp->v_iflag &= ~VI_OWEINACT; in vinactive()
4184 if ((flags & SKIPSYSTEM) && (vp->v_vflag & VV_SYSTEM)) { in vflush()
4205 error = VOP_GETATTR(vp, &vattr, td->td_ucred); in vflush()
4208 if ((vp->v_type == VNON || in vflush()
4210 (vp->v_writecount <= 0 || vp->v_type != VREG)) { in vflush()
4223 if (vp->v_usecount == 0 || (flags & FORCECLOSE)) { in vflush()
4242 VNASSERT(rootvp->v_usecount >= rootrefs, rootvp, in vflush()
4244 rootvp->v_usecount, rootrefs)); in vflush()
4245 if (busy == 1 && rootvp->v_usecount == rootrefs) { in vflush()
4258 for (; rootrefs > 0; rootrefs--) in vflush()
4289 if (vp->v_usecount == 0) { in vrecyclel()
4317 mp = atomic_load_ptr(&vp->v_mount); in vfs_notify_upper()
4320 if (TAILQ_EMPTY(&mp->mnt_notify)) in vfs_notify_upper()
4324 mp->mnt_upper_pending++; in vfs_notify_upper()
4325 KASSERT(mp->mnt_upper_pending > 0, in vfs_notify_upper()
4326 ("%s: mnt_upper_pending %d", __func__, mp->mnt_upper_pending)); in vfs_notify_upper()
4327 TAILQ_FOREACH(ump, &mp->mnt_notify, mnt_upper_link) { in vfs_notify_upper()
4331 VFS_RECLAIM_LOWERVP(ump->mp, vp); in vfs_notify_upper()
4334 VFS_UNLINK_LOWERVP(ump->mp, vp); in vfs_notify_upper()
4339 mp->mnt_upper_pending--; in vfs_notify_upper()
4340 if ((mp->mnt_kern_flag & MNTK_UPPER_WAITER) != 0 && in vfs_notify_upper()
4341 mp->mnt_upper_pending == 0) { in vfs_notify_upper()
4342 mp->mnt_kern_flag &= ~MNTK_UPPER_WAITER; in vfs_notify_upper()
4343 wakeup(&mp->mnt_uppers); in vfs_notify_upper()
4361 VNASSERT(vp->v_holdcnt, vp, in vgonel()
4390 active = vp->v_usecount > 0; in vgonel()
4391 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; in vgonel()
4392 doinginact = (vp->v_iflag & VI_DOINGINACT) != 0; in vgonel()
4397 if (vp->v_iflag & VI_DEFINACT) { in vgonel()
4398 VNASSERT(vp->v_holdcnt > 1, vp, ("lost hold count")); in vgonel()
4399 vp->v_iflag &= ~VI_DEFINACT; in vgonel()
4402 VNASSERT(vp->v_holdcnt > 0, vp, ("vnode without hold count")); in vgonel()
4419 oweinact = (vp->v_iflag & VI_OWEINACT) != 0; in vgonel()
4424 if (vp->v_type == VSOCK) in vgonel()
4432 if (!TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd)) in vgonel()
4439 BO_LOCK(&vp->v_bufobj); in vgonel()
4440 KASSERT(TAILQ_EMPTY(&vp->v_bufobj.bo_dirty.bv_hd) && in vgonel()
4441 vp->v_bufobj.bo_dirty.bv_cnt == 0 && in vgonel()
4442 TAILQ_EMPTY(&vp->v_bufobj.bo_clean.bv_hd) && in vgonel()
4443 vp->v_bufobj.bo_clean.bv_cnt == 0, in vgonel()
4451 object = vp->v_bufobj.bo_object; in vgonel()
4453 vp->v_bufobj.bo_flag |= BO_DEAD; in vgonel()
4454 BO_UNLOCK(&vp->v_bufobj); in vgonel()
4462 if (object != NULL && object->type == OBJT_VNODE && in vgonel()
4463 object->handle == vp) in vgonel()
4473 VNASSERT(vp->v_object == NULL, vp, in vgonel()
4478 if (vp->v_lockf != NULL) { in vgonel()
4480 vp->v_lockf = NULL; in vgonel()
4485 if (vp->v_mount == NULL) { in vgonel()
4495 vp->v_vnlock = &vp->v_lock; in vgonel()
4496 vp->v_op = &dead_vnodeops; in vgonel()
4497 vp->v_type = VBAD; in vgonel()
4544 printf("type %s state %s op %p\n", vtypename[vp->v_type], in vn_printf()
4545 vstatename[vp->v_state], vp->v_op); in vn_printf()
4546 holdcnt = atomic_load_int(&vp->v_holdcnt); in vn_printf()
4548 vp->v_usecount, vp->v_writecount, holdcnt & ~VHOLD_ALL_FLAGS, in vn_printf()
4549 vp->v_seqc_users); in vn_printf()
4550 switch (vp->v_type) { in vn_printf()
4552 printf(" mountedhere %p\n", vp->v_mountedhere); in vn_printf()
4555 printf(" rdev %p\n", vp->v_rdev); in vn_printf()
4558 printf(" socket %p\n", vp->v_unpcb); in vn_printf()
4561 printf(" fifoinfo %p\n", vp->v_fifoinfo); in vn_printf()
4589 if (vp->v_vflag & VV_ROOT) in vn_printf()
4591 if (vp->v_vflag & VV_ISTTY) in vn_printf()
4593 if (vp->v_vflag & VV_NOSYNC) in vn_printf()
4595 if (vp->v_vflag & VV_ETERNALDEV) in vn_printf()
4597 if (vp->v_vflag & VV_CACHEDLABEL) in vn_printf()
4599 if (vp->v_vflag & VV_VMSIZEVNLOCK) in vn_printf()
4601 if (vp->v_vflag & VV_COPYONWRITE) in vn_printf()
4603 if (vp->v_vflag & VV_SYSTEM) in vn_printf()
4605 if (vp->v_vflag & VV_PROCDEP) in vn_printf()
4607 if (vp->v_vflag & VV_DELETED) in vn_printf()
4609 if (vp->v_vflag & VV_MD) in vn_printf()
4611 if (vp->v_vflag & VV_FORCEINSMQ) in vn_printf()
4613 if (vp->v_vflag & VV_READLINK) in vn_printf()
4615 flags = vp->v_vflag & ~(VV_ROOT | VV_ISTTY | VV_NOSYNC | VV_ETERNALDEV | in vn_printf()
4622 if (vp->v_iflag & VI_MOUNT) in vn_printf()
4624 if (vp->v_iflag & VI_DOINGINACT) in vn_printf()
4626 if (vp->v_iflag & VI_OWEINACT) in vn_printf()
4628 if (vp->v_iflag & VI_DEFINACT) in vn_printf()
4630 if (vp->v_iflag & VI_FOPENING) in vn_printf()
4632 flags = vp->v_iflag & ~(VI_MOUNT | VI_DOINGINACT | in vn_printf()
4638 if (vp->v_mflag & VMP_LAZYLIST) in vn_printf()
4640 flags = vp->v_mflag & ~(VMP_LAZYLIST); in vn_printf()
4649 if (vp->v_object != NULL) in vn_printf()
4652 vp->v_object, vp->v_object->ref_count, in vn_printf()
4653 vp->v_object->resident_page_count, in vn_printf()
4654 vp->v_bufobj.bo_clean.bv_cnt, in vn_printf()
4655 vp->v_bufobj.bo_dirty.bv_cnt); in vn_printf()
4657 lockmgr_printinfo(vp->v_vnlock); in vn_printf()
4658 if (vp->v_data != NULL) in vn_printf()
4680 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { in DB_SHOW_COMMAND_FLAGS()
4681 if (vp->v_type != VMARKER && VOP_ISLOCKED(vp)) in DB_SHOW_COMMAND_FLAGS()
4717 mp->mnt_stat.f_mntfromname, in DB_SHOW_COMMAND()
4718 mp->mnt_stat.f_mntonname, in DB_SHOW_COMMAND()
4719 mp->mnt_stat.f_fstypename); in DB_SHOW_COMMAND()
4728 db_printf("%p %s on %s (%s)\n", mp, mp->mnt_stat.f_mntfromname, in DB_SHOW_COMMAND()
4729 mp->mnt_stat.f_mntonname, mp->mnt_stat.f_fstypename); in DB_SHOW_COMMAND()
4732 mflags = mp->mnt_flag; in DB_SHOW_COMMAND()
4779 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), in DB_SHOW_COMMAND()
4785 flags = mp->mnt_kern_flag; in DB_SHOW_COMMAND()
4822 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), in DB_SHOW_COMMAND()
4828 opt = TAILQ_FIRST(mp->mnt_opt); in DB_SHOW_COMMAND()
4830 db_printf("%s", opt->name); in DB_SHOW_COMMAND()
4833 db_printf(", %s", opt->name); in DB_SHOW_COMMAND()
4839 sp = &mp->mnt_stat; in DB_SHOW_COMMAND()
4844 (u_int)sp->f_version, (u_int)sp->f_type, (uintmax_t)sp->f_flags, in DB_SHOW_COMMAND()
4845 (uintmax_t)sp->f_bsize, (uintmax_t)sp->f_iosize, in DB_SHOW_COMMAND()
4846 (uintmax_t)sp->f_blocks, (uintmax_t)sp->f_bfree, in DB_SHOW_COMMAND()
4847 (intmax_t)sp->f_bavail, (uintmax_t)sp->f_files, in DB_SHOW_COMMAND()
4848 (intmax_t)sp->f_ffree, (uintmax_t)sp->f_syncwrites, in DB_SHOW_COMMAND()
4849 (uintmax_t)sp->f_asyncwrites, (uintmax_t)sp->f_syncreads, in DB_SHOW_COMMAND()
4850 (uintmax_t)sp->f_asyncreads, (u_int)sp->f_namemax, in DB_SHOW_COMMAND()
4851 (u_int)sp->f_owner, (int)sp->f_fsid.val[0], (int)sp->f_fsid.val[1]); in DB_SHOW_COMMAND()
4854 (u_int)mp->mnt_cred->cr_uid, (u_int)mp->mnt_cred->cr_ruid); in DB_SHOW_COMMAND()
4855 if (jailed(mp->mnt_cred)) in DB_SHOW_COMMAND()
4856 db_printf(", jail=%d", mp->mnt_cred->cr_prison->pr_id); in DB_SHOW_COMMAND()
4859 vfs_mount_fetch_counter(mp, MNT_COUNT_REF), mp->mnt_ref); in DB_SHOW_COMMAND()
4860 db_printf(" mnt_gen = %d\n", mp->mnt_gen); in DB_SHOW_COMMAND()
4861 db_printf(" mnt_nvnodelistsize = %d\n", mp->mnt_nvnodelistsize); in DB_SHOW_COMMAND()
4863 mp->mnt_lazyvnodelistsize); in DB_SHOW_COMMAND()
4865 vfs_mount_fetch_counter(mp, MNT_COUNT_WRITEOPCOUNT), mp->mnt_writeopcount); in DB_SHOW_COMMAND()
4866 db_printf(" mnt_iosize_max = %d\n", mp->mnt_iosize_max); in DB_SHOW_COMMAND()
4867 db_printf(" mnt_hashseed = %u\n", mp->mnt_hashseed); in DB_SHOW_COMMAND()
4869 vfs_mount_fetch_counter(mp, MNT_COUNT_LOCKREF), mp->mnt_lockref); in DB_SHOW_COMMAND()
4870 db_printf(" mnt_secondary_writes = %d\n", mp->mnt_secondary_writes); in DB_SHOW_COMMAND()
4872 mp->mnt_secondary_accwrites); in DB_SHOW_COMMAND()
4874 mp->mnt_gjprovider != NULL ? mp->mnt_gjprovider : "NULL"); in DB_SHOW_COMMAND()
4875 db_printf(" mnt_vfs_ops = %d\n", mp->mnt_vfs_ops); in DB_SHOW_COMMAND()
4878 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { in DB_SHOW_COMMAND()
4879 if (vp->v_type != VMARKER && vp->v_holdcnt > 0) { in DB_SHOW_COMMAND()
4886 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { in DB_SHOW_COMMAND()
4887 if (vp->v_type != VMARKER && vp->v_holdcnt == 0) { in DB_SHOW_COMMAND()
4905 strcpy(xvfsp.vfc_name, vfsp->vfc_name); in vfsconf2x()
4906 xvfsp.vfc_typenum = vfsp->vfc_typenum; in vfsconf2x()
4907 xvfsp.vfc_refcount = vfsp->vfc_refcount; in vfsconf2x()
4908 xvfsp.vfc_flags = vfsp->vfc_flags; in vfsconf2x()
4934 strcpy(xvfsp.vfc_name, vfsp->vfc_name); in vfsconf2x32()
4935 xvfsp.vfc_typenum = vfsp->vfc_typenum; in vfsconf2x32()
4936 xvfsp.vfc_refcount = vfsp->vfc_refcount; in vfsconf2x32()
4937 xvfsp.vfc_flags = vfsp->vfc_flags; in vfsconf2x32()
4955 if (req->flags & SCTL_MASK32) in sysctl_vfs_conflist()
4977 int *name = (int *)arg1 - 1; /* XXX */ in vfs_sysctl()
5000 if (vfsp->vfc_typenum == name[2]) in vfs_sysctl()
5007 if (req->flags & SCTL_MASK32) in vfs_sysctl()
5032 ovfs.vfc_vfsops = vfsp->vfc_vfsops; /* XXX used as flag */ in sysctl_ovfs_conf()
5033 strcpy(ovfs.vfc_name, vfsp->vfc_name); in sysctl_ovfs_conf()
5034 ovfs.vfc_index = vfsp->vfc_typenum; in sysctl_ovfs_conf()
5035 ovfs.vfc_refcount = vfsp->vfc_refcount; in sysctl_ovfs_conf()
5036 ovfs.vfc_flags = vfsp->vfc_flags; in sysctl_ovfs_conf()
5057 printf("unmount of %s failed (", mp->mnt_stat.f_mntonname); in unmount_or_warn()
5101 VNPASS((vp->v_iflag & VI_DEFINACT) == 0, vp); in vfs_deferred_inactive()
5102 if ((vp->v_iflag & VI_OWEINACT) == 0) { in vfs_deferred_inactive()
5120 return (vp->v_iflag & VI_DEFINACT); in vfs_periodic_inactive_filter()
5134 if ((vp->v_iflag & VI_DEFINACT) == 0) { in vfs_periodic_inactive()
5138 vp->v_iflag &= ~VI_DEFINACT; in vfs_periodic_inactive()
5152 if (vp->v_vflag & VV_NOSYNC) in vfs_want_msync()
5154 obj = vp->v_object; in vfs_want_msync()
5162 if (vp->v_vflag & VV_NOSYNC) in vfs_periodic_msync_inactive_filter()
5164 if (vp->v_iflag & VI_DEFINACT) in vfs_periodic_msync_inactive_filter()
5182 if (vp->v_iflag & VI_DEFINACT) { in vfs_periodic_msync_inactive()
5183 vp->v_iflag &= ~VI_DEFINACT; in vfs_periodic_msync_inactive()
5194 if ((vp->v_vflag & VV_NOSYNC) == 0) { in vfs_periodic_msync_inactive()
5216 if ((mp->mnt_kern_flag & MNTK_NOMSYNC) != 0) in vfs_periodic()
5226 knlist_destroy(&vi->vpi_selinfo.si_note); in destroy_vpollinfo_free()
5227 mtx_destroy(&vi->vpi_lock); in destroy_vpollinfo_free()
5235 knlist_clear(&vi->vpi_selinfo.si_note, 1); in destroy_vpollinfo()
5236 seldrain(&vi->vpi_selinfo); in destroy_vpollinfo()
5241 * Initialize per-vnode helper structure to hold poll-related state.
5248 if (vp->v_pollinfo != NULL) in v_addpollinfo()
5251 mtx_init(&vi->vpi_lock, "vnode pollinfo", NULL, MTX_DEF); in v_addpollinfo()
5252 knlist_init(&vi->vpi_selinfo.si_note, vp, vfs_knllock, in v_addpollinfo()
5255 if (vp->v_pollinfo != NULL) { in v_addpollinfo()
5260 vp->v_pollinfo = vi; in v_addpollinfo()
5266 * a vnode. Because poll uses the historic select-style interface
5277 mtx_lock(&vp->v_pollinfo->vpi_lock); in vn_pollrecord()
5278 if (vp->v_pollinfo->vpi_revents & events) { in vn_pollrecord()
5286 events &= vp->v_pollinfo->vpi_revents; in vn_pollrecord()
5287 vp->v_pollinfo->vpi_revents &= ~events; in vn_pollrecord()
5289 mtx_unlock(&vp->v_pollinfo->vpi_lock); in vn_pollrecord()
5292 vp->v_pollinfo->vpi_events |= events; in vn_pollrecord()
5293 selrecord(td, &vp->v_pollinfo->vpi_selinfo); in vn_pollrecord()
5294 mtx_unlock(&vp->v_pollinfo->vpi_lock); in vn_pollrecord()
5337 vp->v_type = VNON; in vfs_allocate_syncvnode()
5339 vp->v_vflag |= VV_FORCEINSMQ; in vfs_allocate_syncvnode()
5343 vp->v_vflag &= ~VV_FORCEINSMQ; in vfs_allocate_syncvnode()
5362 bo = &vp->v_bufobj; in vfs_allocate_syncvnode()
5365 /* XXX - vn_syncer_add_to_worklist() also grabs and drops sync_mtx. */ in vfs_allocate_syncvnode()
5368 if (mp->mnt_syncer == NULL) { in vfs_allocate_syncvnode()
5369 mp->mnt_syncer = vp; in vfs_allocate_syncvnode()
5387 vp = mp->mnt_syncer; in vfs_deallocate_syncvnode()
5389 mp->mnt_syncer = NULL; in vfs_deallocate_syncvnode()
5401 struct vnode *syncvp = ap->a_vp; in sync_fsync()
5402 struct mount *mp = syncvp->v_mount; in sync_fsync()
5409 if (ap->a_waitfor != MNT_LAZY) in sync_fsync()
5415 bo = &syncvp->v_bufobj; in sync_fsync()
5447 vgone(ap->a_vp); in sync_inactive()
5459 struct vnode *vp = ap->a_vp; in sync_reclaim()
5462 bo = &vp->v_bufobj; in sync_reclaim()
5465 if (vp->v_mount->mnt_syncer == vp) in sync_reclaim()
5466 vp->v_mount->mnt_syncer = NULL; in sync_reclaim()
5467 if (bo->bo_flag & BO_ONWORKLST) { in sync_reclaim()
5469 syncer_worklist_len--; in sync_reclaim()
5470 sync_vnode_count--; in sync_reclaim()
5471 bo->bo_flag &= ~BO_ONWORKLST; in sync_reclaim()
5484 obj = vp->v_object; in vn_need_pageq_flush()
5485 return (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0 && in vn_need_pageq_flush()
5497 if (vp->v_type != VCHR) { in vn_isdisk_error()
5503 if (vp->v_rdev == NULL) in vn_isdisk_error()
5505 else if (vp->v_rdev->si_devsw == NULL) in vn_isdisk_error()
5507 else if (!(vp->v_rdev->si_devsw->d_flags & D_DISK)) in vn_isdisk_error()
5535 if (cred->cr_uid == file_uid) { in vaccess_vexec_smr()
5597 * Look for a normal, non-privileged way to access the file/directory in vaccess()
5604 if (cred->cr_uid == file_uid) { in vaccess()
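Before consulting privileges, vaccess() works out what the plain mode bits grant: the owner triplet when cr_uid matches file_uid (the test above), the group triplet when the credential is a member of file_gid, otherwise the other triplet. A simplified sketch of that selection; the real code also folds VAPPEND into VWRITE grants and handles VADMIN separately:

	if (cred->cr_uid == file_uid)
		shift = 6;				/* owner rwx bits */
	else if (groupmember(file_gid, cred))
		shift = 3;				/* group rwx bits */
	else
		shift = 0;				/* other rwx bits */
	dac_granted = 0;
	if (file_mode & (S_IXOTH << shift))
		dac_granted |= VEXEC;
	if (file_mode & (S_IROTH << shift))
		dac_granted |= VREAD;
	if (file_mode & (S_IWOTH << shift))
		dac_granted |= VWRITE;
	if ((accmode & ~dac_granted) == 0)
		return (0);				/* allowed without privilege */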
5693 * Credential check based on process requesting service, and per-attribute
5702 * Kernel-invoked always succeeds. in extattr_check_cred()
5784 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && in assert_vop_locked()
5785 witness_is_owned(&vp->v_vnlock->lock_object) == -1) in assert_vop_locked()
5800 if ((vp->v_irflag & VIRF_CROSSMP) == 0 && in assert_vop_unlocked()
5801 witness_is_owned(&vp->v_vnlock->lock_object) == 1) in assert_vop_unlocked()
5823 if (ap->a_tvp != NULL) in vop_rename_fail()
5824 vput(ap->a_tvp); in vop_rename_fail()
5825 if (ap->a_tdvp == ap->a_tvp) in vop_rename_fail()
5826 vrele(ap->a_tdvp); in vop_rename_fail()
5828 vput(ap->a_tdvp); in vop_rename_fail()
5829 vrele(ap->a_fdvp); in vop_rename_fail()
5830 vrele(ap->a_fvp); in vop_rename_fail()
5839 if (a->a_tvp) in vop_rename_pre()
5840 ASSERT_VI_UNLOCKED(a->a_tvp, "VOP_RENAME"); in vop_rename_pre()
5841 ASSERT_VI_UNLOCKED(a->a_tdvp, "VOP_RENAME"); in vop_rename_pre()
5842 ASSERT_VI_UNLOCKED(a->a_fvp, "VOP_RENAME"); in vop_rename_pre()
5843 ASSERT_VI_UNLOCKED(a->a_fdvp, "VOP_RENAME"); in vop_rename_pre()
5846 if (a->a_tdvp->v_vnlock != a->a_fdvp->v_vnlock && in vop_rename_pre()
5847 (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fdvp->v_vnlock)) in vop_rename_pre()
5848 ASSERT_VOP_UNLOCKED(a->a_fdvp, "vop_rename: fdvp locked"); in vop_rename_pre()
5849 if (a->a_tvp == NULL || a->a_tvp->v_vnlock != a->a_fvp->v_vnlock) in vop_rename_pre()
5850 ASSERT_VOP_UNLOCKED(a->a_fvp, "vop_rename: fvp locked"); in vop_rename_pre()
5853 if (a->a_tvp) in vop_rename_pre()
5854 ASSERT_VOP_LOCKED(a->a_tvp, "vop_rename: tvp not locked"); in vop_rename_pre()
5855 ASSERT_VOP_LOCKED(a->a_tdvp, "vop_rename: tdvp not locked"); in vop_rename_pre()
5860 * filesystems relookup vnodes mid-rename. This is probably a bug. in vop_rename_pre()
5865 if (a->a_tdvp != a->a_fdvp) in vop_rename_pre()
5866 vhold(a->a_fdvp); in vop_rename_pre()
5867 if (a->a_tvp != a->a_fvp) in vop_rename_pre()
5868 vhold(a->a_fvp); in vop_rename_pre()
5869 vhold(a->a_tdvp); in vop_rename_pre()
5870 if (a->a_tvp) in vop_rename_pre()
5871 vhold(a->a_tvp); in vop_rename_pre()
5889 vp = a->a_vp; in vop_fplookup_vexec_debugpost()
5913 if (vp->v_type == VCHR) in vop_fsync_debugprepost()
5919 * may not be the same as vp->v_mount. However, if the in vop_fsync_debugprepost()
5930 else if (MNT_SHARED_WRITES(vp->v_mount)) in vop_fsync_debugprepost()
5942 vop_fsync_debugprepost(ap->a_vp, "fsync"); in vop_fsync_debugpre()
5951 vop_fsync_debugprepost(ap->a_vp, "fsync"); in vop_fsync_debugpost()
5960 vop_fsync_debugprepost(ap->a_vp, "fsync"); in vop_fdatasync_debugpre()
5969 vop_fsync_debugprepost(ap->a_vp, "fsync"); in vop_fdatasync_debugpost()
5979 bp = a->a_bp; in vop_strategy_debugpre()
5984 if ((bp->b_flags & B_CLUSTER) != 0) in vop_strategy_debugpre()
6001 if ((a->a_flags & LK_INTERLOCK) == 0) in vop_lock_debugpre()
6002 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); in vop_lock_debugpre()
6004 ASSERT_VI_LOCKED(a->a_vp, "VOP_LOCK"); in vop_lock_debugpre()
6012 ASSERT_VI_UNLOCKED(a->a_vp, "VOP_LOCK"); in vop_lock_debugpost()
6013 if (rc == 0 && (a->a_flags & LK_EXCLOTHER) == 0) in vop_lock_debugpost()
6014 ASSERT_VOP_LOCKED(a->a_vp, "VOP_LOCK"); in vop_lock_debugpost()
6021 struct vnode *vp = a->a_vp; in vop_unlock_debugpre()
6032 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); in vop_need_inactive_debugpre()
6040 ASSERT_VI_LOCKED(a->a_vp, "VOP_NEED_INACTIVE"); in vop_need_inactive_debugpost()
6051 dvp = a->a_dvp; in vop_create_pre()
6062 dvp = a->a_dvp; in vop_create_post()
6075 dvp = a->a_dvp; in vop_whiteout_pre()
6086 dvp = a->a_dvp; in vop_whiteout_post()
6097 vp = a->a_vp; in vop_deleteextattr_pre()
6108 vp = a->a_vp; in vop_deleteextattr_post()
6111 VFS_KNOTE_LOCKED(a->a_vp, NOTE_ATTRIB); in vop_deleteextattr_post()
6121 vp = a->a_vp; in vop_link_pre()
6122 tdvp = a->a_tdvp; in vop_link_pre()
6134 vp = a->a_vp; in vop_link_post()
6135 tdvp = a->a_tdvp; in vop_link_post()
6151 dvp = a->a_dvp; in vop_mkdir_pre()
6162 dvp = a->a_dvp; in vop_mkdir_post()
6176 cache_validate(a->a_dvp, *a->a_vpp, a->a_cnp); in vop_mkdir_debugpost()
6187 dvp = a->a_dvp; in vop_mknod_pre()
6198 dvp = a->a_dvp; in vop_mknod_post()
6211 vp = a->a_vp; in vop_reclaim_post()
6224 dvp = a->a_dvp; in vop_remove_pre()
6225 vp = a->a_vp; in vop_remove_pre()
6237 dvp = a->a_dvp; in vop_remove_post()
6238 vp = a->a_vp; in vop_remove_post()
6255 if (a->a_fdvp == a->a_tdvp) { in vop_rename_post()
6256 if (a->a_tvp != NULL && a->a_tvp->v_type == VDIR) in vop_rename_post()
6258 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); in vop_rename_post()
6259 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); in vop_rename_post()
6262 if (a->a_fvp->v_type == VDIR) in vop_rename_post()
6264 VFS_KNOTE_UNLOCKED(a->a_fdvp, hint); in vop_rename_post()
6266 if (a->a_fvp->v_type == VDIR && a->a_tvp != NULL && in vop_rename_post()
6267 a->a_tvp->v_type == VDIR) in vop_rename_post()
6269 VFS_KNOTE_UNLOCKED(a->a_tdvp, hint); in vop_rename_post()
6272 VFS_KNOTE_UNLOCKED(a->a_fvp, NOTE_RENAME); in vop_rename_post()
6273 if (a->a_tvp) in vop_rename_post()
6274 VFS_KNOTE_UNLOCKED(a->a_tvp, NOTE_DELETE); in vop_rename_post()
6276 if (a->a_tdvp != a->a_fdvp) in vop_rename_post()
6277 vdrop(a->a_fdvp); in vop_rename_post()
6278 if (a->a_tvp != a->a_fvp) in vop_rename_post()
6279 vdrop(a->a_fvp); in vop_rename_post()
6280 vdrop(a->a_tdvp); in vop_rename_post()
6281 if (a->a_tvp) in vop_rename_post()
6282 vdrop(a->a_tvp); in vop_rename_post()
6292 dvp = a->a_dvp; in vop_rmdir_pre()
6293 vp = a->a_vp; in vop_rmdir_pre()
6305 dvp = a->a_dvp; in vop_rmdir_post()
6306 vp = a->a_vp; in vop_rmdir_post()
6310 vp->v_vflag |= VV_UNLINKED; in vop_rmdir_post()
6323 vp = a->a_vp; in vop_setattr_pre()
6334 vp = a->a_vp; in vop_setattr_post()
6347 vp = a->a_vp; in vop_setacl_pre()
6358 vp = a->a_vp; in vop_setacl_post()
6369 vp = a->a_vp; in vop_setextattr_pre()
6380 vp = a->a_vp; in vop_setextattr_post()
6393 dvp = a->a_dvp; in vop_symlink_pre()
6404 dvp = a->a_dvp; in vop_symlink_post()
6416 VFS_KNOTE_LOCKED(a->a_vp, NOTE_OPEN); in vop_open_post()
6424 if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ in vop_close_post()
6425 !VN_IS_DOOMED(a->a_vp))) { in vop_close_post()
6426 VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? in vop_close_post()
6437 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); in vop_read_post()
6446 VFS_KNOTE_UNLOCKED(a->a_vp, NOTE_READ); in vop_read_pgcache_post()
6455 VFS_KNOTE_LOCKED(a->a_vp, NOTE_READ); in vop_readdir_post()
6465 /* XXX - correct order? */
6490 kn->kn_flags |= EV_CLEAR; in filt_fsattach()
6506 kn->kn_fflags |= kn->kn_sfflags & hint; in filt_fsevent()
6508 return (kn->kn_fflags != 0); in filt_fsevent()
6528 strcmp(vc.vc_fstypename, mp->mnt_vfc->vfc_name) != 0) { in sysctl_vfs_ctl()
6607 struct vnode *vp = ap->a_vp; in vfs_kqfilter()
6608 struct knote *kn = ap->a_kn; in vfs_kqfilter()
6611 KASSERT(vp->v_type != VFIFO || (kn->kn_filter != EVFILT_READ && in vfs_kqfilter()
6612 kn->kn_filter != EVFILT_WRITE), in vfs_kqfilter()
6614 switch (kn->kn_filter) { in vfs_kqfilter()
6616 kn->kn_fop = &vfsread_filtops; in vfs_kqfilter()
6619 kn->kn_fop = &vfswrite_filtops; in vfs_kqfilter()
6622 kn->kn_fop = &vfsvnode_filtops; in vfs_kqfilter()
6628 kn->kn_hook = (caddr_t)vp; in vfs_kqfilter()
6631 if (vp->v_pollinfo == NULL) in vfs_kqfilter()
6633 knl = &vp->v_pollinfo->vpi_selinfo.si_note; in vfs_kqfilter()
6646 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsdetach()
6648 KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo")); in filt_vfsdetach()
6649 knlist_remove(&vp->v_pollinfo->vpi_selinfo.si_note, kn, 0); in filt_vfsdetach()
6657 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsread()
6665 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { in filt_vfsread()
6667 kn->kn_flags |= (EV_EOF | EV_ONESHOT); in filt_vfsread()
6672 if (vn_getsize_locked(vp, &size, curthread->td_ucred) != 0) in filt_vfsread()
6676 kn->kn_data = size - kn->kn_fp->f_offset; in filt_vfsread()
6677 res = (kn->kn_sfflags & NOTE_FILE_POLL) != 0 || kn->kn_data != 0; in filt_vfsread()
6686 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfswrite()
6694 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) in filt_vfswrite()
6695 kn->kn_flags |= (EV_EOF | EV_ONESHOT); in filt_vfswrite()
6697 kn->kn_data = 0; in filt_vfswrite()
6705 struct vnode *vp = (struct vnode *)kn->kn_hook; in filt_vfsvnode()
6709 if (kn->kn_sfflags & hint) in filt_vfsvnode()
6710 kn->kn_fflags |= hint; in filt_vfsvnode()
6711 if (hint == NOTE_REVOKE || (hint == 0 && vp->v_type == VBAD)) { in filt_vfsvnode()
6712 kn->kn_flags |= EV_EOF; in filt_vfsvnode()
6716 res = (kn->kn_fflags != 0); in filt_vfsvnode()
6726 if (dp->d_reclen > ap->a_uio->uio_resid) in vfs_read_dirent()
6728 error = uiomove(dp, dp->d_reclen, ap->a_uio); in vfs_read_dirent()
6730 if (ap->a_ncookies != NULL) { in vfs_read_dirent()
6731 if (ap->a_cookies != NULL) in vfs_read_dirent()
6732 free(ap->a_cookies, M_TEMP); in vfs_read_dirent()
6733 ap->a_cookies = NULL; in vfs_read_dirent()
6734 *ap->a_ncookies = 0; in vfs_read_dirent()
6738 if (ap->a_ncookies == NULL) in vfs_read_dirent()
6741 KASSERT(ap->a_cookies, in vfs_read_dirent()
6742 ("NULL ap->a_cookies value with non-NULL ap->a_ncookies!")); in vfs_read_dirent()
6744 *ap->a_cookies = realloc(*ap->a_cookies, in vfs_read_dirent()
6745 (*ap->a_ncookies + 1) * sizeof(uint64_t), M_TEMP, M_WAITOK | M_ZERO); in vfs_read_dirent()
6746 (*ap->a_cookies)[*ap->a_ncookies] = off; in vfs_read_dirent()
6747 *ap->a_ncookies += 1; in vfs_read_dirent()
6753 * reducing it into standard unix access bits - VEXEC, VREAD, VWRITE,
6809 if (mp->mnt_rootvnode != NULL) { in vfs_cache_root_fallback()
6811 vp = mp->mnt_rootvnode; in vfs_cache_root_fallback()
6827 mp->mnt_rootvnode = NULL; in vfs_cache_root_fallback()
6838 if (mp->mnt_vfs_ops == 0) { in vfs_cache_root_fallback()
6840 if (mp->mnt_vfs_ops != 0) { in vfs_cache_root_fallback()
6844 if (mp->mnt_rootvnode == NULL) { in vfs_cache_root_fallback()
6846 mp->mnt_rootvnode = *vpp; in vfs_cache_root_fallback()
6848 if (mp->mnt_rootvnode != *vpp) { in vfs_cache_root_fallback()
6849 if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { in vfs_cache_root_fallback()
6853 __func__, *vpp, mp->mnt_rootvnode); in vfs_cache_root_fallback()
6871 vp = atomic_load_ptr(&mp->mnt_rootvnode); in vfs_cache_root()
6895 MPASS(mp->mnt_vfs_ops > 0); in vfs_cache_root_clear()
6896 vp = mp->mnt_rootvnode; in vfs_cache_root_clear()
6899 mp->mnt_rootvnode = NULL; in vfs_cache_root_clear()
6907 MPASS(mp->mnt_vfs_ops > 0); in vfs_cache_root_set()
6909 mp->mnt_rootvnode = vp; in vfs_cache_root_set()
6926 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in __mnt_vnode_next_all()
6929 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ in __mnt_vnode_next_all()
6930 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) in __mnt_vnode_next_all()
6941 /* MNT_IUNLOCK(mp); -- done in above function */ in __mnt_vnode_next_all()
6945 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); in __mnt_vnode_next_all()
6946 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); in __mnt_vnode_next_all()
6960 TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { in __mnt_vnode_first_all()
6961 /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ in __mnt_vnode_first_all()
6962 if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) in __mnt_vnode_first_all()
6978 TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, *mvp, v_nmntvnodes); in __mnt_vnode_first_all()
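The VMARKER vnodes allocated by vn_alloc_marker() exist for exactly this iteration pattern: the marker is parked after the vnode just visited so the mount interlock can be dropped, and the walk resumes from the marker afterwards. A condensed sketch of the whole loop (the per-vnode hold/interlock handling a real consumer needs before unlocking is omitted):

	mvp = vn_alloc_marker(mp);
	MNT_ILOCK(mp);
	TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mvp, v_nmntvnodes);
	while ((vp = TAILQ_NEXT(mvp, v_nmntvnodes)) != NULL) {
		/* Move the marker past vp; the list may change once we unlock. */
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mvp, v_nmntvnodes);
		TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp, mvp, v_nmntvnodes);
		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
			continue;		/* skip other markers and doomed vnodes */
		MNT_IUNLOCK(mp);
		/* ... work on vp with the interlock dropped ... */
		MNT_ILOCK(mp);
	}
	TAILQ_REMOVE(&mp->mnt_nvnodelist, mvp, v_nmntvnodes);
	MNT_IUNLOCK(mp);
	vn_free_marker(mvp);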
6994 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in __mnt_vnode_markerfree_all()
6995 TAILQ_REMOVE(&mp->mnt_nvnodelist, *mvp, v_nmntvnodes); in __mnt_vnode_markerfree_all()
7010 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in mnt_vnode_markerfree_lazy()
7033 VNASSERT(mvp->v_mount == mp && mvp->v_type == VMARKER && in mnt_vnode_next_lazy_relock()
7036 VNASSERT(vp->v_mount == mp && vp->v_type != VMARKER, vp, in mnt_vnode_next_lazy_relock()
7039 mtx_assert(&mp->mnt_listmtx, MA_OWNED); in mnt_vnode_next_lazy_relock()
7041 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, mvp, v_lazylist); in mnt_vnode_next_lazy_relock()
7046 * count to 0 and now waits for the ->mnt_listmtx lock. This is fine, in mnt_vnode_next_lazy_relock()
7051 mtx_unlock(&mp->mnt_listmtx); in mnt_vnode_next_lazy_relock()
7054 VNPASS((vp->v_mflag & VMP_LAZYLIST) == 0, vp); in mnt_vnode_next_lazy_relock()
7057 VNPASS(vp->v_mflag & VMP_LAZYLIST, vp); in mnt_vnode_next_lazy_relock()
7061 if (!refcount_release_if_not_last(&vp->v_holdcnt)) in mnt_vnode_next_lazy_relock()
7063 mtx_lock(&mp->mnt_listmtx); in mnt_vnode_next_lazy_relock()
7068 mtx_lock(&mp->mnt_listmtx); in mnt_vnode_next_lazy_relock()
7078 mtx_assert(&mp->mnt_listmtx, MA_OWNED); in mnt_vnode_next_lazy()
7079 KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); in mnt_vnode_next_lazy()
7083 if (vp->v_type == VMARKER) { in mnt_vnode_next_lazy()
7098 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, in mnt_vnode_next_lazy()
7100 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, in mnt_vnode_next_lazy()
7102 mtx_unlock(&mp->mnt_listmtx); in mnt_vnode_next_lazy()
7104 mtx_lock(&mp->mnt_listmtx); in mnt_vnode_next_lazy()
7108 * Try-lock because this is the wrong lock order. in mnt_vnode_next_lazy()
7113 KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp)); in mnt_vnode_next_lazy()
7114 KASSERT(vp->v_mount == mp || vp->v_mount == NULL, in mnt_vnode_next_lazy()
7116 VNPASS(vp->v_mount == mp, vp); in mnt_vnode_next_lazy()
7120 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); in mnt_vnode_next_lazy()
7124 mtx_unlock(&mp->mnt_listmtx); in mnt_vnode_next_lazy()
7128 TAILQ_INSERT_AFTER(&mp->mnt_lazyvnodelist, vp, *mvp, v_lazylist); in mnt_vnode_next_lazy()
7129 mtx_unlock(&mp->mnt_listmtx); in mnt_vnode_next_lazy()
7140 mtx_lock(&mp->mnt_listmtx); in __mnt_vnode_next_lazy()
7150 if (TAILQ_EMPTY(&mp->mnt_lazyvnodelist)) in __mnt_vnode_first_lazy()
7158 mtx_lock(&mp->mnt_listmtx); in __mnt_vnode_first_lazy()
7159 vp = TAILQ_FIRST(&mp->mnt_lazyvnodelist); in __mnt_vnode_first_lazy()
7161 mtx_unlock(&mp->mnt_listmtx); in __mnt_vnode_first_lazy()
7176 mtx_lock(&mp->mnt_listmtx); in __mnt_vnode_markerfree_lazy()
7177 TAILQ_REMOVE(&mp->mnt_lazyvnodelist, *mvp, v_lazylist); in __mnt_vnode_markerfree_lazy()
7178 mtx_unlock(&mp->mnt_listmtx); in __mnt_vnode_markerfree_lazy()
7186 if ((cnp->cn_flags & NOEXECCHECK) != 0) { in vn_dir_check_exec()
7187 cnp->cn_flags &= ~NOEXECCHECK; in vn_dir_check_exec()
7191 return (VOP_ACCESS(vp, VEXEC, cnp->cn_cred, curthread)); in vn_dir_check_exec()
7203 VNPASS(vp->v_holdcnt > 0, vp); in vn_seqc_write_begin_locked()
7204 VNPASS(vp->v_seqc_users >= 0, vp); in vn_seqc_write_begin_locked()
7205 vp->v_seqc_users++; in vn_seqc_write_begin_locked()
7206 if (vp->v_seqc_users == 1) in vn_seqc_write_begin_locked()
7207 seqc_sleepable_write_begin(&vp->v_seqc); in vn_seqc_write_begin_locked()
7224 VNPASS(vp->v_seqc_users > 0, vp); in vn_seqc_write_end_locked()
7225 vp->v_seqc_users--; in vn_seqc_write_end_locked()
7226 if (vp->v_seqc_users == 0) in vn_seqc_write_end_locked()
7227 seqc_sleepable_write_end(&vp->v_seqc); in vn_seqc_write_end_locked()
7249 vp->v_seqc = 0; in vn_seqc_init()
7250 vp->v_seqc_users = 0; in vn_seqc_init()
7257 VNPASS(seqc_in_modify(vp->v_seqc), vp); in vn_seqc_write_end_free()
7258 VNPASS(vp->v_seqc_users == 1, vp); in vn_seqc_write_end_free()
7271 atomic_store_short(&vp->v_irflag, flags | toset); in vn_irflag_set_locked()
7290 atomic_store_short(&vp->v_irflag, flags | toset); in vn_irflag_set_cond_locked()
7312 atomic_store_short(&vp->v_irflag, flags & ~tounset); in vn_irflag_unset_locked()
7357 switch (vp->v_state) { in vn_set_state_validate()
7395 vn_printf(vp, "invalid state transition %d -> %d\n", vp->v_state, state); in vn_set_state_validate()
7396 panic("invalid state transition %d -> %d\n", vp->v_state, state); in vn_set_state_validate()