Lines Matching +full:charge +full:-current-limit-mapping
1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
8 * The Mach Operating System project at Carnegie-Mellon University.
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
55 * Pittsburgh PA 15213-3890
134 * objects from which they were virtual-copied.
179 KASSERT(object->ref_count == 0, in vm_object_zdtor()
180 ("object %p ref_count = %d", object, object->ref_count)); in vm_object_zdtor()
181 KASSERT(vm_radix_is_empty(&object->rtree), in vm_object_zdtor()
184 KASSERT(LIST_EMPTY(&object->rvq), in vm_object_zdtor()
189 ("object %p busy = %d", object, blockcount_read(&object->busy))); in vm_object_zdtor()
190 KASSERT(object->resident_page_count == 0, in vm_object_zdtor()
192 object, object->resident_page_count)); in vm_object_zdtor()
193 KASSERT(atomic_load_int(&object->shadow_count) == 0, in vm_object_zdtor()
195 object, atomic_load_int(&object->shadow_count))); in vm_object_zdtor()
196 KASSERT(object->type == OBJT_DEAD, in vm_object_zdtor()
197 ("object %p has non-dead type %d", in vm_object_zdtor()
198 object, object->type)); in vm_object_zdtor()
199 KASSERT(object->charge == 0 && object->cred == NULL, in vm_object_zdtor()
200 ("object %p has non-zero charge %ju (%p)", in vm_object_zdtor()
201 object, (uintmax_t)object->charge, object->cred)); in vm_object_zdtor()
211 rw_init_flags(&object->lock, "vmobject", RW_DUPOK | RW_NEW); in vm_object_zinit()
214 object->type = OBJT_DEAD; in vm_object_zinit()
215 vm_radix_init(&object->rtree); in vm_object_zinit()
216 refcount_init(&object->ref_count, 0); in vm_object_zinit()
217 blockcount_init(&object->paging_in_progress); in vm_object_zinit()
218 blockcount_init(&object->busy); in vm_object_zinit()
219 object->resident_page_count = 0; in vm_object_zinit()
220 atomic_store_int(&object->shadow_count, 0); in vm_object_zinit()
221 object->flags = OBJ_DEAD; in vm_object_zinit()
233 LIST_INIT(&object->shadow_head); in _vm_object_allocate()
235 object->type = type; in _vm_object_allocate()
236 object->flags = flags; in _vm_object_allocate()
238 pctrie_init(&object->un_pager.swp.swp_blks); in _vm_object_allocate()
239 object->un_pager.swp.writemappings = 0; in _vm_object_allocate()
245 * non-dead object. in _vm_object_allocate()
249 object->pg_color = 0; in _vm_object_allocate()
250 object->size = size; in _vm_object_allocate()
251 object->domain.dr_policy = NULL; in _vm_object_allocate()
252 object->generation = 1; in _vm_object_allocate()
253 object->cleangeneration = 1; in _vm_object_allocate()
254 refcount_init(&object->ref_count, 1); in _vm_object_allocate()
255 object->memattr = VM_MEMATTR_DEFAULT; in _vm_object_allocate()
256 object->cred = NULL; in _vm_object_allocate()
257 object->charge = 0; in _vm_object_allocate()
258 object->handle = handle; in _vm_object_allocate()
259 object->backing_object = NULL; in _vm_object_allocate()
260 object->backing_object_offset = (vm_ooffset_t) 0; in _vm_object_allocate()
262 LIST_INIT(&object->rvq); in _vm_object_allocate()
278 rw_init(&kernel_object->lock, "kernel vm object"); in vm_object_init()
279 vm_radix_init(&kernel_object->rtree); in vm_object_init()
280 _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS - in vm_object_init()
283 kernel_object->flags |= OBJ_COLORED; in vm_object_init()
284 kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); in vm_object_init()
286 kernel_object->un_pager.phys.ops = &default_phys_pg_ops; in vm_object_init()
312 object->flags &= ~bits; in vm_object_clear_flag()
330 if (object->type == OBJT_DEAD) in vm_object_set_memattr()
332 if (!vm_radix_is_empty(&object->rtree)) in vm_object_set_memattr()
335 object->memattr = memattr; in vm_object_set_memattr()
344 blockcount_acquire(&object->paging_in_progress, i); in vm_object_pip_add()
359 blockcount_release(&object->paging_in_progress, i); in vm_object_pip_wakeupn()
364 * from sleep/wakeup races due to identity changes. The lock is not re-acquired
371 (void)blockcount_sleep(&object->paging_in_progress, &object->lock, in vm_object_pip_sleep()
381 blockcount_wait(&object->paging_in_progress, &object->lock, waitid, in vm_object_pip_wait()
391 blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM); in vm_object_pip_wait_unlocked()
455 struct ucred *cred, vm_size_t charge) in vm_object_allocate_anon() argument
461 else if ((backing_object->flags & OBJ_ANON) != 0) in vm_object_allocate_anon()
462 handle = backing_object->handle; in vm_object_allocate_anon()
468 object->cred = cred; in vm_object_allocate_anon()
469 object->charge = cred != NULL ? charge : 0; in vm_object_allocate_anon()
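The vm_object_allocate_anon() hits above show an anonymous object recording the credential that reserved its swap space together with the reserved byte count. As an illustrative userland sketch only (the toy_* names are made up here, not the kernel API), the rule the assignment encodes is simply that a NULL credential means nothing was charged:

/*
 * Minimal standalone sketch (not kernel code): an object either carries no
 * accounting (cred == NULL, charge == 0) or pairs a credential with the
 * number of bytes reserved against it.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct toy_cred { int ruid; };

struct toy_object {
	struct toy_cred *cred;	/* owner of the reservation, or NULL */
	uint64_t charge;	/* bytes reserved against cred */
};

static void
toy_object_allocate_anon(struct toy_object *obj, struct toy_cred *cred,
    uint64_t charge)
{
	obj->cred = cred;
	/* No credential means nothing was reserved, so record no charge. */
	obj->charge = (cred != NULL) ? charge : 0;
}

int
main(void)
{
	struct toy_cred cred = { .ruid = 1001 };
	struct toy_object a, b;

	toy_object_allocate_anon(&a, &cred, 1 << 20);
	toy_object_allocate_anon(&b, NULL, 1 << 20);
	assert(a.charge == (1 << 20) && b.charge == 0);
	return (0);
}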
482 if (!refcount_acquire_if_gt(&object->ref_count, 0)) { in vm_object_reference_vnode()
484 old = refcount_acquire(&object->ref_count); in vm_object_reference_vnode()
485 if (object->type == OBJT_VNODE && old == 0) in vm_object_reference_vnode()
486 vref(object->handle); in vm_object_reference_vnode()
503 if (object->type == OBJT_VNODE) in vm_object_reference()
506 refcount_acquire(&object->ref_count); in vm_object_reference()
507 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_reference()
524 old = refcount_acquire(&object->ref_count); in vm_object_reference_locked()
525 if (object->type == OBJT_VNODE && old == 0) in vm_object_reference_locked()
526 vref(object->handle); in vm_object_reference_locked()
527 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_reference_locked()
537 struct vnode *vp = (struct vnode *) object->handle; in vm_object_deallocate_vnode()
540 KASSERT(object->type == OBJT_VNODE, in vm_object_deallocate_vnode()
545 last = refcount_release(&object->ref_count); in vm_object_deallocate_vnode()
569 object = LIST_FIRST(&backing_object->shadow_head); in vm_object_deallocate_anon()
571 atomic_load_int(&backing_object->shadow_count) == 1, in vm_object_deallocate_anon()
573 backing_object->ref_count, in vm_object_deallocate_anon()
574 atomic_load_int(&backing_object->shadow_count))); in vm_object_deallocate_anon()
575 KASSERT((object->flags & OBJ_ANON) != 0, in vm_object_deallocate_anon()
593 if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 || in vm_object_deallocate_anon()
594 !refcount_acquire_if_not_zero(&object->ref_count)) { in vm_object_deallocate_anon()
598 backing_object = object->backing_object; in vm_object_deallocate_anon()
599 if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0) in vm_object_deallocate_anon()
631 if ((object->flags & OBJ_ANON) == 0) in vm_object_deallocate()
632 released = refcount_release_if_gt(&object->ref_count, 1); in vm_object_deallocate()
634 released = refcount_release_if_gt(&object->ref_count, 2); in vm_object_deallocate()
638 if (object->type == OBJT_VNODE) { in vm_object_deallocate()
640 if (object->type == OBJT_VNODE) { in vm_object_deallocate()
648 KASSERT(object->ref_count > 0, in vm_object_deallocate()
650 object->type)); in vm_object_deallocate()
656 if (!refcount_release(&object->ref_count)) { in vm_object_deallocate()
657 if (object->ref_count > 1 || in vm_object_deallocate()
658 atomic_load_int(&object->shadow_count) == 0) { in vm_object_deallocate()
659 if ((object->flags & OBJ_ANON) != 0 && in vm_object_deallocate()
660 object->ref_count == 1) in vm_object_deallocate()
677 temp = object->backing_object; in vm_object_deallocate()
679 KASSERT(object->type == OBJT_SWAP, in vm_object_deallocate()
684 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_deallocate()
701 KASSERT(object->shadow_count >= 1, in vm_object_sub_shadow()
703 atomic_subtract_int(&object->shadow_count, 1); in vm_object_sub_shadow()
711 backing_object = object->backing_object; in vm_object_backing_remove_locked()
715 KASSERT((object->flags & OBJ_COLLAPSING) == 0, in vm_object_backing_remove_locked()
719 if ((object->flags & OBJ_SHADOWLIST) != 0) { in vm_object_backing_remove_locked()
723 object->backing_object = NULL; in vm_object_backing_remove_locked()
733 backing_object = object->backing_object; in vm_object_backing_remove()
734 if ((object->flags & OBJ_SHADOWLIST) != 0) { in vm_object_backing_remove()
739 object->backing_object = NULL; in vm_object_backing_remove()
750 atomic_add_int(&backing_object->shadow_count, 1); in vm_object_backing_insert_locked()
751 if ((backing_object->flags & OBJ_ANON) != 0) { in vm_object_backing_insert_locked()
753 LIST_INSERT_HEAD(&backing_object->shadow_head, object, in vm_object_backing_insert_locked()
757 object->backing_object = backing_object; in vm_object_backing_insert_locked()
766 if ((backing_object->flags & OBJ_ANON) != 0) { in vm_object_backing_insert()
771 object->backing_object = backing_object; in vm_object_backing_insert()
772 atomic_add_int(&backing_object->shadow_count, 1); in vm_object_backing_insert()
786 if ((backing_object->flags & OBJ_ANON) != 0) { in vm_object_backing_insert_ref()
788 KASSERT((backing_object->flags & OBJ_DEAD) == 0, in vm_object_backing_insert_ref()
796 atomic_add_int(&backing_object->shadow_count, 1); in vm_object_backing_insert_ref()
797 object->backing_object = backing_object; in vm_object_backing_insert_ref()
810 * Note that the reference to backing_object->backing_object in vm_object_backing_transfer()
814 new_backing_object = backing_object->backing_object; in vm_object_backing_transfer()
817 if ((new_backing_object->flags & OBJ_ANON) != 0) { in vm_object_backing_transfer()
828 object->backing_object = new_backing_object; in vm_object_backing_transfer()
829 backing_object->backing_object = NULL; in vm_object_backing_transfer()
842 while ((object->flags & OBJ_COLLAPSING) != 0) { in vm_object_collapse_wait()
860 backing_object = object->backing_object; in vm_object_backing_collapse_wait()
862 (backing_object->flags & OBJ_ANON) == 0) in vm_object_backing_collapse_wait()
865 if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0) in vm_object_backing_collapse_wait()
886 KASSERT(p->object == object && in vm_object_terminate_single_page()
887 (p->ref_count & VPRC_OBJREF) != 0, in vm_object_terminate_single_page()
889 p->object = NULL; in vm_object_terminate_single_page()
891 KASSERT((object->flags & OBJ_UNMANAGED) != 0 || in vm_object_terminate_single_page()
913 if (object->resident_page_count == 0) in vm_object_terminate_pages()
916 vm_radix_reclaim_callback(&object->rtree, in vm_object_terminate_pages()
918 object->resident_page_count = 0; in vm_object_terminate_pages()
919 if (object->type == OBJT_VNODE) in vm_object_terminate_pages()
920 vdrop(object->handle); in vm_object_terminate_pages()
935 KASSERT((object->flags & OBJ_DEAD) != 0, in vm_object_terminate()
936 ("terminating non-dead obj %p", object)); in vm_object_terminate()
937 KASSERT((object->flags & OBJ_COLLAPSING) == 0, in vm_object_terminate()
939 KASSERT(object->backing_object == NULL, in vm_object_terminate()
943 * Wait for the pageout daemon and other current users to be in vm_object_terminate()
951 KASSERT(object->ref_count == 0, in vm_object_terminate()
953 object->ref_count)); in vm_object_terminate()
955 if ((object->flags & OBJ_PG_DTOR) == 0) in vm_object_terminate()
959 if (__predict_false(!LIST_EMPTY(&object->rvq))) in vm_object_terminate()
963 KASSERT(object->cred == NULL || (object->flags & OBJ_SWAP) != 0, in vm_object_terminate()
964 ("%s: non-swap obj %p has cred", __func__, object)); in vm_object_terminate()
976 * Make the page read-only so that we can clear the object flags. However, if
992 if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) { in vm_object_page_remove_write()
997 return (p->dirty != 0); in vm_object_page_remove_write()
1010 runlen = vm_radix_iter_lookup_range(pages, p->pindex + 1, in vm_object_page_clean_flush()
1011 &ma[1], vm_pageout_page_count - 1); in vm_object_page_clean_flush()
1060 if (!vm_object_mightbedirty(object) || object->resident_page_count == 0) in vm_object_page_clean()
1068 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); in vm_object_page_clean()
1069 allclean = tstart == 0 && tend >= object->size; in vm_object_page_clean()
1074 curgeneration = object->generation; in vm_object_page_clean()
1077 pi = p->pindex; in vm_object_page_clean()
1086 if (object->generation != curgeneration && in vm_object_page_clean()
1097 if (object->type == OBJT_VNODE) { in vm_object_page_clean()
1105 if (object->generation != curgeneration && in vm_object_page_clean()
1140 if (allclean && object->type == OBJT_VNODE) in vm_object_page_clean()
1141 object->cleangeneration = curgeneration; in vm_object_page_clean()
1174 while ((backing_object = object->backing_object) != NULL) { in vm_object_sync()
1176 offset += object->backing_object_offset; in vm_object_sync()
1179 if (object->size < OFF_TO_IDX(offset + size)) in vm_object_sync()
1180 size = IDX_TO_OFF(object->size) - offset; in vm_object_sync()
1190 * allow it to block internally on a page-by-page in vm_object_sync()
1194 if (object->type == OBJT_VNODE && in vm_object_sync()
1196 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) { in vm_object_sync()
1201 atop(size) == object->size) { in vm_object_sync()
1203 * If syncing the whole mapping of the file, in vm_object_sync()
1242 if ((object->type == OBJT_VNODE || in vm_object_sync()
1243 object->type == OBJT_DEVICE) && invalidate) { in vm_object_sync()
1244 if (object->type == OBJT_DEVICE) in vm_object_sync()
1272 if ((object->flags & OBJ_UNMANAGED) != 0) in vm_object_advice_applies()
1276 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) == in vm_object_advice_applies()
1333 * If the next page isn't resident in the top-level object, we in vm_object_madvise()
1336 * non-resident pages. in vm_object_madvise()
1338 if (m == NULL || pindex < m->pindex) { in vm_object_madvise()
1340 * Optimize a common case: if the top-level object has in vm_object_madvise()
1341 * no backing object, we can skip over the non-resident in vm_object_madvise()
1344 if (object->backing_object == NULL) { in vm_object_madvise()
1345 tpindex = (m != NULL && m->pindex < end) ? in vm_object_madvise()
1346 m->pindex : end; in vm_object_madvise()
1348 pindex, tpindex - pindex); in vm_object_madvise()
1362 backing_object = tobject->backing_object; in vm_object_madvise()
1367 OFF_TO_IDX(tobject->backing_object_offset); in vm_object_madvise()
1387 KASSERT((tm->flags & PG_FICTITIOUS) == 0, in vm_object_madvise()
1389 KASSERT((tm->oflags & VPO_UNMANAGED) == 0, in vm_object_madvise()
1409 vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); in vm_object_madvise()
1444 if (source != NULL && source->ref_count == 1 && in vm_object_shadow()
1445 (source->flags & OBJ_ANON) != 0) in vm_object_shadow()
1457 result->backing_object_offset = *offset; in vm_object_shadow()
1482 result->domain = source->domain; in vm_object_shadow()
1485 (source->flags & OBJ_COLORED)); in vm_object_shadow()
1486 result->pg_color = (source->pg_color + in vm_object_shadow()
1487 OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER - in vm_object_shadow()
1488 1)) - 1); in vm_object_shadow()
1517 orig_object = entry->object.vm_object; in vm_object_split()
1518 KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0, in vm_object_split()
1520 if ((orig_object->flags & OBJ_ANON) == 0) in vm_object_split()
1522 if (orig_object->ref_count <= 1) in vm_object_split()
1526 offidxstart = OFF_TO_IDX(entry->offset); in vm_object_split()
1527 size = atop(entry->end - entry->start); in vm_object_split()
1530 orig_object->cred, ptoa(size)); in vm_object_split()
1533 * We must wait for the orig_object to complete any in-progress in vm_object_split()
1546 new_object->domain = orig_object->domain; in vm_object_split()
1547 backing_object = orig_object->backing_object; in vm_object_split()
1550 new_object->backing_object_offset = in vm_object_split()
1551 orig_object->backing_object_offset + entry->offset; in vm_object_split()
1553 if (orig_object->cred != NULL) { in vm_object_split()
1554 crhold(orig_object->cred); in vm_object_split()
1555 KASSERT(orig_object->charge >= ptoa(size), in vm_object_split()
1556 ("orig_object->charge < 0")); in vm_object_split()
1557 orig_object->charge -= ptoa(size); in vm_object_split()
1596 if (vm_page_none_valid(m) && entry->wired_count == 0) { in vm_object_split()
1603 if (!vm_page_iter_rename(&pages, m, new_object, m->pindex - in vm_object_split()
1644 entry->object.vm_object = new_object; in vm_object_split()
1645 entry->offset = 0LL; in vm_object_split()
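The vm_object_split() hits show the charge for the split range moving with the pages: the new object is created against orig_object's credential for ptoa(size) bytes, and the original's charge drops by the same amount. A standalone sketch of that hand-off, using hypothetical toy_* names rather than the kernel types:

/*
 * Illustrative only: the new object takes over the bytes that back the
 * split range, and the original keeps the remainder of its charge.
 */
#include <assert.h>
#include <stdint.h>

#define TOY_PAGE_SIZE 4096ULL
#define toy_ptoa(x) ((uint64_t)(x) * TOY_PAGE_SIZE)

struct toy_object { uint64_t charge; };

static void
toy_split_charge(struct toy_object *orig, struct toy_object *new_obj,
    uint64_t size_pages)
{
	/* Mirrors the KASSERT in the listing: the range must be covered. */
	assert(orig->charge >= toy_ptoa(size_pages));
	new_obj->charge = toy_ptoa(size_pages);
	orig->charge -= toy_ptoa(size_pages);
}

int
main(void)
{
	struct toy_object orig = { .charge = toy_ptoa(64) }, new_obj = { 0 };

	toy_split_charge(&orig, &new_obj, 16);
	assert(orig.charge == toy_ptoa(48) && new_obj.charge == toy_ptoa(16));
	return (0);
}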
1657 backing_object = object->backing_object; in vm_object_collapse_scan_wait()
1660 KASSERT(p == NULL || p->object == object || p->object == backing_object, in vm_object_collapse_scan_wait()
1668 } else if (p->object == object) { in vm_object_collapse_scan_wait()
1692 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); in vm_object_collapse_scan()
1694 backing_object = object->backing_object; in vm_object_collapse_scan()
1695 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); in vm_object_collapse_scan()
1710 KASSERT(object->backing_object == backing_object, in vm_object_collapse_scan()
1712 object->backing_object, backing_object)); in vm_object_collapse_scan()
1713 KASSERT(p->object == backing_object, in vm_object_collapse_scan()
1715 p->object, backing_object)); in vm_object_collapse_scan()
1717 if (p->pindex < backing_offset_index || object->size <= in vm_object_collapse_scan()
1718 (new_pindex = p->pindex - backing_offset_index)) { in vm_object_collapse_scan()
1719 vm_pager_freespace(backing_object, p->pindex, 1); in vm_object_collapse_scan()
1770 vm_pager_freespace(backing_object, p->pindex, 1); in vm_object_collapse_scan()
1828 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON, in vm_object_collapse()
1840 KASSERT(object->ref_count > 0 && in vm_object_collapse()
1841 object->ref_count > atomic_load_int(&object->shadow_count), in vm_object_collapse()
1843 object->ref_count, atomic_load_int(&object->shadow_count))); in vm_object_collapse()
1844 KASSERT((backing_object->flags & in vm_object_collapse()
1847 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0, in vm_object_collapse()
1856 if (backing_object->ref_count == 1) { in vm_object_collapse()
1857 KASSERT(atomic_load_int(&backing_object->shadow_count) in vm_object_collapse()
1860 atomic_load_int(&backing_object->shadow_count))); in vm_object_collapse()
1880 OFF_TO_IDX(object->backing_object_offset), TRUE); in vm_object_collapse()
1887 object->backing_object_offset += in vm_object_collapse()
1888 backing_object->backing_object_offset; in vm_object_collapse()
1899 KASSERT(backing_object->ref_count == 1, ( in vm_object_collapse()
1900 "backing_object %p was somehow re-referenced during collapse!", in vm_object_collapse()
1903 (void)refcount_release(&backing_object->ref_count); in vm_object_collapse()
1927 new_backing_object = backing_object->backing_object; in vm_object_collapse()
1931 object->backing_object_offset += in vm_object_collapse()
1932 backing_object->backing_object_offset; in vm_object_collapse()
1939 (void)refcount_release(&backing_object->ref_count); in vm_object_collapse()
1940 KASSERT(backing_object->ref_count >= 1, ( in vm_object_collapse()
1959 * mapping, then it may be invalidated but not removed from the object.
1963 * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
1973 * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
1987 KASSERT((object->flags & OBJ_UNMANAGED) == 0 || in vm_object_page_remove()
1990 if (object->resident_page_count == 0) in vm_object_page_remove()
2007 * for instance by ensuring that all invalid->valid transitions in vm_object_page_remove()
2035 object->ref_count != 0) in vm_object_page_remove()
2044 KASSERT((p->flags & PG_FICTITIOUS) == 0, in vm_object_page_remove()
2049 object->ref_count != 0 && in vm_object_page_remove()
2052 if (p->dirty != 0) { in vm_object_page_remove()
2058 object->ref_count != 0 && !vm_page_try_remove_all(p)) in vm_object_page_remove()
2064 vm_pager_freespace(object, start, (end == 0 ? object->size : end) - in vm_object_page_remove()
2080 * contain non-fictitious, managed pages.
2091 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, in vm_object_page_noreuse()
2093 if (object->resident_page_count == 0) in vm_object_page_noreuse()
2106 * pages to vm_pager_get_pages() before it is applied to a non-
2133 pages.limit = pindex; in vm_object_populate()
2147 * NOTE: Only works at the moment if the second object is NULL -
2169 if ((prev_object->flags & OBJ_ANON) == 0) in vm_object_coalesce()
2183 if (prev_object->backing_object != NULL) { in vm_object_coalesce()
2192 if (prev_object->ref_count > 1 && in vm_object_coalesce()
2193 prev_object->size != next_pindex && in vm_object_coalesce()
2194 (prev_object->flags & OBJ_ONEMAPPING) == 0) { in vm_object_coalesce()
2200 * Account for the charge. in vm_object_coalesce()
2202 if (prev_object->cred != NULL) { in vm_object_coalesce()
2204 * If prev_object was charged, then this mapping, in vm_object_coalesce()
2206 * later. Non-NULL cred in the object would prevent in vm_object_coalesce()
2214 prev_object->cred)) { in vm_object_coalesce()
2218 prev_object->charge += ptoa(next_size); in vm_object_coalesce()
2225 if (next_pindex < prev_object->size) { in vm_object_coalesce()
2229 if (prev_object->cred != NULL) { in vm_object_coalesce()
2230 KASSERT(prev_object->charge >= in vm_object_coalesce()
2231 ptoa(prev_object->size - next_pindex), in vm_object_coalesce()
2234 prev_object->charge -= ptoa(prev_object->size - in vm_object_coalesce()
2243 if (next_pindex + next_size > prev_object->size) in vm_object_coalesce()
2244 prev_object->size = next_pindex + next_size; in vm_object_coalesce()
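The vm_object_coalesce() hits carry the accounting arithmetic for growing a charged anonymous object: the charge grows by ptoa(next_size), any overlap with pages the object already covers returns its share of the charge, and only then does the size advance. A simplified standalone sketch (toy_* names, and without the reservation-failure path that the kernel handles):

#include <assert.h>
#include <stdint.h>

#define TOY_PAGE_SIZE 4096ULL
#define toy_ptoa(x) ((uint64_t)(x) * TOY_PAGE_SIZE)

struct toy_object {
	uint64_t size;		/* in pages */
	uint64_t charge;	/* in bytes */
	int has_cred;		/* nonzero if a credential was charged */
};

static void
toy_coalesce_charge(struct toy_object *prev, uint64_t next_pindex,
    uint64_t next_size)
{
	if (prev->has_cred)
		prev->charge += toy_ptoa(next_size);	/* new reservation */
	if (next_pindex < prev->size && prev->has_cred) {
		/* Release the charge for pages the new range overlaps. */
		assert(prev->charge >= toy_ptoa(prev->size - next_pindex));
		prev->charge -= toy_ptoa(prev->size - next_pindex);
	}
	if (next_pindex + next_size > prev->size)
		prev->size = next_pindex + next_size;
}

int
main(void)
{
	struct toy_object prev = { .size = 10, .charge = toy_ptoa(10),
	    .has_cred = 1 };

	/* Extend by 4 pages starting at pindex 8: 2 pages overlap. */
	toy_coalesce_charge(&prev, 8, 4);
	assert(prev.size == 12 && prev.charge == toy_ptoa(12));
	return (0);
}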
2252 * and up to *rahead optional pages after m_src[count - 1]. In both cases, stop
2253 * the filling-in short on encountering a cached page, an object boundary limit,
2256 * m_dst[*rbehind] to m_dst[*rbehind + count -1].
2270 pindex = m->pindex; in vm_object_prepare_buf_pages()
2273 pindex - (mpred != NULL ? mpred->pindex + 1 : 0)); in vm_object_prepare_buf_pages()
2275 m = vm_page_alloc_iter(object, pindex - i - 1, in vm_object_prepare_buf_pages()
2280 ma_dst[j] = ma_dst[j + *rbehind - i]; in vm_object_prepare_buf_pages()
2285 ma_dst[*rbehind - i - 1] = m; in vm_object_prepare_buf_pages()
2291 m = ma_src[count - 1]; in vm_object_prepare_buf_pages()
2292 pindex = m->pindex + 1; in vm_object_prepare_buf_pages()
2295 (msucc != NULL ? msucc->pindex : object->size) - pindex); in vm_object_prepare_buf_pages()
2311 atomic_add_int(&object->generation, 1); in vm_object_set_writeable_dirty_()
2317 return (object->generation != object->cleangeneration); in vm_object_mightbedirty_()
2324 * find the highest-level page in the shadow chain and unwire it. A page
2325 * must exist at every page offset, and the highest-level page must be
2343 if ((object->flags & OBJ_FICTITIOUS) != 0) in vm_object_unwire()
2353 if (m == NULL || pindex < m->pindex) { in vm_object_unwire()
2356 * contain a page at the current index. Therefore, in vm_object_unwire()
2364 OFF_TO_IDX(tobject->backing_object_offset); in vm_object_unwire()
2365 tobject = tobject->backing_object; in vm_object_unwire()
2368 if ((tobject->flags & OBJ_FICTITIOUS) != 0) in vm_object_unwire()
2383 locked_depth--) { in vm_object_unwire()
2384 t1object = tobject->backing_object; in vm_object_unwire()
2385 if (tm->object != tobject) in vm_object_unwire()
2389 tobject = tm->object; in vm_object_unwire()
2402 for (tobject = object; locked_depth >= 1; locked_depth--) { in vm_object_unwire()
2403 t1object = tobject->backing_object; in vm_object_unwire()
2435 blockcount_acquire(&obj->busy, 1); in vm_object_busy()
2444 blockcount_release(&obj->busy, 1); in vm_object_unbusy()
2453 (void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM); in vm_object_busy_wait()
2460 * represent a mapping, the method used here is inexact. However, it
2468 return (obj->ref_count > atomic_load_int(&obj->shadow_count)); in vm_object_is_active()
2487 if (req->oldptr == NULL) { in vm_object_list_handler()
2495 if (obj->type == OBJT_DEAD) in vm_object_list_handler()
2504 want_path = !(swap_only || jailed(curthread->td_ucred)); in vm_object_list_handler()
2510 * list once added. This allows us to safely read obj->object_list in vm_object_list_handler()
2515 if (obj->type == OBJT_DEAD || in vm_object_list_handler()
2516 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) in vm_object_list_handler()
2519 if (obj->type == OBJT_DEAD || in vm_object_list_handler()
2520 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) { in vm_object_list_handler()
2525 kvo->kvo_size = ptoa(obj->size); in vm_object_list_handler()
2526 kvo->kvo_resident = obj->resident_page_count; in vm_object_list_handler()
2527 kvo->kvo_ref_count = obj->ref_count; in vm_object_list_handler()
2528 kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count); in vm_object_list_handler()
2529 kvo->kvo_memattr = obj->memattr; in vm_object_list_handler()
2530 kvo->kvo_active = 0; in vm_object_list_handler()
2531 kvo->kvo_inactive = 0; in vm_object_list_handler()
2532 kvo->kvo_laundry = 0; in vm_object_list_handler()
2533 kvo->kvo_flags = 0; in vm_object_list_handler()
2541 * reads of m->queue below racy, and we do not in vm_object_list_handler()
2547 kvo->kvo_active++; in vm_object_list_handler()
2549 kvo->kvo_inactive++; in vm_object_list_handler()
2551 kvo->kvo_laundry++; in vm_object_list_handler()
2555 kvo->kvo_vn_fileid = 0; in vm_object_list_handler()
2556 kvo->kvo_vn_fsid = 0; in vm_object_list_handler()
2557 kvo->kvo_vn_fsid_freebsd11 = 0; in vm_object_list_handler()
2561 kvo->kvo_type = vm_object_kvme_type(obj, want_path ? &vp : in vm_object_list_handler()
2565 } else if ((obj->flags & OBJ_ANON) != 0) { in vm_object_list_handler()
2566 MPASS(kvo->kvo_type == KVME_TYPE_SWAP); in vm_object_list_handler()
2567 kvo->kvo_me = (uintptr_t)obj; in vm_object_list_handler()
2569 kvo->kvo_backing_obj = (uintptr_t)obj->backing_object; in vm_object_list_handler()
2571 kvo->kvo_swapped = sp > UINT32_MAX ? UINT32_MAX : sp; in vm_object_list_handler()
2573 if (obj->type == OBJT_DEVICE || obj->type == OBJT_MGTDEVICE) { in vm_object_list_handler()
2574 cdev_pager_get_path(obj, kvo->kvo_path, in vm_object_list_handler()
2575 sizeof(kvo->kvo_path)); in vm_object_list_handler()
2578 if ((obj->flags & OBJ_SYSVSHM) != 0) { in vm_object_list_handler()
2579 kvo->kvo_flags |= KVMO_FLAG_SYSVSHM; in vm_object_list_handler()
2581 kvo->kvo_vn_fileid = key; in vm_object_list_handler()
2582 kvo->kvo_vn_fsid_freebsd11 = seq; in vm_object_list_handler()
2584 if ((obj->flags & OBJ_POSIXSHM) != 0) { in vm_object_list_handler()
2585 kvo->kvo_flags |= KVMO_FLAG_POSIXSHM; in vm_object_list_handler()
2586 shm_get_path(obj, kvo->kvo_path, in vm_object_list_handler()
2587 sizeof(kvo->kvo_path)); in vm_object_list_handler()
2592 if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) { in vm_object_list_handler()
2593 kvo->kvo_vn_fileid = va.va_fileid; in vm_object_list_handler()
2594 kvo->kvo_vn_fsid = va.va_fsid; in vm_object_list_handler()
2595 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid; in vm_object_list_handler()
2599 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path)); in vm_object_list_handler()
2604 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) in vm_object_list_handler()
2605 + strlen(kvo->kvo_path) + 1; in vm_object_list_handler()
2606 kvo->kvo_structsize = roundup(kvo->kvo_structsize, in vm_object_list_handler()
2608 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize); in vm_object_list_handler()
2638 * Since technically non-swap (default) objects participate in the
2671 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in _vm_object_in_map()
2672 tmpm = entry->object.sub_map; in _vm_object_in_map()
2678 } else if ((obj = entry->object.vm_object) != NULL) { in _vm_object_in_map()
2679 for (; obj; obj = obj->backing_object) in _vm_object_in_map()
2694 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) in vm_object_in_map()
2696 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { in vm_object_in_map()
2716 if ((object->flags & OBJ_ANON) != 0) { in DB_SHOW_COMMAND_FLAGS()
2717 if (object->ref_count == 0) { in DB_SHOW_COMMAND_FLAGS()
2720 (u_long)object->size); in DB_SHOW_COMMAND_FLAGS()
2726 object->ref_count, (u_long)object->size, in DB_SHOW_COMMAND_FLAGS()
2727 (u_long)object->size, in DB_SHOW_COMMAND_FLAGS()
2728 (void *)object->backing_object); in DB_SHOW_COMMAND_FLAGS()
2757 object, (int)object->type, (uintmax_t)object->size, in DB_SHOW_COMMAND()
2758 object->resident_page_count, object->ref_count, object->flags); in DB_SHOW_COMMAND()
2759 db_iprintf(" ruid %d charge %jx\n", in DB_SHOW_COMMAND()
2760 object->cred ? object->cred->cr_ruid : -1, in DB_SHOW_COMMAND()
2761 (uintmax_t)object->charge); in DB_SHOW_COMMAND()
2763 atomic_load_int(&object->shadow_count), in DB_SHOW_COMMAND()
2764 object->backing_object ? object->backing_object->ref_count : 0, in DB_SHOW_COMMAND()
2765 object->backing_object, (uintmax_t)object->backing_object_offset); in DB_SHOW_COMMAND()
2785 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); in DB_SHOW_COMMAND()
2792 db_indent -= 2; in DB_SHOW_COMMAND()
2798 /* XXX need this non-static entry for calling from vm_map_print. */
2826 } else if (start_m->pindex + rcount != m->pindex || in DB_SHOW_COMMAND_FLAGS()
2830 (long)start_m->pindex, rcount, in DB_SHOW_COMMAND_FLAGS()
2841 (long)start_m->pindex, rcount, in DB_SHOW_COMMAND_FLAGS()