Lines matching the full-text search query: end - of - charge
1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
5 * The Regents of the University of California. All rights reserved.
8 * The Mach Operating System project at Carnegie-Mellon University.
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
16 * notice, this list of conditions and the following disclaimer in the
18 * 3. Neither the name of the University nor the names of its contributors
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * notice and this permission notice appear in all copies of the
46 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
48 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
50 * Carnegie Mellon requests users of this software to return to
53 * School of Computer Science
55 * Pittsburgh PA 15213-3890
123 * page of memory exists within exactly one object.
127 * region of an object should be writeable.
129 * Associated with each object is a list of all resident
137 * objects from which they were virtual-copied.
140 * modified after time of creation are:
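The original header comment goes on to enumerate the fields that may change after an object is created; that list is not among the matched lines. As a reading aid, the sketch below reconstructs a condensed view of struct vm_object from the accesses visible throughout this listing. It is illustrative only: the authoritative definition lives in vm/vm_object.h, and the real types, list-head macros, and field order differ in places.

struct vm_object {                              /* condensed, illustrative sketch */
        struct rwlock lock;                     /* per-object lock ("vmobject") */
        TAILQ_HEAD(, vm_page) memq;             /* resident pages, as a queue */
        struct vm_radix rtree;                  /* resident pages, as a radix trie */
        LIST_HEAD(, vm_object) shadow_head;     /* anonymous objects shadowing this one */
        LIST_HEAD(, vm_reserv) rvq;             /* reservations backing this object */
        vm_pindex_t size;                       /* object size, in pages */
        int generation;                         /* bumped when the object may be dirtied */
        int cleangeneration;                    /* generation at the last full clean */
        u_int ref_count;                        /* managed with refcount(9) */
        int shadow_count;                       /* number of shadowing objects */
        vm_memattr_t memattr;                   /* default memory attribute for pages */
        objtype_t type;                         /* OBJT_SWAP, OBJT_VNODE, OBJT_DEAD, ... */
        u_short flags;                          /* OBJ_ANON, OBJ_DEAD, OBJ_COLLAPSING, ... */
        u_short pg_color;                       /* page color hint for allocation */
        blockcount_t paging_in_progress;        /* in-flight paging operations */
        blockcount_t busy;                      /* object-level busy count */
        int resident_page_count;                /* number of resident pages */
        struct vm_object *backing_object;       /* object shadowed by this one, if any */
        vm_ooffset_t backing_object_offset;     /* byte offset into backing_object */
        void *handle;                           /* e.g. the vnode for OBJT_VNODE */
        struct ucred *cred;                     /* swap accounting credential */
        vm_ooffset_t charge;                    /* bytes of swap charged to cred */
};

Note how the assertions in vm_object_zdtor() below walk essentially this field list, checking that each member has returned to its idle value before the object goes back to its UMA zone.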
167 "Number of sleeps for collapse");
182 KASSERT(object->ref_count == 0, in vm_object_zdtor()
183 ("object %p ref_count = %d", object, object->ref_count)); in vm_object_zdtor()
184 KASSERT(TAILQ_EMPTY(&object->memq), in vm_object_zdtor()
186 KASSERT(vm_radix_is_empty(&object->rtree), in vm_object_zdtor()
189 KASSERT(LIST_EMPTY(&object->rvq), in vm_object_zdtor()
194 ("object %p busy = %d", object, blockcount_read(&object->busy))); in vm_object_zdtor()
195 KASSERT(object->resident_page_count == 0, in vm_object_zdtor()
197 object, object->resident_page_count)); in vm_object_zdtor()
198 KASSERT(atomic_load_int(&object->shadow_count) == 0, in vm_object_zdtor()
200 object, atomic_load_int(&object->shadow_count))); in vm_object_zdtor()
201 KASSERT(object->type == OBJT_DEAD, in vm_object_zdtor()
202 ("object %p has non-dead type %d", in vm_object_zdtor()
203 object, object->type)); in vm_object_zdtor()
204 KASSERT(object->charge == 0 && object->cred == NULL, in vm_object_zdtor()
205 ("object %p has non-zero charge %ju (%p)", in vm_object_zdtor()
206 object, (uintmax_t)object->charge, object->cred)); in vm_object_zdtor()
216 rw_init_flags(&object->lock, "vmobject", RW_DUPOK | RW_NEW); in vm_object_zinit()
219 object->type = OBJT_DEAD; in vm_object_zinit()
220 vm_radix_init(&object->rtree); in vm_object_zinit()
221 refcount_init(&object->ref_count, 0); in vm_object_zinit()
222 blockcount_init(&object->paging_in_progress); in vm_object_zinit()
223 blockcount_init(&object->busy); in vm_object_zinit()
224 object->resident_page_count = 0; in vm_object_zinit()
225 atomic_store_int(&object->shadow_count, 0); in vm_object_zinit()
226 object->flags = OBJ_DEAD; in vm_object_zinit()
239 TAILQ_INIT(&object->memq); in _vm_object_allocate()
240 LIST_INIT(&object->shadow_head); in _vm_object_allocate()
242 object->type = type; in _vm_object_allocate()
243 object->flags = flags; in _vm_object_allocate()
245 pctrie_init(&object->un_pager.swp.swp_blks); in _vm_object_allocate()
246 object->un_pager.swp.writemappings = 0; in _vm_object_allocate()
252 * non-dead object. in _vm_object_allocate()
256 object->pg_color = 0; in _vm_object_allocate()
257 object->size = size; in _vm_object_allocate()
258 object->domain.dr_policy = NULL; in _vm_object_allocate()
259 object->generation = 1; in _vm_object_allocate()
260 object->cleangeneration = 1; in _vm_object_allocate()
261 refcount_init(&object->ref_count, 1); in _vm_object_allocate()
262 object->memattr = VM_MEMATTR_DEFAULT; in _vm_object_allocate()
263 object->cred = NULL; in _vm_object_allocate()
264 object->charge = 0; in _vm_object_allocate()
265 object->handle = handle; in _vm_object_allocate()
266 object->backing_object = NULL; in _vm_object_allocate()
267 object->backing_object_offset = (vm_ooffset_t) 0; in _vm_object_allocate()
269 LIST_INIT(&object->rvq); in _vm_object_allocate()
285 rw_init(&kernel_object->lock, "kernel vm object"); in vm_object_init()
286 vm_radix_init(&kernel_object->rtree); in vm_object_init()
287 _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS - in vm_object_init()
290 kernel_object->flags |= OBJ_COLORED; in vm_object_init()
291 kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); in vm_object_init()
293 kernel_object->un_pager.phys.ops = &default_phys_pg_ops; in vm_object_init()
296 * The lock portion of struct vm_object must be type stable due in vm_object_init()
319 object->flags &= ~bits; in vm_object_clear_flag()
337 if (object->type == OBJT_DEAD) in vm_object_set_memattr()
339 if (!TAILQ_EMPTY(&object->memq)) in vm_object_set_memattr()
342 object->memattr = memattr; in vm_object_set_memattr()
351 blockcount_acquire(&object->paging_in_progress, i); in vm_object_pip_add()
366 blockcount_release(&object->paging_in_progress, i); in vm_object_pip_wakeupn()
371 * from sleep/wakeup races due to identity changes. The lock is not re-acquired
378 (void)blockcount_sleep(&object->paging_in_progress, &object->lock, in vm_object_pip_sleep()
388 blockcount_wait(&object->paging_in_progress, &object->lock, waitid, in vm_object_pip_wait()
398 blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM); in vm_object_pip_wait_unlocked()
456 * Returns a new default object of the given size and marked as
462 struct ucred *cred, vm_size_t charge) in vm_object_allocate_anon() argument
468 else if ((backing_object->flags & OBJ_ANON) != 0) in vm_object_allocate_anon()
469 handle = backing_object->handle; in vm_object_allocate_anon()
475 object->cred = cred; in vm_object_allocate_anon()
476 object->charge = cred != NULL ? charge : 0; in vm_object_allocate_anon()
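A minimal usage sketch for the allocator above, assuming the visible prototype tail (struct ucred *cred, vm_size_t charge) is preceded by a size in pages and an optional backing object; the credential reference and the swap reservation are taken to be the caller's responsibility, since the code above only records the cred/charge pair.

/*
 * Hypothetical caller: create an anonymous object covering "len" bytes that
 * have already been swap-reserved against "cred".
 */
static vm_object_t
alloc_anon_backing(struct ucred *cred, vm_size_t len)
{
        vm_object_t obj;

        obj = vm_object_allocate_anon(atop(len), NULL, cred, len);
        /* obj->cred and obj->charge now record the reservation, per the code above. */
        return (obj);
}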
489 if (!refcount_acquire_if_gt(&object->ref_count, 0)) { in vm_object_reference_vnode()
491 old = refcount_acquire(&object->ref_count); in vm_object_reference_vnode()
492 if (object->type == OBJT_VNODE && old == 0) in vm_object_reference_vnode()
493 vref(object->handle); in vm_object_reference_vnode()
510 if (object->type == OBJT_VNODE) in vm_object_reference()
513 refcount_acquire(&object->ref_count); in vm_object_reference()
514 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_reference()
531 old = refcount_acquire(&object->ref_count); in vm_object_reference_locked()
532 if (object->type == OBJT_VNODE && old == 0) in vm_object_reference_locked()
533 vref(object->handle); in vm_object_reference_locked()
534 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_reference_locked()
539 * Handle deallocating an object of type OBJT_VNODE.
544 struct vnode *vp = (struct vnode *) object->handle; in vm_object_deallocate_vnode()
547 KASSERT(object->type == OBJT_VNODE, in vm_object_deallocate_vnode()
552 last = refcount_release(&object->ref_count); in vm_object_deallocate_vnode()
567 * single remaining shadow. This is a sibling of the reference we
576 object = LIST_FIRST(&backing_object->shadow_head); in vm_object_deallocate_anon()
578 atomic_load_int(&backing_object->shadow_count) == 1, in vm_object_deallocate_anon()
580 backing_object->ref_count, in vm_object_deallocate_anon()
581 atomic_load_int(&backing_object->shadow_count))); in vm_object_deallocate_anon()
582 KASSERT((object->flags & OBJ_ANON) != 0, in vm_object_deallocate_anon()
600 if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 || in vm_object_deallocate_anon()
601 !refcount_acquire_if_not_zero(&object->ref_count)) { in vm_object_deallocate_anon()
605 backing_object = object->backing_object; in vm_object_deallocate_anon()
606 if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0) in vm_object_deallocate_anon()
634 * of 1 may be a special case depending on the shadow count in vm_object_deallocate()
638 if ((object->flags & OBJ_ANON) == 0) in vm_object_deallocate()
639 released = refcount_release_if_gt(&object->ref_count, 1); in vm_object_deallocate()
641 released = refcount_release_if_gt(&object->ref_count, 2); in vm_object_deallocate()
645 if (object->type == OBJT_VNODE) { in vm_object_deallocate()
647 if (object->type == OBJT_VNODE) { in vm_object_deallocate()
655 KASSERT(object->ref_count > 0, in vm_object_deallocate()
657 object->type)); in vm_object_deallocate()
663 if (!refcount_release(&object->ref_count)) { in vm_object_deallocate()
664 if (object->ref_count > 1 || in vm_object_deallocate()
665 atomic_load_int(&object->shadow_count) == 0) { in vm_object_deallocate()
666 if ((object->flags & OBJ_ANON) != 0 && in vm_object_deallocate()
667 object->ref_count == 1) in vm_object_deallocate()
684 temp = object->backing_object; in vm_object_deallocate()
686 KASSERT(object->type == OBJT_SWAP, in vm_object_deallocate()
691 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_deallocate()
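Outside the VM internals the release-if-greater-than fast paths above are invisible; callers simply pair a reference with a deallocation. A minimal sketch (vm_object_deallocate() expects the object unlocked, and vm_object_reference() takes any locks it needs itself):

        /* Take a temporary reference so the object cannot be terminated. */
        vm_object_reference(object);            /* also vrefs the vnode for OBJT_VNODE */
        /* ... use the object ... */
        vm_object_deallocate(object);           /* last reference triggers termination */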
708 KASSERT(object->shadow_count >= 1, in vm_object_sub_shadow()
710 atomic_subtract_int(&object->shadow_count, 1); in vm_object_sub_shadow()
718 backing_object = object->backing_object; in vm_object_backing_remove_locked()
722 KASSERT((object->flags & OBJ_COLLAPSING) == 0, in vm_object_backing_remove_locked()
726 if ((object->flags & OBJ_SHADOWLIST) != 0) { in vm_object_backing_remove_locked()
730 object->backing_object = NULL; in vm_object_backing_remove_locked()
740 backing_object = object->backing_object; in vm_object_backing_remove()
741 if ((object->flags & OBJ_SHADOWLIST) != 0) { in vm_object_backing_remove()
746 object->backing_object = NULL; in vm_object_backing_remove()
757 atomic_add_int(&backing_object->shadow_count, 1); in vm_object_backing_insert_locked()
758 if ((backing_object->flags & OBJ_ANON) != 0) { in vm_object_backing_insert_locked()
760 LIST_INSERT_HEAD(&backing_object->shadow_head, object, in vm_object_backing_insert_locked()
764 object->backing_object = backing_object; in vm_object_backing_insert_locked()
773 if ((backing_object->flags & OBJ_ANON) != 0) { in vm_object_backing_insert()
778 object->backing_object = backing_object; in vm_object_backing_insert()
779 atomic_add_int(&backing_object->shadow_count, 1); in vm_object_backing_insert()
793 if ((backing_object->flags & OBJ_ANON) != 0) { in vm_object_backing_insert_ref()
795 KASSERT((backing_object->flags & OBJ_DEAD) == 0, in vm_object_backing_insert_ref()
803 atomic_add_int(&backing_object->shadow_count, 1); in vm_object_backing_insert_ref()
804 object->backing_object = backing_object; in vm_object_backing_insert_ref()
817 * Note that the reference to backing_object->backing_object in vm_object_backing_transfer()
821 new_backing_object = backing_object->backing_object; in vm_object_backing_transfer()
824 if ((new_backing_object->flags & OBJ_ANON) != 0) { in vm_object_backing_transfer()
835 object->backing_object = new_backing_object; in vm_object_backing_transfer()
836 backing_object->backing_object = NULL; in vm_object_backing_transfer()
849 while ((object->flags & OBJ_COLLAPSING) != 0) { in vm_object_collapse_wait()
867 backing_object = object->backing_object; in vm_object_backing_collapse_wait()
869 (backing_object->flags & OBJ_ANON) == 0) in vm_object_backing_collapse_wait()
872 if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0) in vm_object_backing_collapse_wait()
893 KASSERT(p->object == object && in vm_object_terminate_single_page()
894 (p->ref_count & VPRC_OBJREF) != 0, in vm_object_terminate_single_page()
896 p->object = NULL; in vm_object_terminate_single_page()
898 KASSERT((object->flags & OBJ_UNMANAGED) != 0 || in vm_object_terminate_single_page()
920 if (object->resident_page_count == 0) in vm_object_terminate_pages()
923 vm_radix_reclaim_callback(&object->rtree, in vm_object_terminate_pages()
925 TAILQ_INIT(&object->memq); in vm_object_terminate_pages()
926 object->resident_page_count = 0; in vm_object_terminate_pages()
927 if (object->type == OBJT_VNODE) in vm_object_terminate_pages()
928 vdrop(object->handle); in vm_object_terminate_pages()
943 KASSERT((object->flags & OBJ_DEAD) != 0, in vm_object_terminate()
944 ("terminating non-dead obj %p", object)); in vm_object_terminate()
945 KASSERT((object->flags & OBJ_COLLAPSING) == 0, in vm_object_terminate()
947 KASSERT(object->backing_object == NULL, in vm_object_terminate()
959 KASSERT(object->ref_count == 0, in vm_object_terminate()
961 object->ref_count)); in vm_object_terminate()
963 if ((object->flags & OBJ_PG_DTOR) == 0) in vm_object_terminate()
967 if (__predict_false(!LIST_EMPTY(&object->rvq))) in vm_object_terminate()
971 KASSERT(object->cred == NULL || (object->flags & OBJ_SWAP) != 0, in vm_object_terminate()
972 ("%s: non-swap obj %p has cred", __func__, object)); in vm_object_terminate()
984 * Make the page read-only so that we can clear the object flags. However, if
1000 if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) { in vm_object_page_remove_write()
1005 return (p->dirty != 0); in vm_object_page_remove_write()
1012 * Clean all dirty pages in the specified range of object. Leaves page
1024 * Odd semantics: if start == end, we clean everything.
1032 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, in vm_object_page_clean() argument
1042 if (!vm_object_mightbedirty(object) || object->resident_page_count == 0) in vm_object_page_clean()
1050 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); in vm_object_page_clean()
1051 allclean = tstart == 0 && tend >= object->size; in vm_object_page_clean()
1055 curgeneration = object->generation; in vm_object_page_clean()
1058 pi = p->pindex; in vm_object_page_clean()
1065 if (object->generation != curgeneration && in vm_object_page_clean()
1075 if (object->type == OBJT_VNODE) { in vm_object_page_clean()
1082 if (object->generation != curgeneration && in vm_object_page_clean()
1088 * that even the first page of the run is not fully in vm_object_page_clean()
1117 if (allclean && object->type == OBJT_VNODE) in vm_object_page_clean()
1118 object->cleangeneration = curgeneration; in vm_object_page_clean()
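A sketch of a whole-object flush using the start == end (here 0, 0) special case noted above. Assumptions not visible in this excerpt: the final parameter is an int flags word (OBJPC_SYNC forces synchronous writeback), the object must be write-locked, and for a vnode-backed object the vnode lock is held by the caller, which is what vm_object_sync() below arranges.

        /* Sketch: synchronously write back every dirty page of the object. */
        VM_OBJECT_WLOCK(obj);
        (void)vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
        VM_OBJECT_WUNLOCK(obj);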
1126 vm_page_t ma[2 * vm_pageout_page_count - 1], tp; in vm_object_page_collect_flush()
1153 ma[--base] = tp; in vm_object_page_collect_flush()
1156 vm_pageout_flush(&ma[base], count, pagerflags, nitems(ma) / 2 - base, in vm_object_page_collect_flush()
1169 * mappings to the specified range of pages must be removed before this
1190 while ((backing_object = object->backing_object) != NULL) { in vm_object_sync()
1192 offset += object->backing_object_offset; in vm_object_sync()
1195 if (object->size < OFF_TO_IDX(offset + size)) in vm_object_sync()
1196 size = IDX_TO_OFF(object->size) - offset; in vm_object_sync()
1206 * allow it to block internally on a page-by-page in vm_object_sync()
1210 if (object->type == OBJT_VNODE && in vm_object_sync()
1212 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) { in vm_object_sync()
1217 atop(size) == object->size) { in vm_object_sync()
1219 * If syncing the whole mapping of the file, in vm_object_sync()
1258 if ((object->type == OBJT_VNODE || in vm_object_sync()
1259 object->type == OBJT_DEVICE) && invalidate) { in vm_object_sync()
1260 if (object->type == OBJT_DEVICE) in vm_object_sync()
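vm_object_sync() is the object-level entry point that msync(2) ultimately reaches. A hedged sketch of a call, assuming the conventional prototype (byte offset, byte size, then syncio and invalidate booleans, none of which appear in this excerpt) and a reference held on the object across the call:

        vm_object_reference(object);
        (void)vm_object_sync(object,
            offset,             /* byte offset within the object */
            size,               /* byte length of the range */
            TRUE,               /* syncio: wait for the writes to complete */
            FALSE);             /* invalidate: also discard the pages when TRUE */
        vm_object_deallocate(object);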
1288 if ((object->flags & OBJ_UNMANAGED) != 0) in vm_object_advice_applies()
1292 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) == in vm_object_advice_applies()
1326 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, in vm_object_madvise() argument
1342 for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { in vm_object_madvise()
1346 * If the next page isn't resident in the top-level object, we in vm_object_madvise()
1349 * non-resident pages. in vm_object_madvise()
1351 if (m == NULL || pindex < m->pindex) { in vm_object_madvise()
1353 * Optimize a common case: if the top-level object has in vm_object_madvise()
1354 * no backing object, we can skip over the non-resident in vm_object_madvise()
1357 if (object->backing_object == NULL) { in vm_object_madvise()
1358 tpindex = (m != NULL && m->pindex < end) ? in vm_object_madvise()
1359 m->pindex : end; in vm_object_madvise()
1361 pindex, tpindex - pindex); in vm_object_madvise()
1362 if ((pindex = tpindex) == end) in vm_object_madvise()
1375 backing_object = tobject->backing_object; in vm_object_madvise()
1380 OFF_TO_IDX(tobject->backing_object_offset); in vm_object_madvise()
1400 KASSERT((tm->flags & PG_FICTITIOUS) == 0, in vm_object_madvise()
1402 KASSERT((tm->oflags & VPO_UNMANAGED) == 0, in vm_object_madvise()
1421 vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); in vm_object_madvise()
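The advice loop above walks the shadow chain itself, so a caller passes the top-level object unlocked together with a page-index range. A short sketch, assuming the fourth parameter is the advice value from the madvise(2) family:

        /* Pages [pindex, end) are unlikely to be needed soon; allow their reuse. */
        vm_object_madvise(object, pindex, end, MADV_DONTNEED);

        /* MADV_FREE additionally lets dirty pages be discarded without writeback. */
        vm_object_madvise(object, pindex, end, MADV_FREE);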
1453 * harmless and we will end up with an extra shadow object that in vm_object_shadow()
1456 if (source != NULL && source->ref_count == 1 && in vm_object_shadow()
1457 (source->flags & OBJ_ANON) != 0) in vm_object_shadow()
1469 result->backing_object_offset = *offset; in vm_object_shadow()
1478 * the source object. Net result: no change of in vm_object_shadow()
1494 result->domain = source->domain; in vm_object_shadow()
1497 (source->flags & OBJ_COLORED)); in vm_object_shadow()
1498 result->pg_color = (source->pg_color + in vm_object_shadow()
1499 OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER - in vm_object_shadow()
1500 1)) - 1); in vm_object_shadow()
1517 * easier removal of unused pages, and keeps object inheritance from
1529 orig_object = entry->object.vm_object; in vm_object_split()
1530 KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0, in vm_object_split()
1532 if ((orig_object->flags & OBJ_ANON) == 0) in vm_object_split()
1534 if (orig_object->ref_count <= 1) in vm_object_split()
1538 offidxstart = OFF_TO_IDX(entry->offset); in vm_object_split()
1539 size = atop(entry->end - entry->start); in vm_object_split()
1542 orig_object->cred, ptoa(size)); in vm_object_split()
1545 * We must wait for the orig_object to complete any in-progress in vm_object_split()
1558 new_object->domain = orig_object->domain; in vm_object_split()
1559 backing_object = orig_object->backing_object; in vm_object_split()
1562 new_object->backing_object_offset = in vm_object_split()
1563 orig_object->backing_object_offset + entry->offset; in vm_object_split()
1565 if (orig_object->cred != NULL) { in vm_object_split()
1566 crhold(orig_object->cred); in vm_object_split()
1567 KASSERT(orig_object->charge >= ptoa(size), in vm_object_split()
1568 ("orig_object->charge < 0")); in vm_object_split()
1569 orig_object->charge -= ptoa(size); in vm_object_split()
1608 if (!vm_page_iter_rename(&pages, m, new_object, m->pindex - in vm_object_split()
1621 * If some of the reservation's allocated pages remain with in vm_object_split()
1625 * with the original object. If, however, all of the in vm_object_split()
1628 * beneficial. Determining which of these two cases applies in vm_object_split()
1642 TAILQ_FOREACH(m, &new_object->memq, listq) in vm_object_split()
1648 entry->object.vm_object = new_object; in vm_object_split()
1649 entry->offset = 0LL; in vm_object_split()
1661 backing_object = object->backing_object; in vm_object_collapse_scan_wait()
1664 KASSERT(p == NULL || p->object == object || p->object == backing_object, in vm_object_collapse_scan_wait()
1672 } else if (p->object == object) { in vm_object_collapse_scan_wait()
1696 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); in vm_object_collapse_scan()
1698 backing_object = object->backing_object; in vm_object_collapse_scan()
1699 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); in vm_object_collapse_scan()
1707 new_pindex = p->pindex - backing_offset_index; in vm_object_collapse_scan()
1717 KASSERT(object->backing_object == backing_object, in vm_object_collapse_scan()
1719 object->backing_object, backing_object)); in vm_object_collapse_scan()
1720 KASSERT(p->object == backing_object, in vm_object_collapse_scan()
1722 p->object, backing_object)); in vm_object_collapse_scan()
1724 if (p->pindex < backing_offset_index || in vm_object_collapse_scan()
1725 new_pindex >= object->size) { in vm_object_collapse_scan()
1726 vm_pager_freespace(backing_object, p->pindex, 1); in vm_object_collapse_scan()
1777 vm_pager_freespace(backing_object, p->pindex, 1); in vm_object_collapse_scan()
1835 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON, in vm_object_collapse()
1847 KASSERT(object->ref_count > 0 && in vm_object_collapse()
1848 object->ref_count > atomic_load_int(&object->shadow_count), in vm_object_collapse()
1850 object->ref_count, atomic_load_int(&object->shadow_count))); in vm_object_collapse()
1851 KASSERT((backing_object->flags & in vm_object_collapse()
1854 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0, in vm_object_collapse()
1863 if (backing_object->ref_count == 1) { in vm_object_collapse()
1864 KASSERT(atomic_load_int(&backing_object->shadow_count) in vm_object_collapse()
1867 atomic_load_int(&backing_object->shadow_count))); in vm_object_collapse()
1887 OFF_TO_IDX(object->backing_object_offset), TRUE); in vm_object_collapse()
1894 object->backing_object_offset += in vm_object_collapse()
1895 backing_object->backing_object_offset; in vm_object_collapse()
1904 * necessary is to dispose of it. in vm_object_collapse()
1906 KASSERT(backing_object->ref_count == 1, ( in vm_object_collapse()
1907 "backing_object %p was somehow re-referenced during collapse!", in vm_object_collapse()
1910 (void)refcount_release(&backing_object->ref_count); in vm_object_collapse()
1934 new_backing_object = backing_object->backing_object; in vm_object_collapse()
1938 object->backing_object_offset += in vm_object_collapse()
1939 backing_object->backing_object_offset; in vm_object_collapse()
1946 (void)refcount_release(&backing_object->ref_count); in vm_object_collapse()
1947 KASSERT(backing_object->ref_count >= 1, ( in vm_object_collapse()
1963 * For the given object, either frees or invalidates each of the
1965 * wired for any reason other than the existence of a managed, wired
1967 * Pages are specified by the given range ["start", "end") and the option
1968 * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range
1969 * extends from "start" to the end of the object. If the option
1970 * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
1980 * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
1981 * backed pages. In both of these cases, the option OBJPR_CLEANONLY must
1987 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, in vm_object_page_remove() argument
1994 KASSERT((object->flags & OBJ_UNMANAGED) == 0 || in vm_object_page_remove()
1997 if (object->resident_page_count == 0) in vm_object_page_remove()
2000 vm_page_iter_limit_init(&pages, object, end); in vm_object_page_remove()
2011 * valid using only the busy lock, so the result of this check in vm_object_page_remove()
2013 * for instance by ensuring that all invalid->valid transitions in vm_object_page_remove()
2022 * of managed, wired mappings, then it cannot be freed. For in vm_object_page_remove()
2040 object->ref_count != 0) in vm_object_page_remove()
2049 KASSERT((p->flags & PG_FICTITIOUS) == 0, in vm_object_page_remove()
2054 object->ref_count != 0 && in vm_object_page_remove()
2057 if (p->dirty != 0) { in vm_object_page_remove()
2063 object->ref_count != 0 && !vm_page_try_remove_all(p)) in vm_object_page_remove()
2069 vm_pager_freespace(object, start, (end == 0 ? object->size : end) - in vm_object_page_remove()
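Putting the rules quoted above together, two representative calls, assuming the fourth parameter is an options word and that the caller holds the object write lock:

        VM_OBJECT_WLOCK(object);

        /* Truncation-style use: free every page from "start" to the end of the object. */
        vm_object_page_remove(object, start, 0, 0);

        /* msync(MS_INVALIDATE)-style use on a vnode object: only toss clean pages. */
        vm_object_page_remove(object, start, end, OBJPR_CLEANONLY);

        VM_OBJECT_WUNLOCK(object);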
2077 * the head of the inactive queue. This bypasses regular LRU
2080 * be queued. Pages are specified by the range ["start", "end").
2081 * As a special case, if "end" is zero, then the range extends from
2082 * "start" to the end of the object.
2085 * contain non-fictitious, managed pages.
2090 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) in vm_object_page_noreuse() argument
2095 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, in vm_object_page_noreuse()
2097 if (object->resident_page_count == 0) in vm_object_page_noreuse()
2105 for (; p != NULL && (p->pindex < end || end == 0); p = next) { in vm_object_page_noreuse()
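The prototype is fully visible above; usage is a single call over a page-index range, with the object locked (the exact lock strength required is not shown in this excerpt, so a read lock is assumed here):

        /* Hint that pages [start, end) will not be reused soon, bypassing LRU aging. */
        VM_OBJECT_RLOCK(object);
        vm_object_page_noreuse(object, start, end);     /* end == 0: to the end of the object */
        VM_OBJECT_RUNLOCK(object);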
2112 * Populate the specified range of the object with valid pages. Returns
2115 * Note: This function should be optimized to pass a larger array of
2116 * pages to vm_pager_get_pages() before it is applied to a non-
2122 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) in vm_object_populate() argument
2129 for (pindex = start; pindex < end; pindex++) { in vm_object_populate()
2141 while (m != NULL && m->pindex < pindex) { in vm_object_populate()
2146 return (pindex == end); in vm_object_populate()
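A hedged sketch of a caller that wants the entire range resident before continuing, relying on the truth value returned above (pindex == end) and assuming the object is write-locked around the call:

        /* Fault in pages [0, size); bail out if the pager could not supply one. */
        VM_OBJECT_WLOCK(object);
        if (!vm_object_populate(object, 0, object->size)) {
                VM_OBJECT_WUNLOCK(object);
                return (EIO);           /* hypothetical error handling */
        }
        VM_OBJECT_WUNLOCK(object);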
2152 * regions of memory into a single object.
2156 * NOTE: Only works at the moment if the second object is NULL -
2162 * prev_size Size of reference to prev_object
2163 * next_size Size of reference to the second object
2178 if ((prev_object->flags & OBJ_ANON) == 0) in vm_object_coalesce()
2189 * another object . has a copy elsewhere (any of which mean that the in vm_object_coalesce()
2192 if (prev_object->backing_object != NULL) { in vm_object_coalesce()
2201 if (prev_object->ref_count > 1 && in vm_object_coalesce()
2202 prev_object->size != next_pindex && in vm_object_coalesce()
2203 (prev_object->flags & OBJ_ONEMAPPING) == 0) { in vm_object_coalesce()
2209 * Account for the charge. in vm_object_coalesce()
2211 if (prev_object->cred != NULL) { in vm_object_coalesce()
2215 * later. Non-NULL cred in the object would prevent in vm_object_coalesce()
2216 * swap reservation during enabling of the write in vm_object_coalesce()
2218 * cause allocation of the separate object for the map in vm_object_coalesce()
2223 prev_object->cred)) { in vm_object_coalesce()
2227 prev_object->charge += ptoa(next_size); in vm_object_coalesce()
2234 if (next_pindex < prev_object->size) { in vm_object_coalesce()
2238 if (prev_object->cred != NULL) { in vm_object_coalesce()
2239 KASSERT(prev_object->charge >= in vm_object_coalesce()
2240 ptoa(prev_object->size - next_pindex), in vm_object_coalesce()
2243 prev_object->charge -= ptoa(prev_object->size - in vm_object_coalesce()
2252 if (next_pindex + next_size > prev_object->size) in vm_object_coalesce()
2253 prev_object->size = next_pindex + next_size; in vm_object_coalesce()
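A sketch of the way the map code is assumed to use this when a new anonymous mapping lands immediately after an existing one: on success the previous object simply grows, so no second object is allocated. The exact prototype (byte offset and byte sizes plus a trailing flag saying whether the charge was already reserved) is an assumption, not shown in this excerpt.

        if (vm_object_coalesce(prev_object,
            prev_offset,        /* byte offset of the existing reference */
            prev_size,          /* byte size of the existing reference */
            next_size,          /* byte size of the region being appended */
            FALSE)) {           /* assumed: charge not pre-reserved by the caller */
                /* prev_object now covers the new range as well. */
        } else {
                /* Fall back to allocating a separate anonymous object. */
        }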
2262 atomic_add_int(&object->generation, 1); in vm_object_set_writeable_dirty_()
2268 return (object->generation != object->cleangeneration); in vm_object_mightbedirty_()
2274 * For each page offset within the specified range of the given object,
2275 * find the highest-level page in the shadow chain and unwire it. A page
2276 * must exist at every page offset, and the highest-level page must be
2291 ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); in vm_object_unwire()
2292 /* The wired count of a fictitious page never changes. */ in vm_object_unwire()
2293 if ((object->flags & OBJ_FICTITIOUS) != 0) in vm_object_unwire()
2302 if (m == NULL || pindex < m->pindex) { in vm_object_unwire()
2313 OFF_TO_IDX(tobject->backing_object_offset); in vm_object_unwire()
2314 tobject = tobject->backing_object; in vm_object_unwire()
2317 if ((tobject->flags & OBJ_FICTITIOUS) != 0) in vm_object_unwire()
2332 locked_depth--) { in vm_object_unwire()
2333 t1object = tobject->backing_object; in vm_object_unwire()
2334 if (tm->object != tobject) in vm_object_unwire()
2338 tobject = tm->object; in vm_object_unwire()
2350 for (tobject = object; locked_depth >= 1; locked_depth--) { in vm_object_unwire()
2351 t1object = tobject->backing_object; in vm_object_unwire()
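A sketch of the call made when a wired mapping is torn down, assuming the usual parameters are a byte offset, a byte length (a multiple of PAGE_SIZE, per the assertion above), and the page queue that should receive the unwired pages; entry_offset and entry_len are placeholder names.

        /* Unwire the highest-level pages backing a formerly wired map entry. */
        vm_object_unwire(object,
            entry_offset,       /* byte offset into the object */
            entry_len,          /* byte length, multiple of PAGE_SIZE */
            PQ_ACTIVE);         /* queue for the released pages */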
2360 * no vnode allocated at the time of the call.
2383 blockcount_acquire(&obj->busy, 1); in vm_object_busy()
2384 /* The fence is required to order loads of page busy. */ in vm_object_busy()
2392 blockcount_release(&obj->busy, 1); in vm_object_unbusy()
2401 (void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM); in vm_object_busy_wait()
2416 return (obj->ref_count > atomic_load_int(&obj->shadow_count)); in vm_object_is_active()
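The busy count is a blockcount(9): vm_object_busy() and vm_object_unbusy() bracket a section, and vm_object_busy_wait() sleeps until no such section remains. A minimal sketch of the bracketing, with the concrete use cases left to the callers (none appear in this excerpt):

        vm_object_busy(object);
        /* ... examine page state; the acquire fence above orders these loads ... */
        vm_object_unbusy(object);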
2436 if (req->oldptr == NULL) { in vm_object_list_handler()
2439 * estimate of the space needed for a subsequent call. in vm_object_list_handler()
2444 if (obj->type == OBJT_DEAD) in vm_object_list_handler()
2453 want_path = !(swap_only || jailed(curthread->td_ucred)); in vm_object_list_handler()
2459 * list once added. This allows us to safely read obj->object_list in vm_object_list_handler()
2464 if (obj->type == OBJT_DEAD || in vm_object_list_handler()
2465 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) in vm_object_list_handler()
2468 if (obj->type == OBJT_DEAD || in vm_object_list_handler()
2469 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) { in vm_object_list_handler()
2474 kvo->kvo_size = ptoa(obj->size); in vm_object_list_handler()
2475 kvo->kvo_resident = obj->resident_page_count; in vm_object_list_handler()
2476 kvo->kvo_ref_count = obj->ref_count; in vm_object_list_handler()
2477 kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count); in vm_object_list_handler()
2478 kvo->kvo_memattr = obj->memattr; in vm_object_list_handler()
2479 kvo->kvo_active = 0; in vm_object_list_handler()
2480 kvo->kvo_inactive = 0; in vm_object_list_handler()
2481 kvo->kvo_flags = 0; in vm_object_list_handler()
2483 TAILQ_FOREACH(m, &obj->memq, listq) { in vm_object_list_handler()
2488 * reads of m->queue below racy, and we do not in vm_object_list_handler()
2491 * approximation of the system anyway. in vm_object_list_handler()
2494 kvo->kvo_active++; in vm_object_list_handler()
2496 kvo->kvo_inactive++; in vm_object_list_handler()
2498 kvo->kvo_laundry++; in vm_object_list_handler()
2502 kvo->kvo_vn_fileid = 0; in vm_object_list_handler()
2503 kvo->kvo_vn_fsid = 0; in vm_object_list_handler()
2504 kvo->kvo_vn_fsid_freebsd11 = 0; in vm_object_list_handler()
2508 kvo->kvo_type = vm_object_kvme_type(obj, want_path ? &vp : in vm_object_list_handler()
2512 } else if ((obj->flags & OBJ_ANON) != 0) { in vm_object_list_handler()
2513 MPASS(kvo->kvo_type == KVME_TYPE_SWAP); in vm_object_list_handler()
2514 kvo->kvo_me = (uintptr_t)obj; in vm_object_list_handler()
2516 kvo->kvo_backing_obj = (uintptr_t)obj->backing_object; in vm_object_list_handler()
2518 kvo->kvo_swapped = sp > UINT32_MAX ? UINT32_MAX : sp; in vm_object_list_handler()
2520 if ((obj->type == OBJT_DEVICE || obj->type == OBJT_MGTDEVICE) && in vm_object_list_handler()
2521 (obj->flags & OBJ_CDEVH) != 0) { in vm_object_list_handler()
2522 cdev = obj->un_pager.devp.handle; in vm_object_list_handler()
2526 strlcpy(kvo->kvo_path, cdev->si_name, in vm_object_list_handler()
2527 sizeof(kvo->kvo_path)); in vm_object_list_handler()
2533 if ((obj->flags & OBJ_SYSVSHM) != 0) { in vm_object_list_handler()
2534 kvo->kvo_flags |= KVMO_FLAG_SYSVSHM; in vm_object_list_handler()
2536 kvo->kvo_vn_fileid = key; in vm_object_list_handler()
2537 kvo->kvo_vn_fsid_freebsd11 = seq; in vm_object_list_handler()
2539 if ((obj->flags & OBJ_POSIXSHM) != 0) { in vm_object_list_handler()
2540 kvo->kvo_flags |= KVMO_FLAG_POSIXSHM; in vm_object_list_handler()
2541 shm_get_path(obj, kvo->kvo_path, in vm_object_list_handler()
2542 sizeof(kvo->kvo_path)); in vm_object_list_handler()
2547 if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) { in vm_object_list_handler()
2548 kvo->kvo_vn_fileid = va.va_fileid; in vm_object_list_handler()
2549 kvo->kvo_vn_fsid = va.va_fsid; in vm_object_list_handler()
2550 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid; in vm_object_list_handler()
2554 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path)); in vm_object_list_handler()
2559 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) in vm_object_list_handler()
2560 + strlen(kvo->kvo_path) + 1; in vm_object_list_handler()
2561 kvo->kvo_structsize = roundup(kvo->kvo_structsize, in vm_object_list_handler()
2563 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize); in vm_object_list_handler()
2582 "List of VM objects");
2591 * This sysctl returns list of the anonymous or swap objects. Intent
2593 * Since technically non-swap (default) objects participate in the
2600 "List of swap VM objects");
2626 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in _vm_object_in_map()
2627 tmpm = entry->object.sub_map; in _vm_object_in_map()
2633 } else if ((obj = entry->object.vm_object) != NULL) { in _vm_object_in_map()
2634 for (; obj; obj = obj->backing_object) in _vm_object_in_map()
2649 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) in vm_object_in_map()
2651 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { in vm_object_in_map()
2671 if ((object->flags & OBJ_ANON) != 0) { in DB_SHOW_COMMAND_FLAGS()
2672 if (object->ref_count == 0) { in DB_SHOW_COMMAND_FLAGS()
2674 (long)object->size); in DB_SHOW_COMMAND_FLAGS()
2680 object->ref_count, (u_long)object->size, in DB_SHOW_COMMAND_FLAGS()
2681 (u_long)object->size, in DB_SHOW_COMMAND_FLAGS()
2682 (void *)object->backing_object); in DB_SHOW_COMMAND_FLAGS()
2710 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", in DB_SHOW_COMMAND()
2711 object, (int)object->type, (uintmax_t)object->size, in DB_SHOW_COMMAND()
2712 object->resident_page_count, object->ref_count, object->flags, in DB_SHOW_COMMAND()
2713 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); in DB_SHOW_COMMAND()
2715 atomic_load_int(&object->shadow_count), in DB_SHOW_COMMAND()
2716 object->backing_object ? object->backing_object->ref_count : 0, in DB_SHOW_COMMAND()
2717 object->backing_object, (uintmax_t)object->backing_object_offset); in DB_SHOW_COMMAND()
2724 TAILQ_FOREACH(p, &object->memq, listq) { in DB_SHOW_COMMAND()
2736 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); in DB_SHOW_COMMAND()
2743 db_indent -= 2; in DB_SHOW_COMMAND()
2749 /* XXX need this non-static entry for calling from vm_map_print. */
2775 pa = -1; in DB_SHOW_COMMAND_FLAGS()
2776 TAILQ_FOREACH(m, &object->memq, listq) { in DB_SHOW_COMMAND_FLAGS()
2778 prev_m->pindex + 1 != m->pindex) { in DB_SHOW_COMMAND_FLAGS()
2798 fidx = m->pindex; in DB_SHOW_COMMAND_FLAGS()