Lines Matching full:object
62 * Virtual memory object module.
113 static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
118 static void vm_object_backing_remove(vm_object_t object);
123 * page of memory exists within exactly one object.
125 * An object is only deallocated when all "references"
127 * region of an object should be writeable.
129 * Associated with each object is a list of all resident
130 * memory pages belonging to that object; this list is
131 * maintained by the "vm_page" module, and locked by the object's
134 * Each object also records a "pager" routine which is
139 * The only items within the object structure which are
141 * reference count locked by object's lock
142 * pager routine locked by object's lock
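The matched header comments above summarize the locking model: the resident page list, the reference count, and the pager routine are all protected by the per-object lock. A minimal illustrative sketch (not among the matched lines; "obj" is a hypothetical vm_object_t the caller already references, and the usual vm/vm_object.h environment is assumed) of honoring that rule with the VM_OBJECT_WLOCK/WUNLOCK macros:

        VM_OBJECT_WLOCK(obj);                          /* take the per-object lock */
        if (obj->type != OBJT_DEAD)                    /* pager type is stable under the lock */
                vm_object_set_flag(obj, OBJ_NOSPLIT);  /* example of a lock-protected field update */
        VM_OBJECT_WUNLOCK(obj);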
147 struct mtx vm_object_list_mtx; /* lock for object list and count */
151 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
152 "VM object stats");
157 "VM object collapses");
162 "VM object bypasses");
179 vm_object_t object; in vm_object_zdtor() local
181 object = (vm_object_t)mem; in vm_object_zdtor()
182 KASSERT(object->ref_count == 0, in vm_object_zdtor()
183 ("object %p ref_count = %d", object, object->ref_count)); in vm_object_zdtor()
184 KASSERT(TAILQ_EMPTY(&object->memq), in vm_object_zdtor()
185 ("object %p has resident pages in its memq", object)); in vm_object_zdtor()
186 KASSERT(vm_radix_is_empty(&object->rtree), in vm_object_zdtor()
187 ("object %p has resident pages in its trie", object)); in vm_object_zdtor()
189 KASSERT(LIST_EMPTY(&object->rvq), in vm_object_zdtor()
190 ("object %p has reservations", in vm_object_zdtor()
191 object)); in vm_object_zdtor()
193 KASSERT(!vm_object_busied(object), in vm_object_zdtor()
194 ("object %p busy = %d", object, blockcount_read(&object->busy))); in vm_object_zdtor()
195 KASSERT(object->resident_page_count == 0, in vm_object_zdtor()
196 ("object %p resident_page_count = %d", in vm_object_zdtor()
197 object, object->resident_page_count)); in vm_object_zdtor()
198 KASSERT(atomic_load_int(&object->shadow_count) == 0, in vm_object_zdtor()
199 ("object %p shadow_count = %d", in vm_object_zdtor()
200 object, atomic_load_int(&object->shadow_count))); in vm_object_zdtor()
201 KASSERT(object->type == OBJT_DEAD, in vm_object_zdtor()
202 ("object %p has non-dead type %d", in vm_object_zdtor()
203 object, object->type)); in vm_object_zdtor()
204 KASSERT(object->charge == 0 && object->cred == NULL, in vm_object_zdtor()
205 ("object %p has non-zero charge %ju (%p)", in vm_object_zdtor()
206 object, (uintmax_t)object->charge, object->cred)); in vm_object_zdtor()
213 vm_object_t object; in vm_object_zinit() local
215 object = (vm_object_t)mem; in vm_object_zinit()
216 rw_init_flags(&object->lock, "vmobject", RW_DUPOK | RW_NEW); in vm_object_zinit()
218 /* These are true for any object that has been freed */ in vm_object_zinit()
219 object->type = OBJT_DEAD; in vm_object_zinit()
220 vm_radix_init(&object->rtree); in vm_object_zinit()
221 refcount_init(&object->ref_count, 0); in vm_object_zinit()
222 blockcount_init(&object->paging_in_progress); in vm_object_zinit()
223 blockcount_init(&object->busy); in vm_object_zinit()
224 object->resident_page_count = 0; in vm_object_zinit()
225 atomic_store_int(&object->shadow_count, 0); in vm_object_zinit()
226 object->flags = OBJ_DEAD; in vm_object_zinit()
229 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); in vm_object_zinit()
236 vm_object_t object, void *handle) in _vm_object_allocate() argument
239 TAILQ_INIT(&object->memq); in _vm_object_allocate()
240 LIST_INIT(&object->shadow_head); in _vm_object_allocate()
242 object->type = type; in _vm_object_allocate()
243 object->flags = flags; in _vm_object_allocate()
245 pctrie_init(&object->un_pager.swp.swp_blks); in _vm_object_allocate()
246 object->un_pager.swp.writemappings = 0; in _vm_object_allocate()
252 * non-dead object. in _vm_object_allocate()
256 object->pg_color = 0; in _vm_object_allocate()
257 object->size = size; in _vm_object_allocate()
258 object->domain.dr_policy = NULL; in _vm_object_allocate()
259 object->generation = 1; in _vm_object_allocate()
260 object->cleangeneration = 1; in _vm_object_allocate()
261 refcount_init(&object->ref_count, 1); in _vm_object_allocate()
262 object->memattr = VM_MEMATTR_DEFAULT; in _vm_object_allocate()
263 object->cred = NULL; in _vm_object_allocate()
264 object->charge = 0; in _vm_object_allocate()
265 object->handle = handle; in _vm_object_allocate()
266 object->backing_object = NULL; in _vm_object_allocate()
267 object->backing_object_offset = (vm_ooffset_t) 0; in _vm_object_allocate()
269 LIST_INIT(&object->rvq); in _vm_object_allocate()
271 umtx_shm_object_init(object); in _vm_object_allocate()
285 rw_init(&kernel_object->lock, "kernel vm object"); in vm_object_init()
297 * to vm_pageout_fallback_object_lock locking a vm object in vm_object_init()
303 obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL, in vm_object_init()
315 vm_object_clear_flag(vm_object_t object, u_short bits) in vm_object_clear_flag() argument
318 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_clear_flag()
319 object->flags &= ~bits; in vm_object_clear_flag()
323 * Sets the default memory attribute for the specified object. Pages
324 * that are allocated to this object are by default assigned this memory
328 * to the object. In the future, this requirement may be relaxed for
332 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr) in vm_object_set_memattr() argument
335 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_set_memattr()
337 if (object->type == OBJT_DEAD) in vm_object_set_memattr()
339 if (!TAILQ_EMPTY(&object->memq)) in vm_object_set_memattr()
342 object->memattr = memattr; in vm_object_set_memattr()
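A hedged usage sketch for vm_object_set_memattr (not part of the matched lines; "devobj" is an illustrative object with no resident pages yet, and VM_MEMATTR_UNCACHEABLE is machine-dependent). Per the comment above, the call must be made with the write lock held and before any page becomes resident, otherwise an error is returned:

        VM_OBJECT_WLOCK(devobj);
        if (vm_object_set_memattr(devobj, VM_MEMATTR_UNCACHEABLE) != KERN_SUCCESS)
                printf("memattr rejected: object is dead or already has pages\n");
        VM_OBJECT_WUNLOCK(devobj);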
347 vm_object_pip_add(vm_object_t object, short i) in vm_object_pip_add() argument
351 blockcount_acquire(&object->paging_in_progress, i); in vm_object_pip_add()
355 vm_object_pip_wakeup(vm_object_t object) in vm_object_pip_wakeup() argument
358 vm_object_pip_wakeupn(object, 1); in vm_object_pip_wakeup()
362 vm_object_pip_wakeupn(vm_object_t object, short i) in vm_object_pip_wakeupn() argument
366 blockcount_release(&object->paging_in_progress, i); in vm_object_pip_wakeupn()
370 * Atomically drop the object lock and wait for pip to drain. This protects
375 vm_object_pip_sleep(vm_object_t object, const char *waitid) in vm_object_pip_sleep() argument
378 (void)blockcount_sleep(&object->paging_in_progress, &object->lock, in vm_object_pip_sleep()
383 vm_object_pip_wait(vm_object_t object, const char *waitid) in vm_object_pip_wait() argument
386 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_pip_wait()
388 blockcount_wait(&object->paging_in_progress, &object->lock, waitid, in vm_object_pip_wait()
393 vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid) in vm_object_pip_wait_unlocked() argument
396 VM_OBJECT_ASSERT_UNLOCKED(object); in vm_object_pip_wait_unlocked()
398 blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM); in vm_object_pip_wait_unlocked()
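The paging-in-progress (pip) helpers above form a simple bracket. A hedged sketch (illustrative, not from the matched lines) of how a pager-style caller can hold off vm_object_terminate() while it performs I/O without keeping the object lock:

        vm_object_pip_add(object, 1);   /* pin: termination waits for pip to drain */
        VM_OBJECT_WUNLOCK(object);
        /* ... start and wait for page I/O here ... */
        VM_OBJECT_WLOCK(object);
        vm_object_pip_wakeup(object);   /* drop the pip reference, wake any waiter */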
404 * Returns a new object with the given size.
409 vm_object_t object; in vm_object_allocate() local
435 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); in vm_object_allocate()
436 _vm_object_allocate(type, size, flags, object, NULL); in vm_object_allocate()
438 return (object); in vm_object_allocate()
444 vm_object_t object; in vm_object_allocate_dyn() local
447 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); in vm_object_allocate_dyn()
448 _vm_object_allocate(dyntype, size, flags, object, NULL); in vm_object_allocate_dyn()
450 return (object); in vm_object_allocate_dyn()
456 * Returns a new default object of the given size and marked as
464 vm_object_t handle, object; in vm_object_allocate_anon() local
472 object = uma_zalloc(obj_zone, M_WAITOK); in vm_object_allocate_anon()
474 OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle); in vm_object_allocate_anon()
475 object->cred = cred; in vm_object_allocate_anon()
476 object->charge = cred != NULL ? charge : 0; in vm_object_allocate_anon()
477 return (object); in vm_object_allocate_anon()
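A hedged allocation sketch (illustrative names; the caller is assumed to have already reserved the swap charge against its credentials): vm_object_allocate_anon() returns a swap-backed anonymous object sized in pages and flagged OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP as shown above.

        struct ucred *cred;
        vm_object_t obj;

        cred = crhold(curthread->td_ucred);     /* the object takes ownership of this reference */
        obj = vm_object_allocate_anon(atop(len), NULL, cred, len);  /* len bytes -> atop(len) pages */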
481 vm_object_reference_vnode(vm_object_t object) in vm_object_reference_vnode() argument
489 if (!refcount_acquire_if_gt(&object->ref_count, 0)) { in vm_object_reference_vnode()
490 VM_OBJECT_RLOCK(object); in vm_object_reference_vnode()
491 old = refcount_acquire(&object->ref_count); in vm_object_reference_vnode()
492 if (object->type == OBJT_VNODE && old == 0) in vm_object_reference_vnode()
493 vref(object->handle); in vm_object_reference_vnode()
494 VM_OBJECT_RUNLOCK(object); in vm_object_reference_vnode()
501 * Acquires a reference to the given object.
504 vm_object_reference(vm_object_t object) in vm_object_reference() argument
507 if (object == NULL) in vm_object_reference()
510 if (object->type == OBJT_VNODE) in vm_object_reference()
511 vm_object_reference_vnode(object); in vm_object_reference()
513 refcount_acquire(&object->ref_count); in vm_object_reference()
514 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_reference()
515 ("vm_object_reference: Referenced dead object.")); in vm_object_reference()
521 * Gets another reference to the given object.
523 * The object must be locked.
526 vm_object_reference_locked(vm_object_t object) in vm_object_reference_locked() argument
530 VM_OBJECT_ASSERT_LOCKED(object); in vm_object_reference_locked()
531 old = refcount_acquire(&object->ref_count); in vm_object_reference_locked()
532 if (object->type == OBJT_VNODE && old == 0) in vm_object_reference_locked()
533 vref(object->handle); in vm_object_reference_locked()
534 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_reference_locked()
535 ("vm_object_reference: Referenced dead object.")); in vm_object_reference_locked()
539 * Handle deallocating an object of type OBJT_VNODE.
542 vm_object_deallocate_vnode(vm_object_t object) in vm_object_deallocate_vnode() argument
544 struct vnode *vp = (struct vnode *) object->handle; in vm_object_deallocate_vnode()
547 KASSERT(object->type == OBJT_VNODE, in vm_object_deallocate_vnode()
548 ("vm_object_deallocate_vnode: not a vnode object")); in vm_object_deallocate_vnode()
551 /* Object lock to protect handle lookup. */ in vm_object_deallocate_vnode()
552 last = refcount_release(&object->ref_count); in vm_object_deallocate_vnode()
553 VM_OBJECT_RUNLOCK(object); in vm_object_deallocate_vnode()
559 umtx_shm_object_terminated(object); in vm_object_deallocate_vnode()
566 * We dropped a reference on an object and discovered that it had a
568 * dropped. Attempt to collapse the sibling and backing object.
573 vm_object_t object; in vm_object_deallocate_anon() local
576 object = LIST_FIRST(&backing_object->shadow_head); in vm_object_deallocate_anon()
577 KASSERT(object != NULL && in vm_object_deallocate_anon()
582 KASSERT((object->flags & OBJ_ANON) != 0, in vm_object_deallocate_anon()
583 ("invalid shadow object %p", object)); in vm_object_deallocate_anon()
585 if (!VM_OBJECT_TRYWLOCK(object)) { in vm_object_deallocate_anon()
587 * Prevent object from disappearing since we do not have a in vm_object_deallocate_anon()
590 vm_object_pip_add(object, 1); in vm_object_deallocate_anon()
592 VM_OBJECT_WLOCK(object); in vm_object_deallocate_anon()
593 vm_object_pip_wakeup(object); in vm_object_deallocate_anon()
600 if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 || in vm_object_deallocate_anon()
601 !refcount_acquire_if_not_zero(&object->ref_count)) { in vm_object_deallocate_anon()
602 VM_OBJECT_WUNLOCK(object); in vm_object_deallocate_anon()
605 backing_object = object->backing_object; in vm_object_deallocate_anon()
607 vm_object_collapse(object); in vm_object_deallocate_anon()
608 VM_OBJECT_WUNLOCK(object); in vm_object_deallocate_anon()
610 return (object); in vm_object_deallocate_anon()
616 * Release a reference to the specified object,
619 * are gone, storage associated with this object
622 * No object may be locked.
625 vm_object_deallocate(vm_object_t object) in vm_object_deallocate() argument
630 while (object != NULL) { in vm_object_deallocate()
633 * vm_object_terminate() on the object chain. A ref count in vm_object_deallocate()
636 * object. in vm_object_deallocate()
638 if ((object->flags & OBJ_ANON) == 0) in vm_object_deallocate()
639 released = refcount_release_if_gt(&object->ref_count, 1); in vm_object_deallocate()
641 released = refcount_release_if_gt(&object->ref_count, 2); in vm_object_deallocate()
645 if (object->type == OBJT_VNODE) { in vm_object_deallocate()
646 VM_OBJECT_RLOCK(object); in vm_object_deallocate()
647 if (object->type == OBJT_VNODE) { in vm_object_deallocate()
648 vm_object_deallocate_vnode(object); in vm_object_deallocate()
651 VM_OBJECT_RUNLOCK(object); in vm_object_deallocate()
654 VM_OBJECT_WLOCK(object); in vm_object_deallocate()
655 KASSERT(object->ref_count > 0, in vm_object_deallocate()
656 ("vm_object_deallocate: object deallocated too many times: %d", in vm_object_deallocate()
657 object->type)); in vm_object_deallocate()
661 * object we may need to collapse the shadow chain. in vm_object_deallocate()
663 if (!refcount_release(&object->ref_count)) { in vm_object_deallocate()
664 if (object->ref_count > 1 || in vm_object_deallocate()
665 atomic_load_int(&object->shadow_count) == 0) { in vm_object_deallocate()
666 if ((object->flags & OBJ_ANON) != 0 && in vm_object_deallocate()
667 object->ref_count == 1) in vm_object_deallocate()
668 vm_object_set_flag(object, in vm_object_deallocate()
670 VM_OBJECT_WUNLOCK(object); in vm_object_deallocate()
675 object = vm_object_deallocate_anon(object); in vm_object_deallocate()
680 * Handle the final reference to an object. We restart in vm_object_deallocate()
681 * the loop with the backing object to avoid recursion. in vm_object_deallocate()
683 umtx_shm_object_terminated(object); in vm_object_deallocate()
684 temp = object->backing_object; in vm_object_deallocate()
686 KASSERT(object->type == OBJT_SWAP, in vm_object_deallocate()
687 ("shadowed tmpfs v_object 2 %p", object)); in vm_object_deallocate()
688 vm_object_backing_remove(object); in vm_object_deallocate()
691 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_deallocate()
692 ("vm_object_deallocate: Terminating dead object.")); in vm_object_deallocate()
693 vm_object_set_flag(object, OBJ_DEAD); in vm_object_deallocate()
694 vm_object_terminate(object); in vm_object_deallocate()
695 object = temp; in vm_object_deallocate()
700 vm_object_destroy(vm_object_t object) in vm_object_destroy() argument
702 uma_zfree(obj_zone, object); in vm_object_destroy()
706 vm_object_sub_shadow(vm_object_t object) in vm_object_sub_shadow() argument
708 KASSERT(object->shadow_count >= 1, in vm_object_sub_shadow()
709 ("object %p sub_shadow count zero", object)); in vm_object_sub_shadow()
710 atomic_subtract_int(&object->shadow_count, 1); in vm_object_sub_shadow()
714 vm_object_backing_remove_locked(vm_object_t object) in vm_object_backing_remove_locked() argument
718 backing_object = object->backing_object; in vm_object_backing_remove_locked()
719 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_remove_locked()
722 KASSERT((object->flags & OBJ_COLLAPSING) == 0, in vm_object_backing_remove_locked()
723 ("vm_object_backing_remove: Removing collapsing object.")); in vm_object_backing_remove_locked()
726 if ((object->flags & OBJ_SHADOWLIST) != 0) { in vm_object_backing_remove_locked()
727 LIST_REMOVE(object, shadow_list); in vm_object_backing_remove_locked()
728 vm_object_clear_flag(object, OBJ_SHADOWLIST); in vm_object_backing_remove_locked()
730 object->backing_object = NULL; in vm_object_backing_remove_locked()
734 vm_object_backing_remove(vm_object_t object) in vm_object_backing_remove() argument
738 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_remove()
740 backing_object = object->backing_object; in vm_object_backing_remove()
741 if ((object->flags & OBJ_SHADOWLIST) != 0) { in vm_object_backing_remove()
743 vm_object_backing_remove_locked(object); in vm_object_backing_remove()
746 object->backing_object = NULL; in vm_object_backing_remove()
752 vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object) in vm_object_backing_insert_locked() argument
755 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_insert_locked()
760 LIST_INSERT_HEAD(&backing_object->shadow_head, object, in vm_object_backing_insert_locked()
762 vm_object_set_flag(object, OBJ_SHADOWLIST); in vm_object_backing_insert_locked()
764 object->backing_object = backing_object; in vm_object_backing_insert_locked()
768 vm_object_backing_insert(vm_object_t object, vm_object_t backing_object) in vm_object_backing_insert() argument
771 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_insert()
775 vm_object_backing_insert_locked(object, backing_object); in vm_object_backing_insert()
778 object->backing_object = backing_object; in vm_object_backing_insert()
784 * Insert an object into a backing_object's shadow list with an additional
788 vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object) in vm_object_backing_insert_ref() argument
791 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_insert_ref()
796 ("shadowing dead anonymous object")); in vm_object_backing_insert_ref()
798 vm_object_backing_insert_locked(object, backing_object); in vm_object_backing_insert_ref()
804 object->backing_object = backing_object; in vm_object_backing_insert_ref()
809 * Transfer a backing reference from backing_object to object.
812 vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object) in vm_object_backing_transfer() argument
818 * moves from within backing_object to within object. in vm_object_backing_transfer()
820 vm_object_backing_remove_locked(object); in vm_object_backing_transfer()
827 vm_object_backing_insert_locked(object, new_backing_object); in vm_object_backing_transfer()
833 * is replaced by object. in vm_object_backing_transfer()
835 object->backing_object = new_backing_object; in vm_object_backing_transfer()
844 vm_object_collapse_wait(vm_object_t object) in vm_object_collapse_wait() argument
847 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_collapse_wait()
849 while ((object->flags & OBJ_COLLAPSING) != 0) { in vm_object_collapse_wait()
850 vm_object_pip_wait(object, "vmcolwait"); in vm_object_collapse_wait()
856 * Waits for a backing object to clear a pending collapse and returns
857 * it locked if it is an ANON object.
860 vm_object_backing_collapse_wait(vm_object_t object) in vm_object_backing_collapse_wait() argument
864 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_collapse_wait()
867 backing_object = object->backing_object; in vm_object_backing_collapse_wait()
874 VM_OBJECT_WUNLOCK(object); in vm_object_backing_collapse_wait()
877 VM_OBJECT_WLOCK(object); in vm_object_backing_collapse_wait()
883 * vm_object_terminate_single_page removes a pageable page from the object,
890 vm_object_t object __diagused = objectv; in vm_object_terminate_single_page()
893 KASSERT(p->object == object && in vm_object_terminate_single_page()
896 p->object = NULL; in vm_object_terminate_single_page()
898 KASSERT((object->flags & OBJ_UNMANAGED) != 0 || in vm_object_terminate_single_page()
908 * from the object and resets the object to an empty state.
911 vm_object_terminate_pages(vm_object_t object) in vm_object_terminate_pages() argument
913 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_terminate_pages()
916 * If the object contained any pages, then reset it to an empty state. in vm_object_terminate_pages()
917 * Rather than incrementally removing each page from the object, the in vm_object_terminate_pages()
918 * page and object are reset to an empty state. in vm_object_terminate_pages()
920 if (object->resident_page_count == 0) in vm_object_terminate_pages()
923 vm_radix_reclaim_callback(&object->rtree, in vm_object_terminate_pages()
924 vm_object_terminate_single_page, object); in vm_object_terminate_pages()
925 TAILQ_INIT(&object->memq); in vm_object_terminate_pages()
926 object->resident_page_count = 0; in vm_object_terminate_pages()
927 if (object->type == OBJT_VNODE) in vm_object_terminate_pages()
928 vdrop(object->handle); in vm_object_terminate_pages()
932 * vm_object_terminate actually destroys the specified object, freeing
935 * The object must be locked.
939 vm_object_terminate(vm_object_t object) in vm_object_terminate() argument
942 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_terminate()
943 KASSERT((object->flags & OBJ_DEAD) != 0, in vm_object_terminate()
944 ("terminating non-dead obj %p", object)); in vm_object_terminate()
945 KASSERT((object->flags & OBJ_COLLAPSING) == 0, in vm_object_terminate()
946 ("terminating collapsing obj %p", object)); in vm_object_terminate()
947 KASSERT(object->backing_object == NULL, in vm_object_terminate()
948 ("terminating shadow obj %p", object)); in vm_object_terminate()
952 * done with the object. Note that new paging_in_progress in vm_object_terminate()
954 * OBJ_DEAD flag set (without unlocking the object), and avoid in vm_object_terminate()
955 * the object being terminated. in vm_object_terminate()
957 vm_object_pip_wait(object, "objtrm"); in vm_object_terminate()
959 KASSERT(object->ref_count == 0, in vm_object_terminate()
960 ("vm_object_terminate: object with references, ref_count=%d", in vm_object_terminate()
961 object->ref_count)); in vm_object_terminate()
963 if ((object->flags & OBJ_PG_DTOR) == 0) in vm_object_terminate()
964 vm_object_terminate_pages(object); in vm_object_terminate()
967 if (__predict_false(!LIST_EMPTY(&object->rvq))) in vm_object_terminate()
968 vm_reserv_break_all(object); in vm_object_terminate()
971 KASSERT(object->cred == NULL || (object->flags & OBJ_SWAP) != 0, in vm_object_terminate()
972 ("%s: non-swap obj %p has cred", __func__, object)); in vm_object_terminate()
975 * Let the pager know object is dead. in vm_object_terminate()
977 vm_pager_deallocate(object); in vm_object_terminate()
978 VM_OBJECT_WUNLOCK(object); in vm_object_terminate()
980 vm_object_destroy(object); in vm_object_terminate()
984 * Make the page read-only so that we can clear the object flags. However, if
985 * this is a nosync mmap then the object is likely to stay dirty so do not
986 * mess with the page and do not clear the object flags. Returns TRUE if the
997 * nosync page, skip it. Note that the object flags were not in vm_object_page_remove_write()
1012 * Clean all dirty pages in the specified range of object. Leaves page
1015 * leaving the object dirty.
1026 * The object must be locked.
1032 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, in vm_object_page_clean() argument
1040 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_page_clean()
1042 if (!vm_object_mightbedirty(object) || object->resident_page_count == 0) in vm_object_page_clean()
1050 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); in vm_object_page_clean()
1051 allclean = tstart == 0 && tend >= object->size; in vm_object_page_clean()
1055 curgeneration = object->generation; in vm_object_page_clean()
1057 for (p = vm_page_find_least(object, tstart); p != NULL; p = np) { in vm_object_page_clean()
1065 if (object->generation != curgeneration && in vm_object_page_clean()
1068 np = vm_page_find_least(object, pi); in vm_object_page_clean()
1075 if (object->type == OBJT_VNODE) { in vm_object_page_clean()
1076 n = vm_object_page_collect_flush(object, p, pagerflags, in vm_object_page_clean()
1082 if (object->generation != curgeneration && in vm_object_page_clean()
1106 np = vm_page_find_least(object, pi + n); in vm_object_page_clean()
1117 if (allclean && object->type == OBJT_VNODE) in vm_object_page_clean()
1118 object->cleangeneration = curgeneration; in vm_object_page_clean()
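A hedged sketch of a whole-object synchronous clean pass (illustrative; real callers such as vm_object_sync() also arrange vnode locking, which is omitted here). Byte offsets of 0/0 mean the entire object, and OBJPC_SYNC requests synchronous pageouts; the result is TRUE when the whole range was cleaned without the generation changing underneath:

        boolean_t allclean;

        VM_OBJECT_WLOCK(object);
        allclean = vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
        VM_OBJECT_WUNLOCK(object);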
1123 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, in vm_object_page_collect_flush() argument
1131 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_page_collect_flush()
1163 * anonymous objects, so we track down the vnode object
1168 * If the backing object is a device object with unmanaged pages, then any
1173 * may start out with a NULL object.
1176 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, in vm_object_sync() argument
1185 if (object == NULL) in vm_object_sync()
1189 VM_OBJECT_WLOCK(object); in vm_object_sync()
1190 while ((backing_object = object->backing_object) != NULL) { in vm_object_sync()
1192 offset += object->backing_object_offset; in vm_object_sync()
1193 VM_OBJECT_WUNLOCK(object); in vm_object_sync()
1194 object = backing_object; in vm_object_sync()
1195 if (object->size < OFF_TO_IDX(offset + size)) in vm_object_sync()
1196 size = IDX_TO_OFF(object->size) - offset; in vm_object_sync()
1210 if (object->type == OBJT_VNODE && in vm_object_sync()
1211 vm_object_mightbedirty(object) != 0 && in vm_object_sync()
1212 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) { in vm_object_sync()
1213 VM_OBJECT_WUNLOCK(object); in vm_object_sync()
1217 atop(size) == object->size) { in vm_object_sync()
1231 VM_OBJECT_WLOCK(object); in vm_object_sync()
1232 res = vm_object_page_clean(object, offset, offset + size, in vm_object_sync()
1234 VM_OBJECT_WUNLOCK(object); in vm_object_sync()
1256 VM_OBJECT_WLOCK(object); in vm_object_sync()
1258 if ((object->type == OBJT_VNODE || in vm_object_sync()
1259 object->type == OBJT_DEVICE) && invalidate) { in vm_object_sync()
1260 if (object->type == OBJT_DEVICE) in vm_object_sync()
1271 vm_object_page_remove(object, OFF_TO_IDX(offset), in vm_object_sync()
1274 VM_OBJECT_WUNLOCK(object); in vm_object_sync()
1279 * Determine whether the given advice can be applied to the object. Advice is
1285 vm_object_advice_applies(vm_object_t object, int advice) in vm_object_advice_applies() argument
1288 if ((object->flags & OBJ_UNMANAGED) != 0) in vm_object_advice_applies()
1292 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) == in vm_object_advice_applies()
1297 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, in vm_object_madvise_freespace() argument
1302 vm_pager_freespace(object, pindex, size); in vm_object_madvise_freespace()
1308 * Implements the madvise function at the object/page level.
1310 * MADV_WILLNEED (any object)
1314 * MADV_DONTNEED (any object)
1326 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, in vm_object_madvise() argument
1333 if (object == NULL) in vm_object_madvise()
1337 VM_OBJECT_WLOCK(object); in vm_object_madvise()
1338 if (!vm_object_advice_applies(object, advice)) { in vm_object_madvise()
1339 VM_OBJECT_WUNLOCK(object); in vm_object_madvise()
1342 for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { in vm_object_madvise()
1343 tobject = object; in vm_object_madvise()
1346 * If the next page isn't resident in the top-level object, we in vm_object_madvise()
1353 * Optimize a common case: if the top-level object has in vm_object_madvise()
1354 * no backing object, we can skip over the non-resident in vm_object_madvise()
1357 if (object->backing_object == NULL) { in vm_object_madvise()
1360 vm_object_madvise_freespace(object, advice, in vm_object_madvise()
1372 * Prepare to search the next object in the in vm_object_madvise()
1381 if (tobject != object) in vm_object_madvise()
1396 * can not be invalidated while the object lock is held. in vm_object_madvise()
1405 if (object != tobject) in vm_object_madvise()
1406 VM_OBJECT_WUNLOCK(object); in vm_object_madvise()
1423 if (tobject != object) in vm_object_madvise()
1426 VM_OBJECT_WUNLOCK(object); in vm_object_madvise()
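A hedged call sketch for vm_object_madvise (illustrative range; "obj", "pindex", and "npages" are assumptions): the function takes the object lock itself, walks [pindex, end), and for MADV_DONTNEED/MADV_FREE pushes resident pages toward reclamation, with MADV_FREE additionally letting the pager release backing swap.

        /* advise away npages pages starting at pindex */
        vm_object_madvise(obj, pindex, pindex + npages, MADV_DONTNEED);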
1432 * Create a new object which is backed by the
1433 * specified existing object range. The source
1434 * object reference is deallocated.
1436 * The new object and offset into that object
1440 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length, in vm_object_shadow() argument
1446 source = *object; in vm_object_shadow()
1449 * Don't create the new object if the old object isn't shared. in vm_object_shadow()
1453 * harmless and we will end up with an extra shadow object that in vm_object_shadow()
1461 * Allocate a new object with the given length. in vm_object_shadow()
1466 * Store the offset into the source object, and fix up the offset into in vm_object_shadow()
1467 * the new object. in vm_object_shadow()
1475 * The new object shadows the source object, adding a in vm_object_shadow()
1477 * to point to the new object, removing a reference to in vm_object_shadow()
1478 * the source object. Net result: no change of in vm_object_shadow()
1488 * Try to optimize the result object's page color when in vm_object_shadow()
1490 * consistency in the combined shadowed object. in vm_object_shadow()
1510 *object = result; in vm_object_shadow()
1516 * Split the pages in a map entry into a new object. This affords
1517 * easier removal of unused pages, and keeps object inheritance from
1529 orig_object = entry->object.vm_object; in vm_object_split()
1531 ("vm_object_split: Splitting object with multiple mappings.")); in vm_object_split()
1547 * additional reference on backing_object by new object will in vm_object_split()
1554 * At this point, the new object is still private, so the order in in vm_object_split()
1574 * that the object is in transition. in vm_object_split()
1622 * the original object, then transferring the reservation to in vm_object_split()
1623 * the new object is neither particularly beneficial nor in vm_object_split()
1625 * with the original object. If, however, all of the in vm_object_split()
1627 * object, then transferring the reservation is typically in vm_object_split()
1648 entry->object.vm_object = new_object; in vm_object_split()
1655 vm_object_collapse_scan_wait(struct pctrie_iter *pages, vm_object_t object, in vm_object_collapse_scan_wait() argument
1660 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_collapse_scan_wait()
1661 backing_object = object->backing_object; in vm_object_collapse_scan_wait()
1664 KASSERT(p == NULL || p->object == object || p->object == backing_object, in vm_object_collapse_scan_wait()
1665 ("invalid ownership %p %p %p", p, object, backing_object)); in vm_object_collapse_scan_wait()
1668 VM_OBJECT_WUNLOCK(object); in vm_object_collapse_scan_wait()
1671 VM_OBJECT_WLOCK(object); in vm_object_collapse_scan_wait()
1672 } else if (p->object == object) { in vm_object_collapse_scan_wait()
1675 VM_OBJECT_WLOCK(object); in vm_object_collapse_scan_wait()
1677 VM_OBJECT_WUNLOCK(object); in vm_object_collapse_scan_wait()
1680 VM_OBJECT_WLOCK(object); in vm_object_collapse_scan_wait()
1688 vm_object_collapse_scan(vm_object_t object) in vm_object_collapse_scan() argument
1695 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_collapse_scan()
1696 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); in vm_object_collapse_scan()
1698 backing_object = object->backing_object; in vm_object_collapse_scan()
1699 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); in vm_object_collapse_scan()
1710 next = vm_object_collapse_scan_wait(&pages, object, p); in vm_object_collapse_scan()
1714 KASSERT(object->backing_object == backing_object, in vm_object_collapse_scan()
1715 ("vm_object_collapse_scan: backing object mismatch %p != %p", in vm_object_collapse_scan()
1716 object->backing_object, backing_object)); in vm_object_collapse_scan()
1717 KASSERT(p->object == backing_object, in vm_object_collapse_scan()
1718 ("vm_object_collapse_scan: object mismatch %p != %p", in vm_object_collapse_scan()
1719 p->object, backing_object)); in vm_object_collapse_scan()
1721 if (p->pindex < backing_offset_index || object->size <= in vm_object_collapse_scan()
1742 pp = vm_page_lookup(object, new_pindex); in vm_object_collapse_scan()
1751 next = vm_object_collapse_scan_wait(&pages, object, pp); in vm_object_collapse_scan()
1766 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, in vm_object_collapse_scan()
1772 * backing object. in vm_object_collapse_scan()
1787 * backing object to the main object. in vm_object_collapse_scan()
1793 if (!vm_page_iter_rename(&pages, p, object, new_pindex)) { in vm_object_collapse_scan()
1795 next = vm_object_collapse_scan_wait(&pages, object, in vm_object_collapse_scan()
1808 vm_reserv_rename(p, object, backing_object, in vm_object_collapse_scan()
1820 * Collapse an object with the object backing it.
1821 * Pages in the backing object are moved into the
1822 * parent, and the backing object is deallocated.
1825 vm_object_collapse(vm_object_t object) in vm_object_collapse() argument
1829 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_collapse()
1832 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON, in vm_object_collapse()
1833 ("collapsing invalid object")); in vm_object_collapse()
1840 backing_object = vm_object_backing_collapse_wait(object); in vm_object_collapse()
1844 KASSERT(object->ref_count > 0 && in vm_object_collapse()
1845 object->ref_count > atomic_load_int(&object->shadow_count), in vm_object_collapse()
1847 object->ref_count, atomic_load_int(&object->shadow_count))); in vm_object_collapse()
1850 ("vm_object_collapse: Backing object already collapsing.")); in vm_object_collapse()
1851 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0, in vm_object_collapse()
1852 ("vm_object_collapse: object is already collapsing.")); in vm_object_collapse()
1855 * We know that we can either collapse the backing object if in vm_object_collapse()
1857 * the parent bypass the object if the parent happens to shadow in vm_object_collapse()
1858 * all the resident pages in the entire backing object. in vm_object_collapse()
1865 vm_object_pip_add(object, 1); in vm_object_collapse()
1866 vm_object_set_flag(object, OBJ_COLLAPSING); in vm_object_collapse()
1872 * object, we can collapse it into the parent. in vm_object_collapse()
1874 vm_object_collapse_scan(object); in vm_object_collapse()
1877 * Move the pager from backing_object to object. in vm_object_collapse()
1880 * backing_object's and object's locks are released and in vm_object_collapse()
1883 swap_pager_copy(backing_object, object, in vm_object_collapse()
1884 OFF_TO_IDX(object->backing_object_offset), TRUE); in vm_object_collapse()
1887 * Object now shadows whatever backing_object did. in vm_object_collapse()
1889 vm_object_clear_flag(object, OBJ_COLLAPSING); in vm_object_collapse()
1890 vm_object_backing_transfer(object, backing_object); in vm_object_collapse()
1891 object->backing_object_offset += in vm_object_collapse()
1893 VM_OBJECT_WUNLOCK(object); in vm_object_collapse()
1894 vm_object_pip_wakeup(object); in vm_object_collapse()
1899 * Since the backing object has no pages, no pager left, in vm_object_collapse()
1900 * and no object references within it, all that is in vm_object_collapse()
1911 VM_OBJECT_WLOCK(object); in vm_object_collapse()
1914 * If we do not entirely shadow the backing object, in vm_object_collapse()
1917 * The object lock and backing_object lock must not in vm_object_collapse()
1920 if (!swap_pager_scan_all_shadowed(object)) { in vm_object_collapse()
1926 * Make the parent shadow the next object in the in vm_object_collapse()
1930 vm_object_backing_remove_locked(object); in vm_object_collapse()
1933 vm_object_backing_insert_ref(object, in vm_object_collapse()
1935 object->backing_object_offset += in vm_object_collapse()
1952 * Try again with this object's new backing object. in vm_object_collapse()
1960 * For the given object, either frees or invalidates each of the
1963 * mapping, then it may be invalidated but not removed from the object.
1966 * extends from "start" to the end of the object. If the option
1981 * The object must be locked.
1984 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, in vm_object_page_remove() argument
1990 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_page_remove()
1991 KASSERT((object->flags & OBJ_UNMANAGED) == 0 || in vm_object_page_remove()
1993 ("vm_object_page_remove: illegal options for object %p", object)); in vm_object_page_remove()
1994 if (object->resident_page_count == 0) in vm_object_page_remove()
1996 vm_object_pip_add(object, 1); in vm_object_page_remove()
1997 vm_page_iter_limit_init(&pages, object, end); in vm_object_page_remove()
2027 VM_OBJECT_WLOCK(object); in vm_object_page_remove()
2037 object->ref_count != 0) in vm_object_page_remove()
2051 object->ref_count != 0 && in vm_object_page_remove()
2060 object->ref_count != 0 && !vm_page_try_remove_all(p)) in vm_object_page_remove()
2064 vm_object_pip_wakeup(object); in vm_object_page_remove()
2066 vm_pager_freespace(object, start, (end == 0 ? object->size : end) - in vm_object_page_remove()
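A hedged removal sketch (illustrative indices): vm_object_page_remove() requires the write lock; passing 0 for options frees the pages outright, while OBJPR_CLEANONLY would preserve dirty pages and OBJPR_NOTMAPPED tells it the pages are already unmapped.

        VM_OBJECT_WLOCK(object);
        vm_object_page_remove(object, start, end, 0);   /* free resident pages in [start, end) */
        VM_OBJECT_WUNLOCK(object);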
2073 * For the given object, attempt to move the specified pages to
2079 * "start" to the end of the object.
2084 * The object must be locked.
2087 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) in vm_object_page_noreuse() argument
2091 VM_OBJECT_ASSERT_LOCKED(object); in vm_object_page_noreuse()
2092 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, in vm_object_page_noreuse()
2093 ("vm_object_page_noreuse: illegal object %p", object)); in vm_object_page_noreuse()
2094 if (object->resident_page_count == 0) in vm_object_page_noreuse()
2096 p = vm_page_find_least(object, start); in vm_object_page_noreuse()
2109 * Populate the specified range of the object with valid pages. Returns
2114 * OBJT_DEVICE object.
2116 * The object must be locked.
2119 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) in vm_object_populate() argument
2125 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_populate()
2127 rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL); in vm_object_populate()
2133 * the object. in vm_object_populate()
2137 m = vm_page_lookup(object, start); in vm_object_populate()
2149 * regions of memory into a single object.
2153 * NOTE: Only works at the moment if the second object is NULL -
2154 * if it's not, which object do we lock first?
2157 * prev_object First object to coalesce
2160 * next_size Size of reference to the second object
2165 * The object must *not* be locked.
2180 * Try to collapse the object first. in vm_object_coalesce()
2186 * another object . has a copy elsewhere (any of which mean that the in vm_object_coalesce()
2212 * later. Non-NULL cred in the object would prevent in vm_object_coalesce()
2215 * cause allocation of the separate object for the map in vm_object_coalesce()
2228 * Remove any pages that may still be in the object from a previous in vm_object_coalesce()
2238 ("object %p overcharged 1 %jx %jx", prev_object, in vm_object_coalesce()
2247 * Extend the object if necessary. in vm_object_coalesce()
2257 vm_object_set_writeable_dirty_(vm_object_t object) in vm_object_set_writeable_dirty_() argument
2259 atomic_add_int(&object->generation, 1); in vm_object_set_writeable_dirty_()
2263 vm_object_mightbedirty_(vm_object_t object) in vm_object_mightbedirty_() argument
2265 return (object->generation != object->cleangeneration); in vm_object_mightbedirty_()
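A small hedged sketch (illustrative; "vp" is a hypothetical vnode) of how callers use the generation counters exposed above: vm_object_mightbedirty() compares them to decide whether a clean pass can be skipped.

        if (vm_object_mightbedirty(vp->v_object)) {
                /* writeable mappings may have dirtied pages since the last clean pass */
        }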
2271 * For each page offset within the specified range of the given object,
2277 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, in vm_object_unwire() argument
2290 if ((object->flags & OBJ_FICTITIOUS) != 0) in vm_object_unwire()
2296 VM_OBJECT_RLOCK(object); in vm_object_unwire()
2297 m = vm_page_find_least(object, pindex); in vm_object_unwire()
2301 * The first object in the shadow chain doesn't in vm_object_unwire()
2303 * the page must exist in a backing object. in vm_object_unwire()
2305 tobject = object; in vm_object_unwire()
2328 for (tobject = object; locked_depth >= 1; in vm_object_unwire()
2331 if (tm->object != tobject) in vm_object_unwire()
2335 tobject = tm->object; in vm_object_unwire()
2346 /* Release the accumulated object locks. */ in vm_object_unwire()
2347 for (tobject = object; locked_depth >= 1; locked_depth--) { in vm_object_unwire()
2355 * Return the vnode for the given object, or NULL if none exists.
2360 vm_object_vnode(vm_object_t object) in vm_object_vnode() argument
2364 VM_OBJECT_ASSERT_LOCKED(object); in vm_object_vnode()
2365 vm_pager_getvp(object, &vp, NULL); in vm_object_vnode()
2370 * Busy the vm object. This prevents new pages belonging to the object from
2402 * This function aims to determine if the object is mapped,
2457 * after reacquiring the VM object lock. in vm_object_list_handler()
2482 * A page may belong to the object but be in vm_object_list_handler()
2484 * object lock is not held. This makes the in vm_object_list_handler()
2608 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) in _vm_object_in_map() argument
2619 if (_vm_object_in_map(map, object, tmpe)) { in _vm_object_in_map()
2624 tmpm = entry->object.sub_map; in _vm_object_in_map()
2626 if (_vm_object_in_map(tmpm, object, tmpe)) { in _vm_object_in_map()
2630 } else if ((obj = entry->object.vm_object) != NULL) { in _vm_object_in_map()
2632 if (obj == object) { in _vm_object_in_map()
2640 vm_object_in_map(vm_object_t object) in vm_object_in_map() argument
2648 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { in vm_object_in_map()
2654 if (_vm_object_in_map(kernel_map, object, 0)) in vm_object_in_map()
2661 vm_object_t object; in DB_SHOW_COMMAND_FLAGS() local
2667 TAILQ_FOREACH(object, &vm_object_list, object_list) { in DB_SHOW_COMMAND_FLAGS()
2668 if ((object->flags & OBJ_ANON) != 0) { in DB_SHOW_COMMAND_FLAGS()
2669 if (object->ref_count == 0) { in DB_SHOW_COMMAND_FLAGS()
2671 (long)object->size); in DB_SHOW_COMMAND_FLAGS()
2673 if (!vm_object_in_map(object)) { in DB_SHOW_COMMAND_FLAGS()
2677 object->ref_count, (u_long)object->size, in DB_SHOW_COMMAND_FLAGS()
2678 (u_long)object->size, in DB_SHOW_COMMAND_FLAGS()
2679 (void *)object->backing_object); in DB_SHOW_COMMAND_FLAGS()
2690 DB_SHOW_COMMAND(object, vm_object_print_static) in DB_SHOW_COMMAND() argument
2693 vm_object_t object = (vm_object_t)addr; in DB_SHOW_COMMAND() local
2703 if (object == NULL) in DB_SHOW_COMMAND()
2707 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", in DB_SHOW_COMMAND()
2708 object, (int)object->type, (uintmax_t)object->size, in DB_SHOW_COMMAND()
2709 object->resident_page_count, object->ref_count, object->flags, in DB_SHOW_COMMAND()
2710 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); in DB_SHOW_COMMAND()
2712 atomic_load_int(&object->shadow_count), in DB_SHOW_COMMAND()
2713 object->backing_object ? object->backing_object->ref_count : 0, in DB_SHOW_COMMAND()
2714 object->backing_object, (uintmax_t)object->backing_object_offset); in DB_SHOW_COMMAND()
2721 TAILQ_FOREACH(p, &object->memq, listq) { in DB_SHOW_COMMAND()
2759 vm_object_t object; in DB_SHOW_COMMAND_FLAGS() local
2765 TAILQ_FOREACH(object, &vm_object_list, object_list) { in DB_SHOW_COMMAND_FLAGS()
2766 db_printf("new object: %p\n", (void *)object); in DB_SHOW_COMMAND_FLAGS()
2773 TAILQ_FOREACH(m, &object->memq, listq) { in DB_SHOW_COMMAND_FLAGS()