/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * Virtual memory object module.
 */

#include "opt_vm.h"

#include <sys/systm.h>
#include <sys/blockcount.h>
#include <sys/conf.h>
#include <sys/cpuset.h>
#include <sys/ipc.h>
#include <sys/jail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/shm.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, boolean_t *allclean,
		    boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    boolean_t *allclean);
static void	vm_object_backing_remove(vm_object_t object);

/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.  Only one "reference" to a given
 * region of an object should be writeable.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, and locked by the object's
 * lock.
 *
 * Each object also records a "pager" routine which is
 * used to retrieve (and store) pages to the proper backing
 * storage.  In addition, objects may be backed by other
 * objects from which they were virtual-copied.
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *	reference count		locked by object's lock
 *	pager routine		locked by object's lock
 *
 */
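
/*
 * Illustrative note (added commentary, not part of the original header):
 * a typical copy-on-write configuration is a short chain of anonymous
 * objects, each shadowing the one below it, with the bottom of the chain
 * backed by a vnode or the swap pager:
 *
 *	map entry -> shadow object -> ... -> backing object -> vnode/swap
 *
 * A fault looks a page up top-down through the backing_object links, while
 * the collapse and bypass operations later in this file try to keep such
 * chains as short as possible.
 */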
struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM object stats");

static COUNTER_U64_DEFINE_EARLY(object_collapses);
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses,
    "VM object collapses");

static COUNTER_U64_DEFINE_EARLY(object_bypasses);
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses,
    "VM object bypasses");

static COUNTER_U64_DEFINE_EARLY(object_collapse_waits);
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD,
    &object_collapse_waits,
    "Number of sleeps for collapse");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->ref_count == 0,
	    ("object %p ref_count = %d", object, object->ref_count));
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages in its memq", object));
	KASSERT(vm_radix_is_empty(&object->rtree),
	    ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(!vm_object_busied(object),
	    ("object %p busy = %d", object, blockcount_read(&object->busy)));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(atomic_load_int(&object->shadow_count) == 0,
	    ("object %p shadow_count = %d",
	    object, atomic_load_int(&object->shadow_count)));
	KASSERT(object->type == OBJT_DEAD,
	    ("object %p has non-dead type %d",
	    object, object->type));
	KASSERT(object->charge == 0 && object->cred == NULL,
	    ("object %p has non-zero charge %ju (%p)",
	    object, (uintmax_t)object->charge, object->cred));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	rw_init_flags(&object->lock, "vmobject", RW_DUPOK | RW_NEW);

	/* These are true for any object that has been freed */
	object->type = OBJT_DEAD;
	vm_radix_init(&object->rtree);
	refcount_init(&object->ref_count, 0);
	blockcount_init(&object->paging_in_progress);
	blockcount_init(&object->busy);
	object->resident_page_count = 0;
	atomic_store_int(&object->shadow_count, 0);
	object->flags = OBJ_DEAD;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
	return (0);
}

static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags,
    vm_object_t object, void *handle)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	object->flags = flags;
	if ((flags & OBJ_SWAP) != 0) {
		pctrie_init(&object->un_pager.swp.swp_blks);
		object->un_pager.swp.writemappings = 0;
	}

	/*
	 * Ensure that swap_pager_swapoff() iteration over object_list
	 * sees up to date type and pctrie head if it observed
	 * non-dead object.
	 */
	atomic_thread_fence_rel();

	object->pg_color = 0;
	object->size = size;
	object->domain.dr_policy = NULL;
	object->generation = 1;
	object->cleangeneration = 1;
	refcount_init(&object->ref_count, 1);
	object->memattr = VM_MEMATTR_DEFAULT;
	object->cred = NULL;
	object->charge = 0;
	object->handle = handle;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	umtx_shm_object_init(object);
}

/*
 * vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	rw_init(&kernel_object->lock, "kernel vm object");
	vm_radix_init(&kernel_object->rtree);
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif
	kernel_object->un_pager.phys.ops = &default_phys_pg_ops;

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 *
	 * paging_in_progress is valid always.  Lockless references to
	 * the objects may acquire pip and then check OBJ_DEAD.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);

	vm_radix_zinit();
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->flags &= ~bits;
}

/*
 * Sets the default memory attribute for the specified object.  Pages
 * that are allocated to this object are by default assigned this memory
 * attribute.
 *
 * Presently, this function must be called before any pages are allocated
 * to the object.  In the future, this requirement may be relaxed for
 * "default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	if (object->type == OBJT_DEAD)
		return (KERN_INVALID_ARGUMENT);
	if (!TAILQ_EMPTY(&object->memq))
		return (KERN_FAILURE);

	object->memattr = memattr;
	return (KERN_SUCCESS);
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	if (i > 0)
		blockcount_acquire(&object->paging_in_progress, i);
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	vm_object_pip_wakeupn(object, 1);
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	if (i > 0)
		blockcount_release(&object->paging_in_progress, i);
}
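
/*
 * Illustrative sketch (added commentary, not original code): a pager-style
 * consumer typically brackets an I/O window with the pip counter so that
 * the object's identity cannot change underneath it, for example:
 *
 *	vm_object_pip_add(object, 1);
 *	VM_OBJECT_WUNLOCK(object);
 *	... start and wait for page I/O ...
 *	VM_OBJECT_WLOCK(object);
 *	vm_object_pip_wakeup(object);
 *
 * The pip functions above are the real ones from this file; the surrounding
 * lock/unlock pattern is only an assumed usage example.
 */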
/*
 * Atomically drop the object lock and wait for pip to drain.  This protects
 * from sleep/wakeup races due to identity changes.  The lock is not
 * re-acquired on return.
 */
static void
vm_object_pip_sleep(vm_object_t object, const char *waitid)
{

	(void)blockcount_sleep(&object->paging_in_progress, &object->lock,
	    waitid, PVM | PDROP);
}

void
vm_object_pip_wait(vm_object_t object, const char *waitid)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	blockcount_wait(&object->paging_in_progress, &object->lock, waitid,
	    PVM);
}

void
vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid)
{

	VM_OBJECT_ASSERT_UNLOCKED(object);

	blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM);
}

/*
 * vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;
	u_short flags;

	switch (type) {
	case OBJT_DEAD:
		panic("vm_object_allocate: can't create OBJT_DEAD");
	case OBJT_SWAP:
		flags = OBJ_COLORED | OBJ_SWAP;
		break;
	case OBJT_DEVICE:
	case OBJT_SG:
		flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
		break;
	case OBJT_MGTDEVICE:
		flags = OBJ_FICTITIOUS;
		break;
	case OBJT_PHYS:
		flags = OBJ_UNMANAGED;
		break;
	case OBJT_VNODE:
		flags = 0;
		break;
	default:
		panic("vm_object_allocate: type %d is undefined or dynamic",
		    type);
	}
	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, flags, object, NULL);

	return (object);
}

vm_object_t
vm_object_allocate_dyn(objtype_t dyntype, vm_pindex_t size, u_short flags)
{
	vm_object_t object;

	MPASS(dyntype >= OBJT_FIRST_DYN /* && dyntype < nitems(pagertab) */);
	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(dyntype, size, flags, object, NULL);

	return (object);
}

/*
 * vm_object_allocate_anon:
 *
 *	Returns a new default object of the given size and marked as
 *	anonymous memory for special split/collapse handling.  Color
 *	to be initialized by the caller.
 */
vm_object_t
vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object,
    struct ucred *cred, vm_size_t charge)
{
	vm_object_t handle, object;

	if (backing_object == NULL)
		handle = NULL;
	else if ((backing_object->flags & OBJ_ANON) != 0)
		handle = backing_object->handle;
	else
		handle = backing_object;
	object = uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(OBJT_SWAP, size,
	    OBJ_ANON | OBJ_ONEMAPPING | OBJ_SWAP, object, handle);
	object->cred = cred;
	object->charge = cred != NULL ? charge : 0;
	return (object);
}
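
/*
 * Illustrative sketch (assumed usage, not taken from this file): a caller
 * that needs "len" bytes of anonymous, swap-backed memory would obtain and
 * later release an object roughly as follows:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate_anon(atop(len), NULL, cred, len);
 *	... install the object in a map entry, fault pages in ...
 *	vm_object_deallocate(obj);
 *
 * atop() converts the byte length into a page count for the object size;
 * the ucred/charge pair is how swap accounting is attributed.
 */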
static void
vm_object_reference_vnode(vm_object_t object)
{
	u_int old;

	/*
	 * vnode objects need the lock for the first reference
	 * to serialize with vnode_object_deallocate().
	 */
	if (!refcount_acquire_if_gt(&object->ref_count, 0)) {
		VM_OBJECT_RLOCK(object);
		old = refcount_acquire(&object->ref_count);
		if (object->type == OBJT_VNODE && old == 0)
			vref(object->handle);
		VM_OBJECT_RUNLOCK(object);
	}
}

/*
 * vm_object_reference:
 *
 *	Acquires a reference to the given object.
 */
void
vm_object_reference(vm_object_t object)
{

	if (object == NULL)
		return;

	if (object->type == OBJT_VNODE)
		vm_object_reference_vnode(object);
	else
		refcount_acquire(&object->ref_count);
	KASSERT((object->flags & OBJ_DEAD) == 0,
	    ("vm_object_reference: Referenced dead object."));
}

/*
 * vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	u_int old;

	VM_OBJECT_ASSERT_LOCKED(object);
	old = refcount_acquire(&object->ref_count);
	if (object->type == OBJT_VNODE && old == 0)
		vref(object->handle);
	KASSERT((object->flags & OBJ_DEAD) == 0,
	    ("vm_object_reference: Referenced dead object."));
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_deallocate_vnode(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;
	bool last;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_deallocate_vnode: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp"));

	/* Object lock to protect handle lookup. */
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);

	if (!last)
		return;

	if (!umtx_shm_vnobj_persistent)
		umtx_shm_object_terminated(object);

	/* vrele may need the vnode lock. */
	vrele(vp);
}

/*
 * We dropped a reference on an object and discovered that it had a
 * single remaining shadow.  This is a sibling of the reference we
 * dropped.  Attempt to collapse the sibling and backing object.
 */
static vm_object_t
vm_object_deallocate_anon(vm_object_t backing_object)
{
	vm_object_t object;

	/* Fetch the final shadow. */
	object = LIST_FIRST(&backing_object->shadow_head);
	KASSERT(object != NULL &&
	    atomic_load_int(&backing_object->shadow_count) == 1,
	    ("vm_object_anon_deallocate: ref_count: %d, shadow_count: %d",
	    backing_object->ref_count,
	    atomic_load_int(&backing_object->shadow_count)));
	KASSERT((object->flags & OBJ_ANON) != 0,
	    ("invalid shadow object %p", object));

	if (!VM_OBJECT_TRYWLOCK(object)) {
		/*
		 * Prevent object from disappearing since we do not have a
		 * reference.
		 */
		vm_object_pip_add(object, 1);
		VM_OBJECT_WUNLOCK(backing_object);
		VM_OBJECT_WLOCK(object);
		vm_object_pip_wakeup(object);
	} else
		VM_OBJECT_WUNLOCK(backing_object);

	/*
	 * Check for a collapse/terminate race with the last reference holder.
	 */
	if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 ||
	    !refcount_acquire_if_not_zero(&object->ref_count)) {
		VM_OBJECT_WUNLOCK(object);
		return (NULL);
	}
	backing_object = object->backing_object;
	if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0)
		vm_object_collapse(object);
	VM_OBJECT_WUNLOCK(object);

	return (object);
}

/*
 * vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	bool released;

	while (object != NULL) {
		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.  A ref count
		 * of 1 may be a special case depending on the shadow count
		 * being 0 or 1.  These cases require a write lock on the
		 * object.
		 */
		if ((object->flags & OBJ_ANON) == 0)
			released = refcount_release_if_gt(&object->ref_count, 1);
		else
			released = refcount_release_if_gt(&object->ref_count, 2);
		if (released)
			return;

		if (object->type == OBJT_VNODE) {
			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_VNODE) {
				vm_object_deallocate_vnode(object);
				return;
			}
			VM_OBJECT_RUNLOCK(object);
		}

		VM_OBJECT_WLOCK(object);
		KASSERT(object->ref_count > 0,
		    ("vm_object_deallocate: object deallocated too many times: %d",
		    object->type));

		/*
		 * If this is not the final reference to an anonymous
		 * object we may need to collapse the shadow chain.
		 */
		if (!refcount_release(&object->ref_count)) {
			if (object->ref_count > 1 ||
			    atomic_load_int(&object->shadow_count) == 0) {
				if ((object->flags & OBJ_ANON) != 0 &&
				    object->ref_count == 1)
					vm_object_set_flag(object,
					    OBJ_ONEMAPPING);
				VM_OBJECT_WUNLOCK(object);
				return;
			}

			/* Handle collapsing last ref on anonymous objects. */
			object = vm_object_deallocate_anon(object);
			continue;
		}

		/*
		 * Handle the final reference to an object.  We restart
		 * the loop with the backing object to avoid recursion.
		 */
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT(object->type == OBJT_SWAP,
			    ("shadowed tmpfs v_object 2 %p", object));
			vm_object_backing_remove(object);
		}

		KASSERT((object->flags & OBJ_DEAD) == 0,
		    ("vm_object_deallocate: Terminating dead object."));
		vm_object_set_flag(object, OBJ_DEAD);
		vm_object_terminate(object);
		object = temp;
	}
}

void
vm_object_destroy(vm_object_t object)
{
	uma_zfree(obj_zone, object);
}

static void
vm_object_sub_shadow(vm_object_t object)
{
	KASSERT(object->shadow_count >= 1,
	    ("object %p sub_shadow count zero", object));
	atomic_subtract_int(&object->shadow_count, 1);
}

static void
vm_object_backing_remove_locked(vm_object_t object)
{
	vm_object_t backing_object;

	backing_object = object->backing_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
	    ("vm_object_backing_remove: Removing collapsing object."));

	vm_object_sub_shadow(backing_object);
	if ((object->flags & OBJ_SHADOWLIST) != 0) {
		LIST_REMOVE(object, shadow_list);
		vm_object_clear_flag(object, OBJ_SHADOWLIST);
	}
	object->backing_object = NULL;
}

static void
vm_object_backing_remove(vm_object_t object)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	backing_object = object->backing_object;
	if ((object->flags & OBJ_SHADOWLIST) != 0) {
		VM_OBJECT_WLOCK(backing_object);
		vm_object_backing_remove_locked(object);
		VM_OBJECT_WUNLOCK(backing_object);
	} else {
		object->backing_object = NULL;
		vm_object_sub_shadow(backing_object);
	}
}

static void
vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	atomic_add_int(&backing_object->shadow_count, 1);
	if ((backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_ASSERT_WLOCKED(backing_object);
		LIST_INSERT_HEAD(&backing_object->shadow_head, object,
		    shadow_list);
		vm_object_set_flag(object, OBJ_SHADOWLIST);
	}
	object->backing_object = backing_object;
}

static void
vm_object_backing_insert(vm_object_t object, vm_object_t backing_object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	if ((backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_WLOCK(backing_object);
		vm_object_backing_insert_locked(object, backing_object);
		VM_OBJECT_WUNLOCK(backing_object);
	} else {
		object->backing_object = backing_object;
		atomic_add_int(&backing_object->shadow_count, 1);
	}
}

/*
 * Insert an object into a backing_object's shadow list with an additional
 * reference to the backing_object added.
 */
static void
vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	if ((backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_WLOCK(backing_object);
		KASSERT((backing_object->flags & OBJ_DEAD) == 0,
		    ("shadowing dead anonymous object"));
		vm_object_reference_locked(backing_object);
		vm_object_backing_insert_locked(object, backing_object);
		vm_object_clear_flag(backing_object, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(backing_object);
	} else {
		vm_object_reference(backing_object);
		atomic_add_int(&backing_object->shadow_count, 1);
		object->backing_object = backing_object;
	}
}

/*
 * Transfer a backing reference from backing_object to object.
 */
static void
vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object)
{
	vm_object_t new_backing_object;

	/*
	 * Note that the reference to backing_object->backing_object
	 * moves from within backing_object to within object.
	 */
	vm_object_backing_remove_locked(object);
	new_backing_object = backing_object->backing_object;
	if (new_backing_object == NULL)
		return;
	if ((new_backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_WLOCK(new_backing_object);
		vm_object_backing_remove_locked(backing_object);
		vm_object_backing_insert_locked(object, new_backing_object);
		VM_OBJECT_WUNLOCK(new_backing_object);
	} else {
		/*
		 * shadow_count for new_backing_object is left
		 * unchanged, its reference provided by backing_object
		 * is replaced by object.
		 */
		object->backing_object = new_backing_object;
		backing_object->backing_object = NULL;
	}
}
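
/*
 * Illustrative sketch (added commentary, not original): the transfer above
 * reshapes a three-level shadow chain by splicing "object" directly onto
 * its grandparent, e.g.
 *
 *	before:	object -> backing_object -> new_backing_object
 *	after:	object -> new_backing_object	(backing_object detached)
 *
 * The reference that backing_object held on new_backing_object is what
 * "moves" to object, so no reference counts need to change here.
 */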
/*
 * Wait for a concurrent collapse to settle.
 */
static void
vm_object_collapse_wait(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	while ((object->flags & OBJ_COLLAPSING) != 0) {
		vm_object_pip_wait(object, "vmcolwait");
		counter_u64_add(object_collapse_waits, 1);
	}
}

/*
 * Waits for a backing object to clear a pending collapse and returns
 * it locked if it is an ANON object.
 */
static vm_object_t
vm_object_backing_collapse_wait(vm_object_t object)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	for (;;) {
		backing_object = object->backing_object;
		if (backing_object == NULL ||
		    (backing_object->flags & OBJ_ANON) == 0)
			return (NULL);
		VM_OBJECT_WLOCK(backing_object);
		if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0)
			break;
		VM_OBJECT_WUNLOCK(object);
		vm_object_pip_sleep(backing_object, "vmbckwait");
		counter_u64_add(object_collapse_waits, 1);
		VM_OBJECT_WLOCK(object);
	}
	return (backing_object);
}

/*
 * vm_object_terminate_single_page removes a pageable page from the object,
 * and removes it from the paging queues and frees it, if it is not wired.
 * It is invoked via callback from vm_object_terminate_pages.
 */
static void
vm_object_terminate_single_page(vm_page_t p, void *objectv)
{
	vm_object_t object __diagused = objectv;

	vm_page_assert_unbusied(p);
	KASSERT(p->object == object &&
	    (p->ref_count & VPRC_OBJREF) != 0,
	    ("%s: page %p is inconsistent", __func__, p));
	p->object = NULL;
	if (vm_page_drop(p, VPRC_OBJREF) == VPRC_OBJREF) {
		VM_CNT_INC(v_pfree);
		vm_page_free(p);
	}
}

/*
 * vm_object_terminate_pages removes any remaining pageable pages
 * from the object and resets the object to an empty state.
 */
static void
vm_object_terminate_pages(vm_object_t object)
{
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * Rather than incrementally removing each page from the object, the
	 * page and object are reset to an empty state.
	 */
	if (object->resident_page_count == 0)
		return;

	vm_radix_reclaim_callback(&object->rtree,
	    vm_object_terminate_single_page, object);
	TAILQ_INIT(&object->memq);
	object->resident_page_count = 0;
	if (object->type == OBJT_VNODE)
		vdrop(object->handle);
}

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 * This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_DEAD) != 0,
	    ("terminating non-dead obj %p", object));
	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
	    ("terminating collapsing obj %p", object));
	KASSERT(object->backing_object == NULL,
	    ("terminating shadow obj %p", object));

	/*
	 * Wait for the pageout daemon and other current users to be
	 * done with the object.  Note that new paging_in_progress
	 * users can come after this wait, but they must check
	 * OBJ_DEAD flag set (without unlocking the object), and avoid
	 * the object being terminated.
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	if ((object->flags & OBJ_PG_DTOR) == 0)
		vm_object_terminate_pages(object);

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif

	KASSERT(object->cred == NULL || (object->flags & OBJ_SWAP) != 0,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
{

	vm_page_assert_busied(p);

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
		*allclean = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

/*
 * vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	For swap objects backing tmpfs regular files, do not flush anything,
 *	but remove write protection on the mapped pages to update mtime through
 *	mmaped writes.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns FALSE if some page from the range was not written, as
 *	reported by the pager, and TRUE otherwise.
 */
boolean_t
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int curgeneration, n, pagerflags;
	boolean_t eio, res, allclean;

	VM_OBJECT_ASSERT_WLOCKED(object);

	if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
		return (TRUE);

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;

	tstart = OFF_TO_IDX(start);
	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
	allclean = tstart == 0 && tend >= object->size;
	res = TRUE;

rescan:
	curgeneration = object->generation;

	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
		pi = p->pindex;
		if (pi >= tend)
			break;
		np = TAILQ_NEXT(p, listq);
		if (vm_page_none_valid(p))
			continue;
		if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) {
			if (object->generation != curgeneration &&
			    (flags & OBJPC_SYNC) != 0)
				goto rescan;
			np = vm_page_find_least(object, pi);
			continue;
		}
		if (!vm_object_page_remove_write(p, flags, &allclean)) {
			vm_page_xunbusy(p);
			continue;
		}
		if (object->type == OBJT_VNODE) {
			n = vm_object_page_collect_flush(object, p, pagerflags,
			    flags, &allclean, &eio);
			if (eio) {
				res = FALSE;
				allclean = FALSE;
			}
			if (object->generation != curgeneration &&
			    (flags & OBJPC_SYNC) != 0)
				goto rescan;

			/*
			 * If the VOP_PUTPAGES() did a truncated write, so
			 * that even the first page of the run is not fully
			 * written, vm_pageout_flush() returns 0 as the run
			 * length.  Since the condition that caused truncated
			 * write may be permanent, e.g. exhausted free space,
			 * accepting n == 0 would cause an infinite loop.
			 *
			 * Forwarding the iterator leaves the unwritten page
			 * behind, but there is not much we can do there if
			 * filesystem refuses to write it.
			 */
			if (n == 0) {
				n = 1;
				allclean = FALSE;
			}
		} else {
			n = 1;
			vm_page_xunbusy(p);
		}
		np = vm_page_find_least(object, pi + n);
	}
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
#endif

	/*
	 * Leave updating cleangeneration for tmpfs objects to tmpfs
	 * scan.  It needs to update mtime, which happens for other
	 * filesystems during page writeouts.
	 */
	if (allclean && object->type == OBJT_VNODE)
		object->cleangeneration = curgeneration;
	return (res);
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, boolean_t *allclean, boolean_t *eio)
{
	vm_page_t ma[2 * vm_pageout_page_count - 1], tp;
	int base, count, runlen;

	vm_page_lock_assert(p, MA_NOTOWNED);
	vm_page_assert_xbusied(p);
	VM_OBJECT_ASSERT_WLOCKED(object);
	base = nitems(ma) / 2;
	ma[base] = p;
	for (count = 1, tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_next(tp);
		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
			break;
		if (!vm_object_page_remove_write(tp, flags, allclean)) {
			vm_page_xunbusy(tp);
			break;
		}
		ma[base + count] = tp;
	}

	for (tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_prev(tp);
		if (tp == NULL || vm_page_tryxbusy(tp) == 0)
			break;
		if (!vm_object_page_remove_write(tp, flags, allclean)) {
			vm_page_xunbusy(tp);
			break;
		}
		ma[--base] = tp;
	}

	vm_pageout_flush(&ma[base], count, pagerflags, nitems(ma) / 2 - base,
	    &runlen, eio);
	return (runlen);
}
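
/*
 * Illustrative sketch (assumed caller, not part of this file): an
 * msync(2)-style flush of a byte range would look roughly like
 *
 *	VM_OBJECT_WLOCK(object);
 *	res = vm_object_page_clean(object, offset, offset + size,
 *	    invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : OBJPC_SYNC);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * which is essentially what vm_object_sync() below does for vnode-backed
 * objects; the flag selection shown here is a simplified example.
 */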
/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * If the backing object is a device object with unmanaged pages, then any
 * mappings to the specified range of pages must be removed before this
 * function is called.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int error, flags, fsync_after;
	boolean_t res;

	if (object == NULL)
		return (TRUE);
	res = TRUE;
	error = 0;
	VM_OBJECT_WLOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_WLOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_WUNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    vm_object_mightbedirty(object) != 0 &&
	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
		VM_OBJECT_WUNLOCK(object);
		(void)vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (syncio && !invalidate && offset == 0 &&
		    atop(size) == object->size) {
			/*
			 * If syncing the whole mapping of the file,
			 * it is faster to schedule all the writes in
			 * async mode, also allowing the clustering,
			 * and then wait for i/o to complete.
			 */
			flags = 0;
			fsync_after = TRUE;
		} else {
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
			fsync_after = FALSE;
		}
		VM_OBJECT_WLOCK(object);
		res = vm_object_page_clean(object, offset, offset + size,
		    flags);
		VM_OBJECT_WUNLOCK(object);
		if (fsync_after) {
			for (;;) {
				error = VOP_FSYNC(vp, MNT_WAIT, curthread);
				if (error != ERELOOKUP)
					break;

				/*
				 * Allow SU/bufdaemon to handle more
				 * dependencies in the meantime.
				 */
				VOP_UNLOCK(vp);
				vn_finished_write(mp);

				(void)vn_start_write(vp, &mp, V_WAIT);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			}
		}
		VOP_UNLOCK(vp);
		vn_finished_write(mp);
		if (error != 0)
			res = FALSE;
		VM_OBJECT_WLOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	    object->type == OBJT_DEVICE) && invalidate) {
		if (object->type == OBJT_DEVICE)
			/*
			 * The option OBJPR_NOTMAPPED must be passed here
			 * because vm_object_page_remove() cannot remove
			 * unmanaged mappings.
			 */
			flags = OBJPR_NOTMAPPED;
		else if (old_msync)
			flags = 0;
		else
			flags = OBJPR_CLEANONLY;
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
	}
	VM_OBJECT_WUNLOCK(object);
	return (res);
}

/*
 * Determine whether the given advice can be applied to the object.  Advice is
 * not applied to unmanaged pages since they never belong to page queues, and
 * since MADV_FREE is destructive, it can apply only to anonymous pages that
 * have been mapped at most once.
 */
static bool
vm_object_advice_applies(vm_object_t object, int advice)
{

	if ((object->flags & OBJ_UNMANAGED) != 0)
		return (false);
	if (advice != MADV_FREE)
		return (true);
	return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) ==
	    (OBJ_ONEMAPPING | OBJ_ANON));
}

static void
vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
    vm_size_t size)
{

	if (advice == MADV_FREE)
		vm_pager_freespace(object, pindex, size);
}

/*
 * vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_SWAP objects, OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
    int advice)
{
	vm_pindex_t tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m, tm;

	if (object == NULL)
		return;

relookup:
	VM_OBJECT_WLOCK(object);
	if (!vm_object_advice_applies(object, advice)) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
		tobject = object;

		/*
		 * If the next page isn't resident in the top-level object, we
		 * need to search the shadow chain.  When applying MADV_FREE, we
		 * take care to release any swap space used to store
		 * non-resident pages.
		 */
		if (m == NULL || pindex < m->pindex) {
			/*
			 * Optimize a common case: if the top-level object has
			 * no backing object, we can skip over the non-resident
			 * range in constant time.
			 */
			if (object->backing_object == NULL) {
				tpindex = (m != NULL && m->pindex < end) ?
				    m->pindex : end;
				vm_object_madvise_freespace(object, advice,
				    pindex, tpindex - pindex);
				if ((pindex = tpindex) == end)
					break;
				goto next_page;
			}

			tpindex = pindex;
			do {
				vm_object_madvise_freespace(tobject, advice,
				    tpindex, 1);
				/*
				 * Prepare to search the next object in the
				 * chain.
				 */
				backing_object = tobject->backing_object;
				if (backing_object == NULL)
					goto next_pindex;
				VM_OBJECT_WLOCK(backing_object);
				tpindex +=
				    OFF_TO_IDX(tobject->backing_object_offset);
				if (tobject != object)
					VM_OBJECT_WUNLOCK(tobject);
				tobject = backing_object;
				if (!vm_object_advice_applies(tobject, advice))
					goto next_pindex;
			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
			    NULL);
		} else {
next_page:
			tm = m;
			m = TAILQ_NEXT(m, listq);
		}

		/*
		 * If the page is not in a normal state, skip it.  The page
		 * can not be invalidated while the object lock is held.
		 */
		if (!vm_page_all_valid(tm) || vm_page_wired(tm))
			goto next_pindex;
		KASSERT((tm->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_madvise: page %p is fictitious", tm));
		KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
		    ("vm_object_madvise: page %p is not managed", tm));
		if (vm_page_tryxbusy(tm) == 0) {
			if (object != tobject)
				VM_OBJECT_WUNLOCK(object);
			if (advice == MADV_WILLNEED) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_aflag_set(tm, PGA_REFERENCED);
			}
			if (!vm_page_busy_sleep(tm, "madvpo", 0))
				VM_OBJECT_WUNLOCK(tobject);
			goto relookup;
		}
		vm_page_advise(tm, advice);
		vm_page_xunbusy(tm);
		vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
next_pindex:
		if (tobject != object)
			VM_OBJECT_WUNLOCK(tobject);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 * vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
    struct ucred *cred, bool shared)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 *
	 * If we hold the only reference we can guarantee that it won't
	 * increase while we have the map locked.  Otherwise the race is
	 * harmless and we will end up with an extra shadow object that
	 * will be collapsed later.
	 */
	if (source != NULL && source->ref_count == 1 &&
	    (source->flags & OBJ_ANON) != 0)
		return;

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate_anon(atop(length), source, cred, length);

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	if (shared || source != NULL) {
		VM_OBJECT_WLOCK(result);

		/*
		 * The new object shadows the source object, adding a
		 * reference to it.  Our caller changes his reference
		 * to point to the new object, removing a reference to
		 * the source object.  Net result: no change of
		 * reference count, unless the caller needs to add one
		 * more reference due to forking a shared map entry.
		 */
		if (shared) {
			vm_object_reference_locked(result);
			vm_object_clear_flag(result, OBJ_ONEMAPPING);
		}

		/*
		 * Try to optimize the result object's page color when
		 * shadowing in order to maintain page coloring
		 * consistency in the combined shadowed object.
		 */
		if (source != NULL) {
			vm_object_backing_insert(result, source);
			result->domain = source->domain;
#if VM_NRESERVLEVEL > 0
			vm_object_set_flag(result,
			    (source->flags & OBJ_COLORED));
			result->pg_color = (source->pg_color +
			    OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER -
			    1)) - 1);
#endif
		}
		VM_OBJECT_WUNLOCK(result);
	}

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}
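
/*
 * Illustrative sketch (assumed caller, not from this file): copy-on-write
 * setup for a map entry typically pushes a fresh shadow object on top of
 * the current one and then maps through the new top, e.g.
 *
 *	vm_object_t obj = entry->object.vm_object;
 *	vm_ooffset_t off = entry->offset;
 *
 *	vm_object_shadow(&obj, &off, entry->end - entry->start,
 *	    entry->cred, false);
 *	entry->object.vm_object = obj;
 *	entry->offset = off;
 *
 * On return "obj" is the new anonymous shadow (or the old object, if it was
 * already private), and "off" has been rebased to the new object.
 */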
/*
 * vm_object_split:
 *
 * Split the pages in a map entry into a new object.  This affords
 * easier removal of unused pages, and keeps object inheritance from
 * being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m, m_next;
	vm_object_t orig_object, new_object, backing_object;
	vm_pindex_t idx, offidxstart;
	vm_size_t size;

	orig_object = entry->object.vm_object;
	KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0,
	    ("vm_object_split:  Splitting object with multiple mappings."));
	if ((orig_object->flags & OBJ_ANON) == 0)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_WUNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	size = atop(entry->end - entry->start);

	new_object = vm_object_allocate_anon(size, orig_object,
	    orig_object->cred, ptoa(size));

	/*
	 * We must wait for the orig_object to complete any in-progress
	 * collapse so that the swap blocks are stable below.  The
	 * additional reference on backing_object by new object will
	 * prevent further collapse operations until split completes.
	 */
	VM_OBJECT_WLOCK(orig_object);
	vm_object_collapse_wait(orig_object);

	/*
	 * At this point, the new object is still private, so the order in
	 * which the original and new objects are locked does not matter.
	 */
	VM_OBJECT_WLOCK(new_object);
	new_object->domain = orig_object->domain;
	backing_object = orig_object->backing_object;
	if (backing_object != NULL) {
		vm_object_backing_insert_ref(new_object, backing_object);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + entry->offset;
	}
	if (orig_object->cred != NULL) {
		crhold(orig_object->cred);
		KASSERT(orig_object->charge >= ptoa(size),
		    ("orig_object->charge < 0"));
		orig_object->charge -= ptoa(size);
	}

	/*
	 * Mark the split operation so that swap_pager_getpages() knows
	 * that the object is in transition.
	 */
	vm_object_set_flag(orig_object, OBJ_SPLIT);
#ifdef INVARIANTS
	idx = 0;
#endif
retry:
	m = vm_page_find_least(orig_object, offidxstart);
	KASSERT(m == NULL || idx <= m->pindex - offidxstart,
	    ("%s: object %p was repopulated", __func__, orig_object));
	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
	    m = m_next) {
		m_next = TAILQ_NEXT(m, listq);

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if (vm_page_tryxbusy(m) == 0) {
			VM_OBJECT_WUNLOCK(new_object);
			if (vm_page_busy_sleep(m, "spltwt", 0))
				VM_OBJECT_WLOCK(orig_object);
			VM_OBJECT_WLOCK(new_object);
			goto retry;
		}

		/*
		 * The page was left invalid.  Likely placed there by
		 * an incomplete fault.  Just remove and ignore.
		 */
		if (vm_page_none_valid(m)) {
			if (vm_page_remove(m))
				vm_page_free(m);
			continue;
		}

		/* vm_page_rename() will dirty the page. */
		if (vm_page_rename(m, new_object, idx)) {
			vm_page_xunbusy(m);
			VM_OBJECT_WUNLOCK(new_object);
			VM_OBJECT_WUNLOCK(orig_object);
			vm_radix_wait();
			VM_OBJECT_WLOCK(orig_object);
			VM_OBJECT_WLOCK(new_object);
			goto retry;
		}

#if VM_NRESERVLEVEL > 0
		/*
		 * If some of the reservation's allocated pages remain with
		 * the original object, then transferring the reservation to
		 * the new object is neither particularly beneficial nor
		 * particularly harmful as compared to leaving the reservation
		 * with the original object.  If, however, all of the
		 * reservation's allocated pages are transferred to the new
		 * object, then transferring the reservation is typically
		 * beneficial.  Determining which of these two cases applies
		 * would be more costly than unconditionally renaming the
		 * reservation.
		 */
		vm_reserv_rename(m, new_object, orig_object, offidxstart);
#endif
	}

	/*
	 * swap_pager_copy() can sleep, in which case the orig_object's
	 * and new_object's locks are released and reacquired.
	 */
	swap_pager_copy(orig_object, new_object, offidxstart, 0);

	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_xunbusy(m);

	vm_object_clear_flag(orig_object, OBJ_SPLIT);
	VM_OBJECT_WUNLOCK(orig_object);
	VM_OBJECT_WUNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_WLOCK(new_object);
}

static vm_page_t
vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);
	backing_object = object->backing_object;
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	KASSERT(p == NULL || p->object == object || p->object == backing_object,
	    ("invalid ownership %p %p %p", p, object, backing_object));
	/* The page is only NULL when rename fails. */
	if (p == NULL) {
		VM_OBJECT_WUNLOCK(object);
		VM_OBJECT_WUNLOCK(backing_object);
		vm_radix_wait();
		VM_OBJECT_WLOCK(object);
	} else if (p->object == object) {
		VM_OBJECT_WUNLOCK(backing_object);
		if (vm_page_busy_sleep(p, "vmocol", 0))
			VM_OBJECT_WLOCK(object);
	} else {
		VM_OBJECT_WUNLOCK(object);
		if (!vm_page_busy_sleep(p, "vmocol", 0))
			VM_OBJECT_WUNLOCK(backing_object);
		VM_OBJECT_WLOCK(object);
	}
	VM_OBJECT_WLOCK(backing_object);
	return (TAILQ_FIRST(&backing_object->memq));
}

static bool
vm_object_scan_all_shadowed(vm_object_t object)
{
	vm_object_t backing_object;
	vm_page_t p, pp;
	vm_pindex_t backing_offset_index, new_pindex, pi, ps;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);

	backing_object = object->backing_object;

	if ((backing_object->flags & OBJ_ANON) == 0)
		return (false);

	pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	p = vm_page_find_least(backing_object, pi);
	ps = swap_pager_find_least(backing_object, pi);

	/*
	 * Only check pages inside the parent object's range and
	 * inside the parent object's mapping of the backing object.
	 */
	for (;; pi++) {
		if (p != NULL && p->pindex < pi)
			p = TAILQ_NEXT(p, listq);
		if (ps < pi)
			ps = swap_pager_find_least(backing_object, pi);
		if (p == NULL && ps >= backing_object->size)
			break;
		else if (p == NULL)
			pi = ps;
		else
			pi = MIN(p->pindex, ps);

		new_pindex = pi - backing_offset_index;
		if (new_pindex >= object->size)
			break;

		if (p != NULL) {
			/*
			 * If the backing object page is busy a
			 * grandparent or older page may still be
			 * undergoing CoW.  It is not safe to collapse
			 * the backing object until it is quiesced.
			 */
			if (vm_page_tryxbusy(p) == 0)
				return (false);

			/*
			 * We raced with the fault handler that left
			 * newly allocated invalid page on the object
			 * queue and retried.
			 */
			if (!vm_page_all_valid(p))
				goto unbusy_ret;
		}

		/*
		 * See if the parent has the page or if the parent's object
		 * pager has the page.  If the parent has the page but the page
		 * is not valid, the parent's object pager must have the page.
		 *
		 * If this fails, the parent does not completely shadow the
		 * object and we might as well give up now.
		 */
		pp = vm_page_lookup(object, new_pindex);

		/*
		 * The valid check here is stable due to object lock
		 * being required to clear valid and initiate paging.
		 * Busy of p disallows fault handler to validate pp.
		 */
		if ((pp == NULL || vm_page_none_valid(pp)) &&
		    !vm_pager_has_page(object, new_pindex, NULL, NULL))
			goto unbusy_ret;
		if (p != NULL)
			vm_page_xunbusy(p);
	}
	return (true);

unbusy_ret:
	if (p != NULL)
		vm_page_xunbusy(p);
	return (false);
}
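
/*
 * Illustrative note (added, with made-up numbers): the pindex translation
 * used above and in the collapse scan below is simply
 *
 *	new_pindex = pi - OFF_TO_IDX(object->backing_object_offset);
 *
 * so with a backing_object_offset of two pages, backing-object page index 5
 * corresponds to parent page index 3; indices below the offset or at or
 * beyond object->size fall outside the parent's view of the backing object.
 */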
static void
vm_object_collapse_scan(vm_object_t object)
{
	vm_object_t backing_object;
	vm_page_t next, p, pp;
	vm_pindex_t backing_offset_index, new_pindex;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Our scan
	 */
	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		new_pindex = p->pindex - backing_offset_index;

		/*
		 * Check for busy page
		 */
		if (vm_page_tryxbusy(p) == 0) {
			next = vm_object_collapse_scan_wait(object, p);
			continue;
		}

		KASSERT(object->backing_object == backing_object,
		    ("vm_object_collapse_scan: backing object mismatch %p != %p",
		    object->backing_object, backing_object));
		KASSERT(p->object == backing_object,
		    ("vm_object_collapse_scan: object mismatch %p != %p",
		    p->object, backing_object));

		if (p->pindex < backing_offset_index ||
		    new_pindex >= object->size) {
			vm_pager_freespace(backing_object, p->pindex, 1);

			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (vm_page_remove(p))
				vm_page_free(p);
			continue;
		}

		if (!vm_page_all_valid(p)) {
			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (vm_page_remove(p))
				vm_page_free(p);
			continue;
		}

		pp = vm_page_lookup(object, new_pindex);
		if (pp != NULL && vm_page_tryxbusy(pp) == 0) {
			vm_page_xunbusy(p);
			/*
			 * The page in the parent is busy and possibly not
			 * (yet) valid.  Until its state is finalized by the
			 * busy bit owner, we can't tell whether it shadows the
			 * original page.
			 */
			next = vm_object_collapse_scan_wait(object, pp);
			continue;
		}

		if (pp != NULL && vm_page_none_valid(pp)) {
			/*
			 * The page was invalid in the parent.  Likely placed
			 * there by an incomplete fault.  Just remove and
			 * ignore.  p can replace it.
			 */
			if (vm_page_remove(pp))
				vm_page_free(pp);
			pp = NULL;
		}

		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
		    NULL)) {
			/*
			 * The page already exists in the parent OR swap exists
			 * for this location in the parent.  Leave the parent's
			 * page alone.  Destroy the original page from the
			 * backing object.
			 */
			vm_pager_freespace(backing_object, p->pindex, 1);
			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (vm_page_remove(p))
				vm_page_free(p);
			if (pp != NULL)
				vm_page_xunbusy(pp);
			continue;
		}

		/*
		 * Page does not exist in parent, rename the page from the
		 * backing object to the main object.
		 *
		 * If the page was mapped to a process, it can remain mapped
		 * through the rename.  vm_page_rename() will dirty the page.
		 */
		if (vm_page_rename(p, object, new_pindex)) {
			vm_page_xunbusy(p);
			next = vm_object_collapse_scan_wait(object, NULL);
			continue;
		}

		/* Use the old pindex to free the right page. */
		vm_pager_freespace(backing_object, new_pindex +
		    backing_offset_index, 1);

#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(p, object, backing_object,
		    backing_offset_index);
#endif
		vm_page_xunbusy(p);
	}
	return;
}

/*
 * vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	vm_object_t backing_object, new_backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	while (TRUE) {
		KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON,
		    ("collapsing invalid object"));

		/*
		 * Wait for the backing_object to finish any pending
		 * collapse so that the caller sees the shortest possible
		 * shadow chain.
		 */
		backing_object = vm_object_backing_collapse_wait(object);
		if (backing_object == NULL)
			return;

		KASSERT(object->ref_count > 0 &&
		    object->ref_count > atomic_load_int(&object->shadow_count),
		    ("collapse with invalid ref %d or shadow %d count.",
		    object->ref_count, atomic_load_int(&object->shadow_count)));
		KASSERT((backing_object->flags &
		    (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
		    ("vm_object_collapse: Backing object already collapsing."));
		KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0,
		    ("vm_object_collapse: object is already collapsing."));

		/*
		 * We know that we can either collapse the backing object if
		 * the parent is the only reference to it, or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
1940 */ 1941 if (backing_object->ref_count == 1) { 1942 KASSERT(atomic_load_int(&backing_object->shadow_count) 1943 == 1, 1944 ("vm_object_collapse: shadow_count: %d", 1945 atomic_load_int(&backing_object->shadow_count))); 1946 vm_object_pip_add(object, 1); 1947 vm_object_set_flag(object, OBJ_COLLAPSING); 1948 vm_object_pip_add(backing_object, 1); 1949 vm_object_set_flag(backing_object, OBJ_DEAD); 1950 1951 /* 1952 * If there is exactly one reference to the backing 1953 * object, we can collapse it into the parent. 1954 */ 1955 vm_object_collapse_scan(object); 1956 1957 /* 1958 * Move the pager from backing_object to object. 1959 * 1960 * swap_pager_copy() can sleep, in which case the 1961 * backing_object's and object's locks are released and 1962 * reacquired. 1963 */ 1964 swap_pager_copy(backing_object, object, 1965 OFF_TO_IDX(object->backing_object_offset), TRUE); 1966 1967 /* 1968 * Object now shadows whatever backing_object did. 1969 */ 1970 vm_object_clear_flag(object, OBJ_COLLAPSING); 1971 vm_object_backing_transfer(object, backing_object); 1972 object->backing_object_offset += 1973 backing_object->backing_object_offset; 1974 VM_OBJECT_WUNLOCK(object); 1975 vm_object_pip_wakeup(object); 1976 1977 /* 1978 * Discard backing_object. 1979 * 1980 * Since the backing object has no pages, no pager left, 1981 * and no object references within it, all that is 1982 * necessary is to dispose of it. 1983 */ 1984 KASSERT(backing_object->ref_count == 1, ( 1985 "backing_object %p was somehow re-referenced during collapse!", 1986 backing_object)); 1987 vm_object_pip_wakeup(backing_object); 1988 (void)refcount_release(&backing_object->ref_count); 1989 umtx_shm_object_terminated(backing_object); 1990 vm_object_terminate(backing_object); 1991 counter_u64_add(object_collapses, 1); 1992 VM_OBJECT_WLOCK(object); 1993 } else { 1994 /* 1995 * If we do not entirely shadow the backing object, 1996 * there is nothing we can do so we give up. 1997 * 1998 * The object lock and backing_object lock must not 1999 * be dropped during this sequence. 2000 */ 2001 if (!vm_object_scan_all_shadowed(object)) { 2002 VM_OBJECT_WUNLOCK(backing_object); 2003 break; 2004 } 2005 2006 /* 2007 * Make the parent shadow the next object in the 2008 * chain. Deallocating backing_object will not remove 2009 * it, since its reference count is at least 2. 2010 */ 2011 vm_object_backing_remove_locked(object); 2012 new_backing_object = backing_object->backing_object; 2013 if (new_backing_object != NULL) { 2014 vm_object_backing_insert_ref(object, 2015 new_backing_object); 2016 object->backing_object_offset += 2017 backing_object->backing_object_offset; 2018 } 2019 2020 /* 2021 * Drop the reference count on backing_object. Since 2022 * its ref_count was at least 2, it will not vanish. 2023 */ 2024 (void)refcount_release(&backing_object->ref_count); 2025 KASSERT(backing_object->ref_count >= 1, ( 2026 "backing_object %p was somehow dereferenced during collapse!", 2027 backing_object)); 2028 VM_OBJECT_WUNLOCK(backing_object); 2029 counter_u64_add(object_bypasses, 1); 2030 } 2031 2032 /* 2033 * Try again with this object's new backing object. 2034 */ 2035 } 2036 } 2037 2038 /* 2039 * vm_object_page_remove: 2040 * 2041 * For the given object, either frees or invalidates each of the 2042 * specified pages. In general, a page is freed. However, if a page is 2043 * wired for any reason other than the existence of a managed, wired 2044 * mapping, then it may be invalidated but not removed from the object. 
2045 * Pages are specified by the given range ["start", "end") and the option 2046 * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range 2047 * extends from "start" to the end of the object. If the option 2048 * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the 2049 * specified range are affected. If the option OBJPR_NOTMAPPED is 2050 * specified, then the pages within the specified range must have no 2051 * mappings. Otherwise, if this option is not specified, any mappings to 2052 * the specified pages are removed before the pages are freed or 2053 * invalidated. 2054 * 2055 * In general, this operation should only be performed on objects that 2056 * contain managed pages. There are, however, two exceptions. First, it 2057 * is performed on the kernel and kmem objects by vm_map_entry_delete(). 2058 * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device- 2059 * backed pages. In both of these cases, the option OBJPR_CLEANONLY must 2060 * not be specified and the option OBJPR_NOTMAPPED must be specified. 2061 * 2062 * The object must be locked. 2063 */ 2064 void 2065 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 2066 int options) 2067 { 2068 vm_page_t p, next; 2069 2070 VM_OBJECT_ASSERT_WLOCKED(object); 2071 KASSERT((object->flags & OBJ_UNMANAGED) == 0 || 2072 (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED, 2073 ("vm_object_page_remove: illegal options for object %p", object)); 2074 if (object->resident_page_count == 0) 2075 return; 2076 vm_object_pip_add(object, 1); 2077 again: 2078 p = vm_page_find_least(object, start); 2079 2080 /* 2081 * Here, the variable "p" is either (1) the page with the least pindex 2082 * greater than or equal to the parameter "start" or (2) NULL. 2083 */ 2084 for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2085 next = TAILQ_NEXT(p, listq); 2086 2087 /* 2088 * Skip invalid pages if asked to do so. Try to avoid acquiring 2089 * the busy lock, as some consumers rely on this to avoid 2090 * deadlocks. 2091 * 2092 * A thread may concurrently transition the page from invalid to 2093 * valid using only the busy lock, so the result of this check 2094 * is immediately stale. It is up to consumers to handle this, 2095 * for instance by ensuring that all invalid->valid transitions 2096 * happen with a mutex held, as may be possible for a 2097 * filesystem. 2098 */ 2099 if ((options & OBJPR_VALIDONLY) != 0 && vm_page_none_valid(p)) 2100 continue; 2101 2102 /* 2103 * If the page is wired for any reason besides the existence 2104 * of managed, wired mappings, then it cannot be freed. For 2105 * example, fictitious pages, which represent device memory, 2106 * are inherently wired and cannot be freed. They can, 2107 * however, be invalidated if the option OBJPR_CLEANONLY is 2108 * not specified. 
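 *
 * In that case, the code at the "wired" label below removes any
 * remaining mappings (unless OBJPR_NOTMAPPED was passed) and, unless
 * OBJPR_CLEANONLY was passed, marks the page invalid and clean while
 * leaving the page itself in the object.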
2109 */ 2110 if (vm_page_tryxbusy(p) == 0) { 2111 if (vm_page_busy_sleep(p, "vmopar", 0)) 2112 VM_OBJECT_WLOCK(object); 2113 goto again; 2114 } 2115 if ((options & OBJPR_VALIDONLY) != 0 && vm_page_none_valid(p)) { 2116 vm_page_xunbusy(p); 2117 continue; 2118 } 2119 if (vm_page_wired(p)) { 2120 wired: 2121 if ((options & OBJPR_NOTMAPPED) == 0 && 2122 object->ref_count != 0) 2123 pmap_remove_all(p); 2124 if ((options & OBJPR_CLEANONLY) == 0) { 2125 vm_page_invalid(p); 2126 vm_page_undirty(p); 2127 } 2128 vm_page_xunbusy(p); 2129 continue; 2130 } 2131 KASSERT((p->flags & PG_FICTITIOUS) == 0, 2132 ("vm_object_page_remove: page %p is fictitious", p)); 2133 if ((options & OBJPR_CLEANONLY) != 0 && 2134 !vm_page_none_valid(p)) { 2135 if ((options & OBJPR_NOTMAPPED) == 0 && 2136 object->ref_count != 0 && 2137 !vm_page_try_remove_write(p)) 2138 goto wired; 2139 if (p->dirty != 0) { 2140 vm_page_xunbusy(p); 2141 continue; 2142 } 2143 } 2144 if ((options & OBJPR_NOTMAPPED) == 0 && 2145 object->ref_count != 0 && !vm_page_try_remove_all(p)) 2146 goto wired; 2147 vm_page_free(p); 2148 } 2149 vm_object_pip_wakeup(object); 2150 2151 vm_pager_freespace(object, start, (end == 0 ? object->size : end) - 2152 start); 2153 } 2154 2155 /* 2156 * vm_object_page_noreuse: 2157 * 2158 * For the given object, attempt to move the specified pages to 2159 * the head of the inactive queue. This bypasses regular LRU 2160 * operation and allows the pages to be reused quickly under memory 2161 * pressure. If a page is wired for any reason, then it will not 2162 * be queued. Pages are specified by the range ["start", "end"). 2163 * As a special case, if "end" is zero, then the range extends from 2164 * "start" to the end of the object. 2165 * 2166 * This operation should only be performed on objects that 2167 * contain non-fictitious, managed pages. 2168 * 2169 * The object must be locked. 2170 */ 2171 void 2172 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2173 { 2174 vm_page_t p, next; 2175 2176 VM_OBJECT_ASSERT_LOCKED(object); 2177 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, 2178 ("vm_object_page_noreuse: illegal object %p", object)); 2179 if (object->resident_page_count == 0) 2180 return; 2181 p = vm_page_find_least(object, start); 2182 2183 /* 2184 * Here, the variable "p" is either (1) the page with the least pindex 2185 * greater than or equal to the parameter "start" or (2) NULL. 2186 */ 2187 for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2188 next = TAILQ_NEXT(p, listq); 2189 vm_page_deactivate_noreuse(p); 2190 } 2191 } 2192 2193 /* 2194 * Populate the specified range of the object with valid pages. Returns 2195 * TRUE if the range is successfully populated and FALSE otherwise. 2196 * 2197 * Note: This function should be optimized to pass a larger array of 2198 * pages to vm_pager_get_pages() before it is applied to a non- 2199 * OBJT_DEVICE object. 2200 * 2201 * The object must be locked. 2202 */ 2203 boolean_t 2204 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2205 { 2206 vm_page_t m; 2207 vm_pindex_t pindex; 2208 int rv; 2209 2210 VM_OBJECT_ASSERT_WLOCKED(object); 2211 for (pindex = start; pindex < end; pindex++) { 2212 rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL); 2213 if (rv != VM_PAGER_OK) 2214 break; 2215 2216 /* 2217 * Keep "m" busy because a subsequent iteration may unlock 2218 * the object. 
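 * All pages grabbed so far are unbusied below, once the loop has
 * either completed or failed partway through.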
2219 */
2220 }
2221 if (pindex > start) {
2222 m = vm_page_lookup(object, start);
2223 while (m != NULL && m->pindex < pindex) {
2224 vm_page_xunbusy(m);
2225 m = TAILQ_NEXT(m, listq);
2226 }
2227 }
2228 return (pindex == end);
2229 }
2230
2231 /*
2232 * Routine: vm_object_coalesce
2233 * Function: Coalesces two objects backing up adjoining
2234 * regions of memory into a single object.
2235 *
2236 * Returns TRUE if the objects were combined.
2237 *
2238 * NOTE: Only works at the moment if the second object is NULL -
2239 * if it's not, which object do we lock first?
2240 *
2241 * Parameters:
2242 * prev_object First object to coalesce
2243 * prev_offset Offset into prev_object
2244 * prev_size Size of reference to prev_object
2245 * next_size Size of reference to the second object
2246 * reserved Indicator that extension region has
2247 * swap accounted for
2248 *
2249 * Conditions:
2250 * The object must *not* be locked.
2251 */
2252 boolean_t
2253 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
2254 vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
2255 {
2256 vm_pindex_t next_pindex;
2257
2258 if (prev_object == NULL)
2259 return (TRUE);
2260 if ((prev_object->flags & OBJ_ANON) == 0)
2261 return (FALSE);
2262
2263 VM_OBJECT_WLOCK(prev_object);
2264 /*
2265 * Try to collapse the object first.
2266 */
2267 vm_object_collapse(prev_object);
2268
2269 /*
2270 * Can't coalesce if the object has more than one reference, is paged
2271 * out, shadows another object, or has a copy elsewhere; any of these
2272 * mean that pages not mapped to prev_entry may be in use anyway.
2273 */
2274 if (prev_object->backing_object != NULL) {
2275 VM_OBJECT_WUNLOCK(prev_object);
2276 return (FALSE);
2277 }
2278
2279 prev_size >>= PAGE_SHIFT;
2280 next_size >>= PAGE_SHIFT;
2281 next_pindex = OFF_TO_IDX(prev_offset) + prev_size;
2282
2283 if (prev_object->ref_count > 1 &&
2284 prev_object->size != next_pindex &&
2285 (prev_object->flags & OBJ_ONEMAPPING) == 0) {
2286 VM_OBJECT_WUNLOCK(prev_object);
2287 return (FALSE);
2288 }
2289
2290 /*
2291 * Account for the charge.
2292 */
2293 if (prev_object->cred != NULL) {
2294 /*
2295 * If prev_object was charged, then this mapping,
2296 * although not charged now, may become writable
2297 * later. A non-NULL cred in the object would prevent
2298 * swap reservation when write access is enabled, so
2299 * reserve swap now. A failed reservation causes
2300 * allocation of a separate object for the map
2301 * entry, and swap reservation for that entry is
2302 * managed at the appropriate time.
2303 */
2304 if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
2305 prev_object->cred)) {
2306 VM_OBJECT_WUNLOCK(prev_object);
2307 return (FALSE);
2308 }
2309 prev_object->charge += ptoa(next_size);
2310 }
2311
2312 /*
2313 * Remove any pages that may still be in the object from a previous
2314 * deallocation.
2315 */
2316 if (next_pindex < prev_object->size) {
2317 vm_object_page_remove(prev_object, next_pindex, next_pindex +
2318 next_size, 0);
2319 #if 0
2320 if (prev_object->cred != NULL) {
2321 KASSERT(prev_object->charge >=
2322 ptoa(prev_object->size - next_pindex),
2323 ("object %p overcharged 1 %jx %jx", prev_object,
2324 (uintmax_t)next_pindex, (uintmax_t)next_size));
2325 prev_object->charge -= ptoa(prev_object->size -
2326 next_pindex);
2327 }
2328 #endif
2329 }
2330
2331 /*
2332 * Extend the object if necessary.
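 * For example (hypothetical sizes): coalescing a 2-page extension
 * starting at next_pindex == 3 onto a 3-page prev_object grows
 * prev_object->size from 3 to 5 pages.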
2333 */ 2334 if (next_pindex + next_size > prev_object->size) 2335 prev_object->size = next_pindex + next_size; 2336 2337 VM_OBJECT_WUNLOCK(prev_object); 2338 return (TRUE); 2339 } 2340 2341 void 2342 vm_object_set_writeable_dirty_(vm_object_t object) 2343 { 2344 atomic_add_int(&object->generation, 1); 2345 } 2346 2347 bool 2348 vm_object_mightbedirty_(vm_object_t object) 2349 { 2350 return (object->generation != object->cleangeneration); 2351 } 2352 2353 /* 2354 * vm_object_unwire: 2355 * 2356 * For each page offset within the specified range of the given object, 2357 * find the highest-level page in the shadow chain and unwire it. A page 2358 * must exist at every page offset, and the highest-level page must be 2359 * wired. 2360 */ 2361 void 2362 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, 2363 uint8_t queue) 2364 { 2365 vm_object_t tobject, t1object; 2366 vm_page_t m, tm; 2367 vm_pindex_t end_pindex, pindex, tpindex; 2368 int depth, locked_depth; 2369 2370 KASSERT((offset & PAGE_MASK) == 0, 2371 ("vm_object_unwire: offset is not page aligned")); 2372 KASSERT((length & PAGE_MASK) == 0, 2373 ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); 2374 /* The wired count of a fictitious page never changes. */ 2375 if ((object->flags & OBJ_FICTITIOUS) != 0) 2376 return; 2377 pindex = OFF_TO_IDX(offset); 2378 end_pindex = pindex + atop(length); 2379 again: 2380 locked_depth = 1; 2381 VM_OBJECT_RLOCK(object); 2382 m = vm_page_find_least(object, pindex); 2383 while (pindex < end_pindex) { 2384 if (m == NULL || pindex < m->pindex) { 2385 /* 2386 * The first object in the shadow chain doesn't 2387 * contain a page at the current index. Therefore, 2388 * the page must exist in a backing object. 2389 */ 2390 tobject = object; 2391 tpindex = pindex; 2392 depth = 0; 2393 do { 2394 tpindex += 2395 OFF_TO_IDX(tobject->backing_object_offset); 2396 tobject = tobject->backing_object; 2397 KASSERT(tobject != NULL, 2398 ("vm_object_unwire: missing page")); 2399 if ((tobject->flags & OBJ_FICTITIOUS) != 0) 2400 goto next_page; 2401 depth++; 2402 if (depth == locked_depth) { 2403 locked_depth++; 2404 VM_OBJECT_RLOCK(tobject); 2405 } 2406 } while ((tm = vm_page_lookup(tobject, tpindex)) == 2407 NULL); 2408 } else { 2409 tm = m; 2410 m = TAILQ_NEXT(m, listq); 2411 } 2412 if (vm_page_trysbusy(tm) == 0) { 2413 for (tobject = object; locked_depth >= 1; 2414 locked_depth--) { 2415 t1object = tobject->backing_object; 2416 if (tm->object != tobject) 2417 VM_OBJECT_RUNLOCK(tobject); 2418 tobject = t1object; 2419 } 2420 tobject = tm->object; 2421 if (!vm_page_busy_sleep(tm, "unwbo", 2422 VM_ALLOC_IGN_SBUSY)) 2423 VM_OBJECT_RUNLOCK(tobject); 2424 goto again; 2425 } 2426 vm_page_unwire(tm, queue); 2427 vm_page_sunbusy(tm); 2428 next_page: 2429 pindex++; 2430 } 2431 /* Release the accumulated object locks. */ 2432 for (tobject = object; locked_depth >= 1; locked_depth--) { 2433 t1object = tobject->backing_object; 2434 VM_OBJECT_RUNLOCK(tobject); 2435 tobject = t1object; 2436 } 2437 } 2438 2439 /* 2440 * Return the vnode for the given object, or NULL if none exists. 2441 * For tmpfs objects, the function may return NULL if there is 2442 * no vnode allocated at the time of the call. 2443 */ 2444 struct vnode * 2445 vm_object_vnode(vm_object_t object) 2446 { 2447 struct vnode *vp; 2448 2449 VM_OBJECT_ASSERT_LOCKED(object); 2450 vm_pager_getvp(object, &vp, NULL); 2451 return (vp); 2452 } 2453 2454 /* 2455 * Busy the vm object. 
This prevents new pages belonging to the object from 2456 * becoming busy. Existing pages persist as busy. Callers are responsible 2457 * for checking page state before proceeding. 2458 */ 2459 void 2460 vm_object_busy(vm_object_t obj) 2461 { 2462 2463 VM_OBJECT_ASSERT_LOCKED(obj); 2464 2465 blockcount_acquire(&obj->busy, 1); 2466 /* The fence is required to order loads of page busy. */ 2467 atomic_thread_fence_acq_rel(); 2468 } 2469 2470 void 2471 vm_object_unbusy(vm_object_t obj) 2472 { 2473 2474 blockcount_release(&obj->busy, 1); 2475 } 2476 2477 void 2478 vm_object_busy_wait(vm_object_t obj, const char *wmesg) 2479 { 2480 2481 VM_OBJECT_ASSERT_UNLOCKED(obj); 2482 2483 (void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM); 2484 } 2485 2486 /* 2487 * This function aims to determine if the object is mapped, 2488 * specifically, if it is referenced by a vm_map_entry. Because 2489 * objects occasionally acquire transient references that do not 2490 * represent a mapping, the method used here is inexact. However, it 2491 * has very low overhead and is good enough for the advisory 2492 * vm.vmtotal sysctl. 2493 */ 2494 bool 2495 vm_object_is_active(vm_object_t obj) 2496 { 2497 2498 return (obj->ref_count > atomic_load_int(&obj->shadow_count)); 2499 } 2500 2501 static int 2502 vm_object_list_handler(struct sysctl_req *req, bool swap_only) 2503 { 2504 struct kinfo_vmobject *kvo; 2505 char *fullpath, *freepath; 2506 struct vnode *vp; 2507 struct vattr va; 2508 vm_object_t obj; 2509 vm_page_t m; 2510 struct cdev *cdev; 2511 struct cdevsw *csw; 2512 u_long sp; 2513 int count, error, ref; 2514 key_t key; 2515 unsigned short seq; 2516 bool want_path; 2517 2518 if (req->oldptr == NULL) { 2519 /* 2520 * If an old buffer has not been provided, generate an 2521 * estimate of the space needed for a subsequent call. 2522 */ 2523 mtx_lock(&vm_object_list_mtx); 2524 count = 0; 2525 TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2526 if (obj->type == OBJT_DEAD) 2527 continue; 2528 count++; 2529 } 2530 mtx_unlock(&vm_object_list_mtx); 2531 return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) * 2532 count * 11 / 10)); 2533 } 2534 2535 want_path = !(swap_only || jailed(curthread->td_ucred)); 2536 kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK | M_ZERO); 2537 error = 0; 2538 2539 /* 2540 * VM objects are type stable and are never removed from the 2541 * list once added. This allows us to safely read obj->object_list 2542 * after reacquiring the VM object lock. 2543 */ 2544 mtx_lock(&vm_object_list_mtx); 2545 TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2546 if (obj->type == OBJT_DEAD || 2547 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) 2548 continue; 2549 VM_OBJECT_RLOCK(obj); 2550 if (obj->type == OBJT_DEAD || 2551 (swap_only && (obj->flags & (OBJ_ANON | OBJ_SWAP)) == 0)) { 2552 VM_OBJECT_RUNLOCK(obj); 2553 continue; 2554 } 2555 mtx_unlock(&vm_object_list_mtx); 2556 kvo->kvo_size = ptoa(obj->size); 2557 kvo->kvo_resident = obj->resident_page_count; 2558 kvo->kvo_ref_count = obj->ref_count; 2559 kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count); 2560 kvo->kvo_memattr = obj->memattr; 2561 kvo->kvo_active = 0; 2562 kvo->kvo_inactive = 0; 2563 kvo->kvo_flags = 0; 2564 if (!swap_only) { 2565 TAILQ_FOREACH(m, &obj->memq, listq) { 2566 /* 2567 * A page may belong to the object but be 2568 * dequeued and set to PQ_NONE while the 2569 * object lock is not held. This makes the 2570 * reads of m->queue below racy, and we do not 2571 * count pages set to PQ_NONE. 
However, this
2572 * sysctl is only meant to give an
2573 * approximation of the system anyway.
2574 */
2575 if (m->a.queue == PQ_ACTIVE)
2576 kvo->kvo_active++;
2577 else if (m->a.queue == PQ_INACTIVE)
2578 kvo->kvo_inactive++;
2579 }
2580 }
2581
2582 kvo->kvo_vn_fileid = 0;
2583 kvo->kvo_vn_fsid = 0;
2584 kvo->kvo_vn_fsid_freebsd11 = 0;
2585 freepath = NULL;
2586 fullpath = "";
2587 vp = NULL;
2588 kvo->kvo_type = vm_object_kvme_type(obj, want_path ? &vp :
2589 NULL);
2590 if (vp != NULL) {
2591 vref(vp);
2592 } else if ((obj->flags & OBJ_ANON) != 0) {
2593 MPASS(kvo->kvo_type == KVME_TYPE_SWAP);
2594 kvo->kvo_me = (uintptr_t)obj;
2595 /* tmpfs objs are reported as vnodes */
2596 kvo->kvo_backing_obj = (uintptr_t)obj->backing_object;
2597 sp = swap_pager_swapped_pages(obj);
2598 kvo->kvo_swapped = sp > UINT32_MAX ? UINT32_MAX : sp;
2599 }
2600 if (obj->type == OBJT_DEVICE || obj->type == OBJT_MGTDEVICE) {
2601 cdev = obj->un_pager.devp.dev;
2602 if (cdev != NULL) {
2603 csw = dev_refthread(cdev, &ref);
2604 if (csw != NULL) {
2605 strlcpy(kvo->kvo_path, cdev->si_name,
2606 sizeof(kvo->kvo_path));
2607 dev_relthread(cdev, ref);
2608 }
2609 }
2610 }
2611 VM_OBJECT_RUNLOCK(obj);
2612 if ((obj->flags & OBJ_SYSVSHM) != 0) {
2613 kvo->kvo_flags |= KVMO_FLAG_SYSVSHM;
2614 shmobjinfo(obj, &key, &seq);
2615 kvo->kvo_vn_fileid = key;
2616 kvo->kvo_vn_fsid_freebsd11 = seq;
2617 }
2618 if ((obj->flags & OBJ_POSIXSHM) != 0) {
2619 kvo->kvo_flags |= KVMO_FLAG_POSIXSHM;
2620 shm_get_path(obj, kvo->kvo_path,
2621 sizeof(kvo->kvo_path));
2622 }
2623 if (vp != NULL) {
2624 vn_fullpath(vp, &fullpath, &freepath);
2625 vn_lock(vp, LK_SHARED | LK_RETRY);
2626 if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
2627 kvo->kvo_vn_fileid = va.va_fileid;
2628 kvo->kvo_vn_fsid = va.va_fsid;
2629 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
2630 /* truncate */
2631 }
2632 vput(vp);
2633 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
2634 free(freepath, M_TEMP);
2635 }
2636
2637 /* Pack record size down */
2638 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
2639 + strlen(kvo->kvo_path) + 1;
2640 kvo->kvo_structsize = roundup(kvo->kvo_structsize,
2641 sizeof(uint64_t));
2642 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
2643 maybe_yield();
2644 mtx_lock(&vm_object_list_mtx);
2645 if (error)
2646 break;
2647 }
2648 mtx_unlock(&vm_object_list_mtx);
2649 free(kvo, M_TEMP);
2650 return (error);
2651 }
2652
2653 static int
2654 sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
2655 {
2656 return (vm_object_list_handler(req, false));
2657 }
2658
2659 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
2660 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
2661 "List of VM objects");
2662
2663 static int
2664 sysctl_vm_object_list_swap(SYSCTL_HANDLER_ARGS)
2665 {
2666 return (vm_object_list_handler(req, true));
2667 }
2668
2669 /*
2670 * This sysctl returns a list of the anonymous or swap objects. The
2671 * intent is to provide a stripped-down, optimized list useful for
2672 * analyzing swap use. Since non-swap (default) objects technically
2673 * participate in shadow chains and are converted to the swap type by
2674 * the swap pager as needed, we must report them as well.
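 *
 * A userland consumer can size and fetch these records with the usual
 * two-step sysctl pattern, roughly (a sketch, not code from this file):
 *
 *	size_t len = 0;
 *	sysctlbyname("vm.swap_objects", NULL, &len, NULL, 0);
 *	void *buf = malloc(len);
 *	sysctlbyname("vm.swap_objects", buf, &len, NULL, 0);
 *
 * and then walk the variable-length kinfo_vmobject records using each
 * record's kvo_structsize field.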
2675 */ 2676 SYSCTL_PROC(_vm, OID_AUTO, swap_objects, 2677 CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 0, 2678 sysctl_vm_object_list_swap, "S,kinfo_vmobject", 2679 "List of swap VM objects"); 2680 2681 #include "opt_ddb.h" 2682 #ifdef DDB 2683 #include <sys/kernel.h> 2684 2685 #include <sys/cons.h> 2686 2687 #include <ddb/ddb.h> 2688 2689 static int 2690 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2691 { 2692 vm_map_t tmpm; 2693 vm_map_entry_t tmpe; 2694 vm_object_t obj; 2695 2696 if (map == 0) 2697 return 0; 2698 2699 if (entry == 0) { 2700 VM_MAP_ENTRY_FOREACH(tmpe, map) { 2701 if (_vm_object_in_map(map, object, tmpe)) { 2702 return 1; 2703 } 2704 } 2705 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2706 tmpm = entry->object.sub_map; 2707 VM_MAP_ENTRY_FOREACH(tmpe, tmpm) { 2708 if (_vm_object_in_map(tmpm, object, tmpe)) { 2709 return 1; 2710 } 2711 } 2712 } else if ((obj = entry->object.vm_object) != NULL) { 2713 for (; obj; obj = obj->backing_object) 2714 if (obj == object) { 2715 return 1; 2716 } 2717 } 2718 return 0; 2719 } 2720 2721 static int 2722 vm_object_in_map(vm_object_t object) 2723 { 2724 struct proc *p; 2725 2726 /* sx_slock(&allproc_lock); */ 2727 FOREACH_PROC_IN_SYSTEM(p) { 2728 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 2729 continue; 2730 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { 2731 /* sx_sunlock(&allproc_lock); */ 2732 return 1; 2733 } 2734 } 2735 /* sx_sunlock(&allproc_lock); */ 2736 if (_vm_object_in_map(kernel_map, object, 0)) 2737 return 1; 2738 return 0; 2739 } 2740 2741 DB_SHOW_COMMAND_FLAGS(vmochk, vm_object_check, DB_CMD_MEMSAFE) 2742 { 2743 vm_object_t object; 2744 2745 /* 2746 * make sure that internal objs are in a map somewhere 2747 * and none have zero ref counts. 2748 */ 2749 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2750 if ((object->flags & OBJ_ANON) != 0) { 2751 if (object->ref_count == 0) { 2752 db_printf("vmochk: internal obj has zero ref count: %ld\n", 2753 (long)object->size); 2754 } 2755 if (!vm_object_in_map(object)) { 2756 db_printf( 2757 "vmochk: internal obj is not in a map: " 2758 "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", 2759 object->ref_count, (u_long)object->size, 2760 (u_long)object->size, 2761 (void *)object->backing_object); 2762 } 2763 } 2764 if (db_pager_quit) 2765 return; 2766 } 2767 } 2768 2769 /* 2770 * vm_object_print: [ debug ] 2771 */ 2772 DB_SHOW_COMMAND(object, vm_object_print_static) 2773 { 2774 /* XXX convert args. */ 2775 vm_object_t object = (vm_object_t)addr; 2776 boolean_t full = have_addr; 2777 2778 vm_page_t p; 2779 2780 /* XXX count is an (unused) arg. Avoid shadowing it. */ 2781 #define count was_count 2782 2783 int count; 2784 2785 if (object == NULL) 2786 return; 2787 2788 db_iprintf( 2789 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", 2790 object, (int)object->type, (uintmax_t)object->size, 2791 object->resident_page_count, object->ref_count, object->flags, 2792 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); 2793 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", 2794 atomic_load_int(&object->shadow_count), 2795 object->backing_object ? 
object->backing_object->ref_count : 0, 2796 object->backing_object, (uintmax_t)object->backing_object_offset); 2797 2798 if (!full) 2799 return; 2800 2801 db_indent += 2; 2802 count = 0; 2803 TAILQ_FOREACH(p, &object->memq, listq) { 2804 if (count == 0) 2805 db_iprintf("memory:="); 2806 else if (count == 6) { 2807 db_printf("\n"); 2808 db_iprintf(" ..."); 2809 count = 0; 2810 } else 2811 db_printf(","); 2812 count++; 2813 2814 db_printf("(off=0x%jx,page=0x%jx)", 2815 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); 2816 2817 if (db_pager_quit) 2818 break; 2819 } 2820 if (count != 0) 2821 db_printf("\n"); 2822 db_indent -= 2; 2823 } 2824 2825 /* XXX. */ 2826 #undef count 2827 2828 /* XXX need this non-static entry for calling from vm_map_print. */ 2829 void 2830 vm_object_print( 2831 /* db_expr_t */ long addr, 2832 boolean_t have_addr, 2833 /* db_expr_t */ long count, 2834 char *modif) 2835 { 2836 vm_object_print_static(addr, have_addr, count, modif); 2837 } 2838 2839 DB_SHOW_COMMAND_FLAGS(vmopag, vm_object_print_pages, DB_CMD_MEMSAFE) 2840 { 2841 vm_object_t object; 2842 vm_pindex_t fidx; 2843 vm_paddr_t pa; 2844 vm_page_t m, prev_m; 2845 int rcount; 2846 2847 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2848 db_printf("new object: %p\n", (void *)object); 2849 if (db_pager_quit) 2850 return; 2851 2852 rcount = 0; 2853 fidx = 0; 2854 pa = -1; 2855 TAILQ_FOREACH(m, &object->memq, listq) { 2856 if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL && 2857 prev_m->pindex + 1 != m->pindex) { 2858 if (rcount) { 2859 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2860 (long)fidx, rcount, (long)pa); 2861 if (db_pager_quit) 2862 return; 2863 rcount = 0; 2864 } 2865 } 2866 if (rcount && 2867 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 2868 ++rcount; 2869 continue; 2870 } 2871 if (rcount) { 2872 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2873 (long)fidx, rcount, (long)pa); 2874 if (db_pager_quit) 2875 return; 2876 } 2877 fidx = m->pindex; 2878 pa = VM_PAGE_TO_PHYS(m); 2879 rcount = 1; 2880 } 2881 if (rcount) { 2882 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2883 (long)fidx, rcount, (long)pa); 2884 if (db_pager_quit) 2885 return; 2886 } 2887 } 2888 } 2889 #endif /* DDB */ 2890
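/*
 * For reference, the DDB commands defined above are invoked from the
 * debugger prompt roughly as follows:
 *
 *	db> show vmochk
 *	db> show object <address>
 *	db> show vmopag
 */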