1 /*- 2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * The Mach Operating System project at Carnegie-Mellon University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 35 * 36 * 37 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 38 * All rights reserved. 39 * 40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 41 * 42 * Permission to use, copy, modify and distribute this software and 43 * its documentation is hereby granted, provided that both the copyright 44 * notice and this permission notice appear in all copies of the 45 * software, derivative works or modified versions, and any portions 46 * thereof, and that both notices appear in supporting documentation. 47 * 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51 * 52 * Carnegie Mellon requests users of this software to return to 53 * 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55 * School of Computer Science 56 * Carnegie Mellon University 57 * Pittsburgh PA 15213-3890 58 * 59 * any improvements or extensions that they make and grant Carnegie the 60 * rights to redistribute these changes. 61 */ 62 63 /* 64 * Virtual memory object module. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/blockcount.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/pctrie.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
    int pagerflags, int flags, boolean_t *allclean, boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
    boolean_t *allclean);
static void vm_object_backing_remove(vm_object_t object);

/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.  Only one "reference" to a given
 * region of an object should be writeable.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, and locked by the object's
 * lock.
 *
 * Each object also records a "pager" routine which is
 * used to retrieve (and store) pages to the proper backing
 * storage.  In addition, objects may be backed by other
 * objects from which they were virtual-copied.
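 * Chains of such shadow objects are collapsed when possible; see
 * vm_object_collapse() below.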
140 * 141 * The only items within the object structure which are 142 * modified after time of creation are: 143 * reference count locked by object's lock 144 * pager routine locked by object's lock 145 * 146 */ 147 148 struct object_q vm_object_list; 149 struct mtx vm_object_list_mtx; /* lock for object list and count */ 150 151 struct vm_object kernel_object_store; 152 153 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 154 "VM object stats"); 155 156 static counter_u64_t object_collapses = EARLY_COUNTER; 157 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD, 158 &object_collapses, 159 "VM object collapses"); 160 161 static counter_u64_t object_bypasses = EARLY_COUNTER; 162 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD, 163 &object_bypasses, 164 "VM object bypasses"); 165 166 static counter_u64_t object_collapse_waits = EARLY_COUNTER; 167 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD, 168 &object_collapse_waits, 169 "Number of sleeps for collapse"); 170 171 static void 172 counter_startup(void) 173 { 174 175 object_collapses = counter_u64_alloc(M_WAITOK); 176 object_bypasses = counter_u64_alloc(M_WAITOK); 177 object_collapse_waits = counter_u64_alloc(M_WAITOK); 178 } 179 SYSINIT(object_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL); 180 181 static uma_zone_t obj_zone; 182 183 static int vm_object_zinit(void *mem, int size, int flags); 184 185 #ifdef INVARIANTS 186 static void vm_object_zdtor(void *mem, int size, void *arg); 187 188 static void 189 vm_object_zdtor(void *mem, int size, void *arg) 190 { 191 vm_object_t object; 192 193 object = (vm_object_t)mem; 194 KASSERT(object->ref_count == 0, 195 ("object %p ref_count = %d", object, object->ref_count)); 196 KASSERT(TAILQ_EMPTY(&object->memq), 197 ("object %p has resident pages in its memq", object)); 198 KASSERT(vm_radix_is_empty(&object->rtree), 199 ("object %p has resident pages in its trie", object)); 200 #if VM_NRESERVLEVEL > 0 201 KASSERT(LIST_EMPTY(&object->rvq), 202 ("object %p has reservations", 203 object)); 204 #endif 205 KASSERT(blockcount_read(&object->paging_in_progress) == 0, 206 ("object %p paging_in_progress = %d", 207 object, blockcount_read(&object->paging_in_progress))); 208 KASSERT(!vm_object_busied(object), 209 ("object %p busy = %d", object, blockcount_read(&object->busy))); 210 KASSERT(object->resident_page_count == 0, 211 ("object %p resident_page_count = %d", 212 object, object->resident_page_count)); 213 KASSERT(object->shadow_count == 0, 214 ("object %p shadow_count = %d", 215 object, object->shadow_count)); 216 KASSERT(object->type == OBJT_DEAD, 217 ("object %p has non-dead type %d", 218 object, object->type)); 219 } 220 #endif 221 222 static int 223 vm_object_zinit(void *mem, int size, int flags) 224 { 225 vm_object_t object; 226 227 object = (vm_object_t)mem; 228 rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW); 229 230 /* These are true for any object that has been freed */ 231 object->type = OBJT_DEAD; 232 vm_radix_init(&object->rtree); 233 refcount_init(&object->ref_count, 0); 234 blockcount_init(&object->paging_in_progress); 235 blockcount_init(&object->busy); 236 object->resident_page_count = 0; 237 object->shadow_count = 0; 238 object->flags = OBJ_DEAD; 239 240 mtx_lock(&vm_object_list_mtx); 241 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); 242 mtx_unlock(&vm_object_list_mtx); 243 return (0); 244 } 245 246 static void 247 _vm_object_allocate(objtype_t type, vm_pindex_t 
size, u_short flags, 248 vm_object_t object, void *handle) 249 { 250 251 TAILQ_INIT(&object->memq); 252 LIST_INIT(&object->shadow_head); 253 254 object->type = type; 255 if (type == OBJT_SWAP) 256 pctrie_init(&object->un_pager.swp.swp_blks); 257 258 /* 259 * Ensure that swap_pager_swapoff() iteration over object_list 260 * sees up to date type and pctrie head if it observed 261 * non-dead object. 262 */ 263 atomic_thread_fence_rel(); 264 265 object->pg_color = 0; 266 object->flags = flags; 267 object->size = size; 268 object->domain.dr_policy = NULL; 269 object->generation = 1; 270 object->cleangeneration = 1; 271 refcount_init(&object->ref_count, 1); 272 object->memattr = VM_MEMATTR_DEFAULT; 273 object->cred = NULL; 274 object->charge = 0; 275 object->handle = handle; 276 object->backing_object = NULL; 277 object->backing_object_offset = (vm_ooffset_t) 0; 278 #if VM_NRESERVLEVEL > 0 279 LIST_INIT(&object->rvq); 280 #endif 281 umtx_shm_object_init(object); 282 } 283 284 /* 285 * vm_object_init: 286 * 287 * Initialize the VM objects module. 288 */ 289 void 290 vm_object_init(void) 291 { 292 TAILQ_INIT(&vm_object_list); 293 mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF); 294 295 rw_init(&kernel_object->lock, "kernel vm object"); 296 _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS - 297 VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL); 298 #if VM_NRESERVLEVEL > 0 299 kernel_object->flags |= OBJ_COLORED; 300 kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); 301 #endif 302 303 /* 304 * The lock portion of struct vm_object must be type stable due 305 * to vm_pageout_fallback_object_lock locking a vm object 306 * without holding any references to it. 307 */ 308 obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL, 309 #ifdef INVARIANTS 310 vm_object_zdtor, 311 #else 312 NULL, 313 #endif 314 vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 315 316 vm_radix_zinit(); 317 } 318 319 void 320 vm_object_clear_flag(vm_object_t object, u_short bits) 321 { 322 323 VM_OBJECT_ASSERT_WLOCKED(object); 324 object->flags &= ~bits; 325 } 326 327 /* 328 * Sets the default memory attribute for the specified object. Pages 329 * that are allocated to this object are by default assigned this memory 330 * attribute. 331 * 332 * Presently, this function must be called before any pages are allocated 333 * to the object. In the future, this requirement may be relaxed for 334 * "default" and "swap" objects. 
335 */ 336 int 337 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr) 338 { 339 340 VM_OBJECT_ASSERT_WLOCKED(object); 341 switch (object->type) { 342 case OBJT_DEFAULT: 343 case OBJT_DEVICE: 344 case OBJT_MGTDEVICE: 345 case OBJT_PHYS: 346 case OBJT_SG: 347 case OBJT_SWAP: 348 case OBJT_VNODE: 349 if (!TAILQ_EMPTY(&object->memq)) 350 return (KERN_FAILURE); 351 break; 352 case OBJT_DEAD: 353 return (KERN_INVALID_ARGUMENT); 354 default: 355 panic("vm_object_set_memattr: object %p is of undefined type", 356 object); 357 } 358 object->memattr = memattr; 359 return (KERN_SUCCESS); 360 } 361 362 void 363 vm_object_pip_add(vm_object_t object, short i) 364 { 365 366 if (i > 0) 367 blockcount_acquire(&object->paging_in_progress, i); 368 } 369 370 void 371 vm_object_pip_wakeup(vm_object_t object) 372 { 373 374 vm_object_pip_wakeupn(object, 1); 375 } 376 377 void 378 vm_object_pip_wakeupn(vm_object_t object, short i) 379 { 380 381 if (i > 0) 382 blockcount_release(&object->paging_in_progress, i); 383 } 384 385 /* 386 * Atomically drop the object lock and wait for pip to drain. This protects 387 * from sleep/wakeup races due to identity changes. The lock is not re-acquired 388 * on return. 389 */ 390 static void 391 vm_object_pip_sleep(vm_object_t object, const char *waitid) 392 { 393 394 (void)blockcount_sleep(&object->paging_in_progress, &object->lock, 395 waitid, PVM | PDROP); 396 } 397 398 void 399 vm_object_pip_wait(vm_object_t object, const char *waitid) 400 { 401 402 VM_OBJECT_ASSERT_WLOCKED(object); 403 404 blockcount_wait(&object->paging_in_progress, &object->lock, waitid, 405 PVM); 406 } 407 408 void 409 vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid) 410 { 411 412 VM_OBJECT_ASSERT_UNLOCKED(object); 413 414 blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM); 415 } 416 417 /* 418 * vm_object_allocate: 419 * 420 * Returns a new object with the given size. 421 */ 422 vm_object_t 423 vm_object_allocate(objtype_t type, vm_pindex_t size) 424 { 425 vm_object_t object; 426 u_short flags; 427 428 switch (type) { 429 case OBJT_DEAD: 430 panic("vm_object_allocate: can't create OBJT_DEAD"); 431 case OBJT_DEFAULT: 432 case OBJT_SWAP: 433 flags = OBJ_COLORED; 434 break; 435 case OBJT_DEVICE: 436 case OBJT_SG: 437 flags = OBJ_FICTITIOUS | OBJ_UNMANAGED; 438 break; 439 case OBJT_MGTDEVICE: 440 flags = OBJ_FICTITIOUS; 441 break; 442 case OBJT_PHYS: 443 flags = OBJ_UNMANAGED; 444 break; 445 case OBJT_VNODE: 446 flags = 0; 447 break; 448 default: 449 panic("vm_object_allocate: type %d is undefined", type); 450 } 451 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); 452 _vm_object_allocate(type, size, flags, object, NULL); 453 454 return (object); 455 } 456 457 /* 458 * vm_object_allocate_anon: 459 * 460 * Returns a new default object of the given size and marked as 461 * anonymous memory for special split/collapse handling. Color 462 * to be initialized by the caller. 463 */ 464 vm_object_t 465 vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object, 466 struct ucred *cred, vm_size_t charge) 467 { 468 vm_object_t handle, object; 469 470 if (backing_object == NULL) 471 handle = NULL; 472 else if ((backing_object->flags & OBJ_ANON) != 0) 473 handle = backing_object->handle; 474 else 475 handle = backing_object; 476 object = uma_zalloc(obj_zone, M_WAITOK); 477 _vm_object_allocate(OBJT_DEFAULT, size, OBJ_ANON | OBJ_ONEMAPPING, 478 object, handle); 479 object->cred = cred; 480 object->charge = cred != NULL ? 
	    charge : 0;
	return (object);
}

static void
vm_object_reference_vnode(vm_object_t object)
{
	u_int old;

	/*
	 * vnode objects need the lock for the first reference
	 * to serialize with vm_object_deallocate_vnode().
	 */
	if (!refcount_acquire_if_gt(&object->ref_count, 0)) {
		VM_OBJECT_RLOCK(object);
		old = refcount_acquire(&object->ref_count);
		if (object->type == OBJT_VNODE && old == 0)
			vref(object->handle);
		VM_OBJECT_RUNLOCK(object);
	}
}

/*
 * vm_object_reference:
 *
 *	Acquires a reference to the given object.
 */
void
vm_object_reference(vm_object_t object)
{

	if (object == NULL)
		return;

	if (object->type == OBJT_VNODE)
		vm_object_reference_vnode(object);
	else
		refcount_acquire(&object->ref_count);
	KASSERT((object->flags & OBJ_DEAD) == 0,
	    ("vm_object_reference: Referenced dead object."));
}

/*
 * vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	u_int old;

	VM_OBJECT_ASSERT_LOCKED(object);
	old = refcount_acquire(&object->ref_count);
	if (object->type == OBJT_VNODE && old == 0)
		vref(object->handle);
	KASSERT((object->flags & OBJ_DEAD) == 0,
	    ("vm_object_reference: Referenced dead object."));
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_deallocate_vnode(vm_object_t object)
{
	struct vnode *vp = (struct vnode *)object->handle;
	bool last;

	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_deallocate_vnode: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp"));

	/* Object lock to protect handle lookup. */
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);

	if (!last)
		return;

	if (!umtx_shm_vnobj_persistent)
		umtx_shm_object_terminated(object);

	/* vrele may need the vnode lock. */
	vrele(vp);
}

/*
 * We dropped a reference on an object and discovered that it had a
 * single remaining shadow.  This is a sibling of the reference we
 * dropped.  Attempt to collapse the sibling and backing object.
 */
static vm_object_t
vm_object_deallocate_anon(vm_object_t backing_object)
{
	vm_object_t object;

	/* Fetch the final shadow. */
	object = LIST_FIRST(&backing_object->shadow_head);
	KASSERT(object != NULL && backing_object->shadow_count == 1,
	    ("vm_object_deallocate_anon: ref_count: %d, shadow_count: %d",
	    backing_object->ref_count, backing_object->shadow_count));
	KASSERT((object->flags & (OBJ_TMPFS_NODE | OBJ_ANON)) == OBJ_ANON,
	    ("invalid shadow object %p", object));

	if (!VM_OBJECT_TRYWLOCK(object)) {
		/*
		 * Prevent object from disappearing since we do not have a
		 * reference.
		 */
		vm_object_pip_add(object, 1);
		VM_OBJECT_WUNLOCK(backing_object);
		VM_OBJECT_WLOCK(object);
		vm_object_pip_wakeup(object);
	} else
		VM_OBJECT_WUNLOCK(backing_object);

	/*
	 * Check for a collapse/terminate race with the last reference holder.
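	 * If the sibling is dead or already collapsing, or its last
	 * reference has since been dropped, there is nothing to collapse.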
	 */
	if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 ||
	    !refcount_acquire_if_not_zero(&object->ref_count)) {
		VM_OBJECT_WUNLOCK(object);
		return (NULL);
	}
	backing_object = object->backing_object;
	if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0)
		vm_object_collapse(object);
	VM_OBJECT_WUNLOCK(object);

	return (object);
}

/*
 * vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	bool released;

	while (object != NULL) {
		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.  A ref count
		 * of 1 may be a special case depending on the shadow count
		 * being 0 or 1.  These cases require a write lock on the
		 * object.
		 */
		if ((object->flags & OBJ_ANON) == 0)
			released = refcount_release_if_gt(&object->ref_count, 1);
		else
			released = refcount_release_if_gt(&object->ref_count, 2);
		if (released)
			return;

		if (object->type == OBJT_VNODE) {
			VM_OBJECT_RLOCK(object);
			if (object->type == OBJT_VNODE) {
				vm_object_deallocate_vnode(object);
				return;
			}
			VM_OBJECT_RUNLOCK(object);
		}

		VM_OBJECT_WLOCK(object);
		KASSERT(object->ref_count > 0,
		    ("vm_object_deallocate: object deallocated too many times: %d",
		    object->type));

		/*
		 * If this is not the final reference to an anonymous
		 * object we may need to collapse the shadow chain.
		 */
		if (!refcount_release(&object->ref_count)) {
			if (object->ref_count > 1 ||
			    object->shadow_count == 0) {
				if ((object->flags & OBJ_ANON) != 0 &&
				    object->ref_count == 1)
					vm_object_set_flag(object,
					    OBJ_ONEMAPPING);
				VM_OBJECT_WUNLOCK(object);
				return;
			}

			/* Handle collapsing last ref on anonymous objects. */
			object = vm_object_deallocate_anon(object);
			continue;
		}

		/*
		 * Handle the final reference to an object.  We restart
		 * the loop with the backing object to avoid recursion.
		 */
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
			    ("shadowed tmpfs v_object 2 %p", object));
			vm_object_backing_remove(object);
		}

		KASSERT((object->flags & OBJ_DEAD) == 0,
		    ("vm_object_deallocate: Terminating dead object."));
		vm_object_set_flag(object, OBJ_DEAD);
		vm_object_terminate(object);
		object = temp;
	}
}

/*
 * vm_object_destroy removes the object from the global object list
 * and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
722 */ 723 uma_zfree(obj_zone, object); 724 } 725 726 static void 727 vm_object_backing_remove_locked(vm_object_t object) 728 { 729 vm_object_t backing_object; 730 731 backing_object = object->backing_object; 732 VM_OBJECT_ASSERT_WLOCKED(object); 733 VM_OBJECT_ASSERT_WLOCKED(backing_object); 734 735 KASSERT((object->flags & OBJ_COLLAPSING) == 0, 736 ("vm_object_backing_remove: Removing collapsing object.")); 737 738 if ((object->flags & OBJ_SHADOWLIST) != 0) { 739 LIST_REMOVE(object, shadow_list); 740 backing_object->shadow_count--; 741 object->flags &= ~OBJ_SHADOWLIST; 742 } 743 object->backing_object = NULL; 744 } 745 746 static void 747 vm_object_backing_remove(vm_object_t object) 748 { 749 vm_object_t backing_object; 750 751 VM_OBJECT_ASSERT_WLOCKED(object); 752 753 if ((object->flags & OBJ_SHADOWLIST) != 0) { 754 backing_object = object->backing_object; 755 VM_OBJECT_WLOCK(backing_object); 756 vm_object_backing_remove_locked(object); 757 VM_OBJECT_WUNLOCK(backing_object); 758 } else 759 object->backing_object = NULL; 760 } 761 762 static void 763 vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object) 764 { 765 766 VM_OBJECT_ASSERT_WLOCKED(object); 767 768 if ((backing_object->flags & OBJ_ANON) != 0) { 769 VM_OBJECT_ASSERT_WLOCKED(backing_object); 770 LIST_INSERT_HEAD(&backing_object->shadow_head, object, 771 shadow_list); 772 backing_object->shadow_count++; 773 object->flags |= OBJ_SHADOWLIST; 774 } 775 object->backing_object = backing_object; 776 } 777 778 static void 779 vm_object_backing_insert(vm_object_t object, vm_object_t backing_object) 780 { 781 782 VM_OBJECT_ASSERT_WLOCKED(object); 783 784 if ((backing_object->flags & OBJ_ANON) != 0) { 785 VM_OBJECT_WLOCK(backing_object); 786 vm_object_backing_insert_locked(object, backing_object); 787 VM_OBJECT_WUNLOCK(backing_object); 788 } else 789 object->backing_object = backing_object; 790 } 791 792 /* 793 * Insert an object into a backing_object's shadow list with an additional 794 * reference to the backing_object added. 795 */ 796 static void 797 vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object) 798 { 799 800 VM_OBJECT_ASSERT_WLOCKED(object); 801 802 if ((backing_object->flags & OBJ_ANON) != 0) { 803 VM_OBJECT_WLOCK(backing_object); 804 KASSERT((backing_object->flags & OBJ_DEAD) == 0, 805 ("shadowing dead anonymous object")); 806 vm_object_reference_locked(backing_object); 807 vm_object_backing_insert_locked(object, backing_object); 808 vm_object_clear_flag(backing_object, OBJ_ONEMAPPING); 809 VM_OBJECT_WUNLOCK(backing_object); 810 } else { 811 vm_object_reference(backing_object); 812 object->backing_object = backing_object; 813 } 814 } 815 816 /* 817 * Transfer a backing reference from backing_object to object. 818 */ 819 static void 820 vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object) 821 { 822 vm_object_t new_backing_object; 823 824 /* 825 * Note that the reference to backing_object->backing_object 826 * moves from within backing_object to within object. 
	 */
	vm_object_backing_remove_locked(object);
	new_backing_object = backing_object->backing_object;
	if (new_backing_object == NULL)
		return;
	if ((new_backing_object->flags & OBJ_ANON) != 0) {
		VM_OBJECT_WLOCK(new_backing_object);
		vm_object_backing_remove_locked(backing_object);
		vm_object_backing_insert_locked(object, new_backing_object);
		VM_OBJECT_WUNLOCK(new_backing_object);
	} else {
		object->backing_object = new_backing_object;
		backing_object->backing_object = NULL;
	}
}

/*
 * Wait for a concurrent collapse to settle.
 */
static void
vm_object_collapse_wait(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	while ((object->flags & OBJ_COLLAPSING) != 0) {
		vm_object_pip_wait(object, "vmcolwait");
		counter_u64_add(object_collapse_waits, 1);
	}
}

/*
 * Waits for a backing object to clear a pending collapse and returns
 * it locked if it is an ANON object.
 */
static vm_object_t
vm_object_backing_collapse_wait(vm_object_t object)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	for (;;) {
		backing_object = object->backing_object;
		if (backing_object == NULL ||
		    (backing_object->flags & OBJ_ANON) == 0)
			return (NULL);
		VM_OBJECT_WLOCK(backing_object);
		if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0)
			break;
		VM_OBJECT_WUNLOCK(object);
		vm_object_pip_sleep(backing_object, "vmbckwait");
		counter_u64_add(object_collapse_waits, 1);
		VM_OBJECT_WLOCK(object);
	}
	return (backing_object);
}

/*
 * vm_object_terminate_pages removes any remaining pageable pages
 * from the object and resets the object to an empty state.
 */
static void
vm_object_terminate_pages(vm_object_t object)
{
	vm_page_t p, p_next;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to an empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		vm_page_assert_unbusied(p);
		KASSERT(p->object == object &&
		    (p->ref_count & VPRC_OBJREF) != 0,
		    ("vm_object_terminate_pages: page %p is inconsistent", p));

		p->object = NULL;
		if (vm_page_drop(p, VPRC_OBJREF) == VPRC_OBJREF) {
			VM_CNT_INC(v_pfree);
			vm_page_free(p);
		}
	}

	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
	 */
	if (object->resident_page_count != 0) {
		vm_radix_reclaim_allnodes(&object->rtree);
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}
}

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 * This routine may block.
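 * The object must have no backing object, must be marked OBJ_DEAD, and
 * must have a reference count of zero.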
 */
void
vm_object_terminate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_DEAD) != 0,
	    ("terminating non-dead obj %p", object));
	KASSERT((object->flags & OBJ_COLLAPSING) == 0,
	    ("terminating collapsing obj %p", object));
	KASSERT(object->backing_object == NULL,
	    ("terminating shadow obj %p", object));

	/*
	 * Wait for the pageout daemon to be done with the object.
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!blockcount_read(&object->paging_in_progress),
	    ("vm_object_terminate: pageout in progress"));

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	if ((object->flags & OBJ_PG_DTOR) == 0)
		vm_object_terminate_pages(object);

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif

	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean)
{

	vm_page_assert_busied(p);

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) {
		*allclean = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

/*
 * vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of the object.  Leaves
 *	pages on whatever queue they are currently on.  If NOSYNC is set then
 *	do not write out pages with PGA_NOSYNC set (originally comes from
 *	MAP_NOSYNC), leaving the object dirty.
 *
 *	For swap objects backing tmpfs regular files, do not flush anything,
 *	but remove write protection on the mapped pages to update mtime through
 *	mmapped writes.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns FALSE if some page from the range was not written, as
 *	reported by the pager, and TRUE otherwise.
 */
boolean_t
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int curgeneration, n, pagerflags;
	boolean_t eio, res, allclean;

	VM_OBJECT_ASSERT_WLOCKED(object);

	if (!vm_object_mightbedirty(object) || object->resident_page_count == 0)
		return (TRUE);

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
1044 VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK; 1045 pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0; 1046 1047 tstart = OFF_TO_IDX(start); 1048 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); 1049 allclean = tstart == 0 && tend >= object->size; 1050 res = TRUE; 1051 1052 rescan: 1053 curgeneration = object->generation; 1054 1055 for (p = vm_page_find_least(object, tstart); p != NULL; p = np) { 1056 pi = p->pindex; 1057 if (pi >= tend) 1058 break; 1059 np = TAILQ_NEXT(p, listq); 1060 if (vm_page_none_valid(p)) 1061 continue; 1062 if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) { 1063 if (object->generation != curgeneration && 1064 (flags & OBJPC_SYNC) != 0) 1065 goto rescan; 1066 np = vm_page_find_least(object, pi); 1067 continue; 1068 } 1069 if (!vm_object_page_remove_write(p, flags, &allclean)) { 1070 vm_page_xunbusy(p); 1071 continue; 1072 } 1073 if (object->type == OBJT_VNODE) { 1074 n = vm_object_page_collect_flush(object, p, pagerflags, 1075 flags, &allclean, &eio); 1076 if (eio) { 1077 res = FALSE; 1078 allclean = FALSE; 1079 } 1080 if (object->generation != curgeneration && 1081 (flags & OBJPC_SYNC) != 0) 1082 goto rescan; 1083 1084 /* 1085 * If the VOP_PUTPAGES() did a truncated write, so 1086 * that even the first page of the run is not fully 1087 * written, vm_pageout_flush() returns 0 as the run 1088 * length. Since the condition that caused truncated 1089 * write may be permanent, e.g. exhausted free space, 1090 * accepting n == 0 would cause an infinite loop. 1091 * 1092 * Forwarding the iterator leaves the unwritten page 1093 * behind, but there is not much we can do there if 1094 * filesystem refuses to write it. 1095 */ 1096 if (n == 0) { 1097 n = 1; 1098 allclean = FALSE; 1099 } 1100 } else { 1101 n = 1; 1102 vm_page_xunbusy(p); 1103 } 1104 np = vm_page_find_least(object, pi + n); 1105 } 1106 #if 0 1107 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0); 1108 #endif 1109 1110 /* 1111 * Leave updating cleangeneration for tmpfs objects to tmpfs 1112 * scan. It needs to update mtime, which happens for other 1113 * filesystems during page writeouts. 
1114 */ 1115 if (allclean && object->type == OBJT_VNODE) 1116 object->cleangeneration = curgeneration; 1117 return (res); 1118 } 1119 1120 static int 1121 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, 1122 int flags, boolean_t *allclean, boolean_t *eio) 1123 { 1124 vm_page_t ma[vm_pageout_page_count], p_first, tp; 1125 int count, i, mreq, runlen; 1126 1127 vm_page_lock_assert(p, MA_NOTOWNED); 1128 vm_page_assert_xbusied(p); 1129 VM_OBJECT_ASSERT_WLOCKED(object); 1130 1131 count = 1; 1132 mreq = 0; 1133 1134 for (tp = p; count < vm_pageout_page_count; count++) { 1135 tp = vm_page_next(tp); 1136 if (tp == NULL || vm_page_tryxbusy(tp) == 0) 1137 break; 1138 if (!vm_object_page_remove_write(tp, flags, allclean)) { 1139 vm_page_xunbusy(tp); 1140 break; 1141 } 1142 } 1143 1144 for (p_first = p; count < vm_pageout_page_count; count++) { 1145 tp = vm_page_prev(p_first); 1146 if (tp == NULL || vm_page_tryxbusy(tp) == 0) 1147 break; 1148 if (!vm_object_page_remove_write(tp, flags, allclean)) { 1149 vm_page_xunbusy(tp); 1150 break; 1151 } 1152 p_first = tp; 1153 mreq++; 1154 } 1155 1156 for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++) 1157 ma[i] = tp; 1158 1159 vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio); 1160 return (runlen); 1161 } 1162 1163 /* 1164 * Note that there is absolutely no sense in writing out 1165 * anonymous objects, so we track down the vnode object 1166 * to write out. 1167 * We invalidate (remove) all pages from the address space 1168 * for semantic correctness. 1169 * 1170 * If the backing object is a device object with unmanaged pages, then any 1171 * mappings to the specified range of pages must be removed before this 1172 * function is called. 1173 * 1174 * Note: certain anonymous maps, such as MAP_NOSYNC maps, 1175 * may start out with a NULL object. 1176 */ 1177 boolean_t 1178 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, 1179 boolean_t syncio, boolean_t invalidate) 1180 { 1181 vm_object_t backing_object; 1182 struct vnode *vp; 1183 struct mount *mp; 1184 int error, flags, fsync_after; 1185 boolean_t res; 1186 1187 if (object == NULL) 1188 return (TRUE); 1189 res = TRUE; 1190 error = 0; 1191 VM_OBJECT_WLOCK(object); 1192 while ((backing_object = object->backing_object) != NULL) { 1193 VM_OBJECT_WLOCK(backing_object); 1194 offset += object->backing_object_offset; 1195 VM_OBJECT_WUNLOCK(object); 1196 object = backing_object; 1197 if (object->size < OFF_TO_IDX(offset + size)) 1198 size = IDX_TO_OFF(object->size) - offset; 1199 } 1200 /* 1201 * Flush pages if writing is allowed, invalidate them 1202 * if invalidation requested. Pages undergoing I/O 1203 * will be ignored by vm_object_page_remove(). 1204 * 1205 * We cannot lock the vnode and then wait for paging 1206 * to complete without deadlocking against vm_fault. 1207 * Instead we simply call vm_object_page_remove() and 1208 * allow it to block internally on a page-by-page 1209 * basis when it encounters pages undergoing async 1210 * I/O. 
1211 */ 1212 if (object->type == OBJT_VNODE && 1213 vm_object_mightbedirty(object) != 0 && 1214 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) { 1215 VM_OBJECT_WUNLOCK(object); 1216 (void) vn_start_write(vp, &mp, V_WAIT); 1217 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1218 if (syncio && !invalidate && offset == 0 && 1219 atop(size) == object->size) { 1220 /* 1221 * If syncing the whole mapping of the file, 1222 * it is faster to schedule all the writes in 1223 * async mode, also allowing the clustering, 1224 * and then wait for i/o to complete. 1225 */ 1226 flags = 0; 1227 fsync_after = TRUE; 1228 } else { 1229 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 1230 flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0; 1231 fsync_after = FALSE; 1232 } 1233 VM_OBJECT_WLOCK(object); 1234 res = vm_object_page_clean(object, offset, offset + size, 1235 flags); 1236 VM_OBJECT_WUNLOCK(object); 1237 if (fsync_after) 1238 error = VOP_FSYNC(vp, MNT_WAIT, curthread); 1239 VOP_UNLOCK(vp); 1240 vn_finished_write(mp); 1241 if (error != 0) 1242 res = FALSE; 1243 VM_OBJECT_WLOCK(object); 1244 } 1245 if ((object->type == OBJT_VNODE || 1246 object->type == OBJT_DEVICE) && invalidate) { 1247 if (object->type == OBJT_DEVICE) 1248 /* 1249 * The option OBJPR_NOTMAPPED must be passed here 1250 * because vm_object_page_remove() cannot remove 1251 * unmanaged mappings. 1252 */ 1253 flags = OBJPR_NOTMAPPED; 1254 else if (old_msync) 1255 flags = 0; 1256 else 1257 flags = OBJPR_CLEANONLY; 1258 vm_object_page_remove(object, OFF_TO_IDX(offset), 1259 OFF_TO_IDX(offset + size + PAGE_MASK), flags); 1260 } 1261 VM_OBJECT_WUNLOCK(object); 1262 return (res); 1263 } 1264 1265 /* 1266 * Determine whether the given advice can be applied to the object. Advice is 1267 * not applied to unmanaged pages since they never belong to page queues, and 1268 * since MADV_FREE is destructive, it can apply only to anonymous pages that 1269 * have been mapped at most once. 1270 */ 1271 static bool 1272 vm_object_advice_applies(vm_object_t object, int advice) 1273 { 1274 1275 if ((object->flags & OBJ_UNMANAGED) != 0) 1276 return (false); 1277 if (advice != MADV_FREE) 1278 return (true); 1279 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) == 1280 (OBJ_ONEMAPPING | OBJ_ANON)); 1281 } 1282 1283 static void 1284 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, 1285 vm_size_t size) 1286 { 1287 1288 if (advice == MADV_FREE && object->type == OBJT_SWAP) 1289 swap_pager_freespace(object, pindex, size); 1290 } 1291 1292 /* 1293 * vm_object_madvise: 1294 * 1295 * Implements the madvise function at the object/page level. 1296 * 1297 * MADV_WILLNEED (any object) 1298 * 1299 * Activate the specified pages if they are resident. 1300 * 1301 * MADV_DONTNEED (any object) 1302 * 1303 * Deactivate the specified pages if they are resident. 1304 * 1305 * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, 1306 * OBJ_ONEMAPPING only) 1307 * 1308 * Deactivate and clean the specified pages if they are 1309 * resident. This permits the process to reuse the pages 1310 * without faulting or the kernel to reclaim the pages 1311 * without I/O. 
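 *
 *	For swap-backed objects, MADV_FREE also releases any swap space
 *	allocated to the range (see vm_object_madvise_freespace()).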
1312 */ 1313 void 1314 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, 1315 int advice) 1316 { 1317 vm_pindex_t tpindex; 1318 vm_object_t backing_object, tobject; 1319 vm_page_t m, tm; 1320 1321 if (object == NULL) 1322 return; 1323 1324 relookup: 1325 VM_OBJECT_WLOCK(object); 1326 if (!vm_object_advice_applies(object, advice)) { 1327 VM_OBJECT_WUNLOCK(object); 1328 return; 1329 } 1330 for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { 1331 tobject = object; 1332 1333 /* 1334 * If the next page isn't resident in the top-level object, we 1335 * need to search the shadow chain. When applying MADV_FREE, we 1336 * take care to release any swap space used to store 1337 * non-resident pages. 1338 */ 1339 if (m == NULL || pindex < m->pindex) { 1340 /* 1341 * Optimize a common case: if the top-level object has 1342 * no backing object, we can skip over the non-resident 1343 * range in constant time. 1344 */ 1345 if (object->backing_object == NULL) { 1346 tpindex = (m != NULL && m->pindex < end) ? 1347 m->pindex : end; 1348 vm_object_madvise_freespace(object, advice, 1349 pindex, tpindex - pindex); 1350 if ((pindex = tpindex) == end) 1351 break; 1352 goto next_page; 1353 } 1354 1355 tpindex = pindex; 1356 do { 1357 vm_object_madvise_freespace(tobject, advice, 1358 tpindex, 1); 1359 /* 1360 * Prepare to search the next object in the 1361 * chain. 1362 */ 1363 backing_object = tobject->backing_object; 1364 if (backing_object == NULL) 1365 goto next_pindex; 1366 VM_OBJECT_WLOCK(backing_object); 1367 tpindex += 1368 OFF_TO_IDX(tobject->backing_object_offset); 1369 if (tobject != object) 1370 VM_OBJECT_WUNLOCK(tobject); 1371 tobject = backing_object; 1372 if (!vm_object_advice_applies(tobject, advice)) 1373 goto next_pindex; 1374 } while ((tm = vm_page_lookup(tobject, tpindex)) == 1375 NULL); 1376 } else { 1377 next_page: 1378 tm = m; 1379 m = TAILQ_NEXT(m, listq); 1380 } 1381 1382 /* 1383 * If the page is not in a normal state, skip it. The page 1384 * can not be invalidated while the object lock is held. 1385 */ 1386 if (!vm_page_all_valid(tm) || vm_page_wired(tm)) 1387 goto next_pindex; 1388 KASSERT((tm->flags & PG_FICTITIOUS) == 0, 1389 ("vm_object_madvise: page %p is fictitious", tm)); 1390 KASSERT((tm->oflags & VPO_UNMANAGED) == 0, 1391 ("vm_object_madvise: page %p is not managed", tm)); 1392 if (vm_page_tryxbusy(tm) == 0) { 1393 if (object != tobject) 1394 VM_OBJECT_WUNLOCK(object); 1395 if (advice == MADV_WILLNEED) { 1396 /* 1397 * Reference the page before unlocking and 1398 * sleeping so that the page daemon is less 1399 * likely to reclaim it. 1400 */ 1401 vm_page_aflag_set(tm, PGA_REFERENCED); 1402 } 1403 vm_page_busy_sleep(tm, "madvpo", false); 1404 goto relookup; 1405 } 1406 vm_page_advise(tm, advice); 1407 vm_page_xunbusy(tm); 1408 vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); 1409 next_pindex: 1410 if (tobject != object) 1411 VM_OBJECT_WUNLOCK(tobject); 1412 } 1413 VM_OBJECT_WUNLOCK(object); 1414 } 1415 1416 /* 1417 * vm_object_shadow: 1418 * 1419 * Create a new object which is backed by the 1420 * specified existing object range. The source 1421 * object reference is deallocated. 1422 * 1423 * The new object and offset into that object 1424 * are returned in the source parameters. 
1425 */ 1426 void 1427 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length, 1428 struct ucred *cred, bool shared) 1429 { 1430 vm_object_t source; 1431 vm_object_t result; 1432 1433 source = *object; 1434 1435 /* 1436 * Don't create the new object if the old object isn't shared. 1437 * 1438 * If we hold the only reference we can guarantee that it won't 1439 * increase while we have the map locked. Otherwise the race is 1440 * harmless and we will end up with an extra shadow object that 1441 * will be collapsed later. 1442 */ 1443 if (source != NULL && source->ref_count == 1 && 1444 (source->flags & OBJ_ANON) != 0) 1445 return; 1446 1447 /* 1448 * Allocate a new object with the given length. 1449 */ 1450 result = vm_object_allocate_anon(atop(length), source, cred, length); 1451 1452 /* 1453 * Store the offset into the source object, and fix up the offset into 1454 * the new object. 1455 */ 1456 result->backing_object_offset = *offset; 1457 1458 if (shared || source != NULL) { 1459 VM_OBJECT_WLOCK(result); 1460 1461 /* 1462 * The new object shadows the source object, adding a 1463 * reference to it. Our caller changes his reference 1464 * to point to the new object, removing a reference to 1465 * the source object. Net result: no change of 1466 * reference count, unless the caller needs to add one 1467 * more reference due to forking a shared map entry. 1468 */ 1469 if (shared) { 1470 vm_object_reference_locked(result); 1471 vm_object_clear_flag(result, OBJ_ONEMAPPING); 1472 } 1473 1474 /* 1475 * Try to optimize the result object's page color when 1476 * shadowing in order to maintain page coloring 1477 * consistency in the combined shadowed object. 1478 */ 1479 if (source != NULL) { 1480 vm_object_backing_insert(result, source); 1481 result->domain = source->domain; 1482 #if VM_NRESERVLEVEL > 0 1483 result->flags |= source->flags & OBJ_COLORED; 1484 result->pg_color = (source->pg_color + 1485 OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER - 1486 1)) - 1); 1487 #endif 1488 } 1489 VM_OBJECT_WUNLOCK(result); 1490 } 1491 1492 /* 1493 * Return the new things 1494 */ 1495 *offset = 0; 1496 *object = result; 1497 } 1498 1499 /* 1500 * vm_object_split: 1501 * 1502 * Split the pages in a map entry into a new object. This affords 1503 * easier removal of unused pages, and keeps object inheritance from 1504 * being a negative impact on memory usage. 1505 */ 1506 void 1507 vm_object_split(vm_map_entry_t entry) 1508 { 1509 vm_page_t m, m_next; 1510 vm_object_t orig_object, new_object, backing_object; 1511 vm_pindex_t idx, offidxstart; 1512 vm_size_t size; 1513 1514 orig_object = entry->object.vm_object; 1515 KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0, 1516 ("vm_object_split: Splitting object with multiple mappings.")); 1517 if ((orig_object->flags & OBJ_ANON) == 0) 1518 return; 1519 if (orig_object->ref_count <= 1) 1520 return; 1521 VM_OBJECT_WUNLOCK(orig_object); 1522 1523 offidxstart = OFF_TO_IDX(entry->offset); 1524 size = atop(entry->end - entry->start); 1525 1526 /* 1527 * If swap_pager_copy() is later called, it will convert new_object 1528 * into a swap object. 1529 */ 1530 new_object = vm_object_allocate_anon(size, orig_object, 1531 orig_object->cred, ptoa(size)); 1532 1533 /* 1534 * We must wait for the orig_object to complete any in-progress 1535 * collapse so that the swap blocks are stable below. The 1536 * additional reference on backing_object by new object will 1537 * prevent further collapse operations until split completes. 
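	 * The OBJ_SPLIT flag, set further below, likewise tells
	 * swap_pager_getpages() that the object is in transition.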
1538 */ 1539 VM_OBJECT_WLOCK(orig_object); 1540 vm_object_collapse_wait(orig_object); 1541 1542 /* 1543 * At this point, the new object is still private, so the order in 1544 * which the original and new objects are locked does not matter. 1545 */ 1546 VM_OBJECT_WLOCK(new_object); 1547 new_object->domain = orig_object->domain; 1548 backing_object = orig_object->backing_object; 1549 if (backing_object != NULL) { 1550 vm_object_backing_insert_ref(new_object, backing_object); 1551 new_object->backing_object_offset = 1552 orig_object->backing_object_offset + entry->offset; 1553 } 1554 if (orig_object->cred != NULL) { 1555 crhold(orig_object->cred); 1556 KASSERT(orig_object->charge >= ptoa(size), 1557 ("orig_object->charge < 0")); 1558 orig_object->charge -= ptoa(size); 1559 } 1560 1561 /* 1562 * Mark the split operation so that swap_pager_getpages() knows 1563 * that the object is in transition. 1564 */ 1565 vm_object_set_flag(orig_object, OBJ_SPLIT); 1566 retry: 1567 m = vm_page_find_least(orig_object, offidxstart); 1568 for (; m != NULL && (idx = m->pindex - offidxstart) < size; 1569 m = m_next) { 1570 m_next = TAILQ_NEXT(m, listq); 1571 1572 /* 1573 * We must wait for pending I/O to complete before we can 1574 * rename the page. 1575 * 1576 * We do not have to VM_PROT_NONE the page as mappings should 1577 * not be changed by this operation. 1578 */ 1579 if (vm_page_tryxbusy(m) == 0) { 1580 VM_OBJECT_WUNLOCK(new_object); 1581 vm_page_sleep_if_busy(m, "spltwt"); 1582 VM_OBJECT_WLOCK(new_object); 1583 goto retry; 1584 } 1585 1586 /* 1587 * The page was left invalid. Likely placed there by 1588 * an incomplete fault. Just remove and ignore. 1589 */ 1590 if (vm_page_none_valid(m)) { 1591 if (vm_page_remove(m)) 1592 vm_page_free(m); 1593 continue; 1594 } 1595 1596 /* vm_page_rename() will dirty the page. */ 1597 if (vm_page_rename(m, new_object, idx)) { 1598 vm_page_xunbusy(m); 1599 VM_OBJECT_WUNLOCK(new_object); 1600 VM_OBJECT_WUNLOCK(orig_object); 1601 vm_radix_wait(); 1602 VM_OBJECT_WLOCK(orig_object); 1603 VM_OBJECT_WLOCK(new_object); 1604 goto retry; 1605 } 1606 1607 #if VM_NRESERVLEVEL > 0 1608 /* 1609 * If some of the reservation's allocated pages remain with 1610 * the original object, then transferring the reservation to 1611 * the new object is neither particularly beneficial nor 1612 * particularly harmful as compared to leaving the reservation 1613 * with the original object. If, however, all of the 1614 * reservation's allocated pages are transferred to the new 1615 * object, then transferring the reservation is typically 1616 * beneficial. Determining which of these two cases applies 1617 * would be more costly than unconditionally renaming the 1618 * reservation. 1619 */ 1620 vm_reserv_rename(m, new_object, orig_object, offidxstart); 1621 #endif 1622 if (orig_object->type != OBJT_SWAP) 1623 vm_page_xunbusy(m); 1624 } 1625 if (orig_object->type == OBJT_SWAP) { 1626 /* 1627 * swap_pager_copy() can sleep, in which case the orig_object's 1628 * and new_object's locks are released and reacquired. 
1629 */ 1630 swap_pager_copy(orig_object, new_object, offidxstart, 0); 1631 TAILQ_FOREACH(m, &new_object->memq, listq) 1632 vm_page_xunbusy(m); 1633 } 1634 vm_object_clear_flag(orig_object, OBJ_SPLIT); 1635 VM_OBJECT_WUNLOCK(orig_object); 1636 VM_OBJECT_WUNLOCK(new_object); 1637 entry->object.vm_object = new_object; 1638 entry->offset = 0LL; 1639 vm_object_deallocate(orig_object); 1640 VM_OBJECT_WLOCK(new_object); 1641 } 1642 1643 static vm_page_t 1644 vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p) 1645 { 1646 vm_object_t backing_object; 1647 1648 VM_OBJECT_ASSERT_WLOCKED(object); 1649 backing_object = object->backing_object; 1650 VM_OBJECT_ASSERT_WLOCKED(backing_object); 1651 1652 KASSERT(p == NULL || p->object == object || p->object == backing_object, 1653 ("invalid ownership %p %p %p", p, object, backing_object)); 1654 /* The page is only NULL when rename fails. */ 1655 if (p == NULL) { 1656 VM_OBJECT_WUNLOCK(object); 1657 VM_OBJECT_WUNLOCK(backing_object); 1658 vm_radix_wait(); 1659 } else { 1660 if (p->object == object) 1661 VM_OBJECT_WUNLOCK(backing_object); 1662 else 1663 VM_OBJECT_WUNLOCK(object); 1664 vm_page_busy_sleep(p, "vmocol", false); 1665 } 1666 VM_OBJECT_WLOCK(object); 1667 VM_OBJECT_WLOCK(backing_object); 1668 return (TAILQ_FIRST(&backing_object->memq)); 1669 } 1670 1671 static bool 1672 vm_object_scan_all_shadowed(vm_object_t object) 1673 { 1674 vm_object_t backing_object; 1675 vm_page_t p, pp; 1676 vm_pindex_t backing_offset_index, new_pindex, pi, ps; 1677 1678 VM_OBJECT_ASSERT_WLOCKED(object); 1679 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 1680 1681 backing_object = object->backing_object; 1682 1683 if ((backing_object->flags & OBJ_ANON) == 0) 1684 return (false); 1685 1686 pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 1687 p = vm_page_find_least(backing_object, pi); 1688 ps = swap_pager_find_least(backing_object, pi); 1689 1690 /* 1691 * Only check pages inside the parent object's range and 1692 * inside the parent object's mapping of the backing object. 1693 */ 1694 for (;; pi++) { 1695 if (p != NULL && p->pindex < pi) 1696 p = TAILQ_NEXT(p, listq); 1697 if (ps < pi) 1698 ps = swap_pager_find_least(backing_object, pi); 1699 if (p == NULL && ps >= backing_object->size) 1700 break; 1701 else if (p == NULL) 1702 pi = ps; 1703 else 1704 pi = MIN(p->pindex, ps); 1705 1706 new_pindex = pi - backing_offset_index; 1707 if (new_pindex >= object->size) 1708 break; 1709 1710 if (p != NULL) { 1711 /* 1712 * If the backing object page is busy a 1713 * grandparent or older page may still be 1714 * undergoing CoW. It is not safe to collapse 1715 * the backing object until it is quiesced. 1716 */ 1717 if (vm_page_tryxbusy(p) == 0) 1718 return (false); 1719 1720 /* 1721 * We raced with the fault handler that left 1722 * newly allocated invalid page on the object 1723 * queue and retried. 1724 */ 1725 if (!vm_page_all_valid(p)) 1726 goto unbusy_ret; 1727 } 1728 1729 /* 1730 * See if the parent has the page or if the parent's object 1731 * pager has the page. If the parent has the page but the page 1732 * is not valid, the parent's object pager must have the page. 1733 * 1734 * If this fails, the parent does not completely shadow the 1735 * object and we might as well give up now. 1736 */ 1737 pp = vm_page_lookup(object, new_pindex); 1738 1739 /* 1740 * The valid check here is stable due to object lock 1741 * being required to clear valid and initiate paging. 1742 * Busy of p disallows fault handler to validate pp. 
1743 */ 1744 if ((pp == NULL || vm_page_none_valid(pp)) && 1745 !vm_pager_has_page(object, new_pindex, NULL, NULL)) 1746 goto unbusy_ret; 1747 if (p != NULL) 1748 vm_page_xunbusy(p); 1749 } 1750 return (true); 1751 1752 unbusy_ret: 1753 if (p != NULL) 1754 vm_page_xunbusy(p); 1755 return (false); 1756 } 1757 1758 static void 1759 vm_object_collapse_scan(vm_object_t object) 1760 { 1761 vm_object_t backing_object; 1762 vm_page_t next, p, pp; 1763 vm_pindex_t backing_offset_index, new_pindex; 1764 1765 VM_OBJECT_ASSERT_WLOCKED(object); 1766 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 1767 1768 backing_object = object->backing_object; 1769 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 1770 1771 /* 1772 * Our scan 1773 */ 1774 for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) { 1775 next = TAILQ_NEXT(p, listq); 1776 new_pindex = p->pindex - backing_offset_index; 1777 1778 /* 1779 * Check for busy page 1780 */ 1781 if (vm_page_tryxbusy(p) == 0) { 1782 next = vm_object_collapse_scan_wait(object, p); 1783 continue; 1784 } 1785 1786 KASSERT(object->backing_object == backing_object, 1787 ("vm_object_collapse_scan: backing object mismatch %p != %p", 1788 object->backing_object, backing_object)); 1789 KASSERT(p->object == backing_object, 1790 ("vm_object_collapse_scan: object mismatch %p != %p", 1791 p->object, backing_object)); 1792 1793 if (p->pindex < backing_offset_index || 1794 new_pindex >= object->size) { 1795 if (backing_object->type == OBJT_SWAP) 1796 swap_pager_freespace(backing_object, p->pindex, 1797 1); 1798 1799 KASSERT(!pmap_page_is_mapped(p), 1800 ("freeing mapped page %p", p)); 1801 if (vm_page_remove(p)) 1802 vm_page_free(p); 1803 continue; 1804 } 1805 1806 if (!vm_page_all_valid(p)) { 1807 KASSERT(!pmap_page_is_mapped(p), 1808 ("freeing mapped page %p", p)); 1809 if (vm_page_remove(p)) 1810 vm_page_free(p); 1811 continue; 1812 } 1813 1814 pp = vm_page_lookup(object, new_pindex); 1815 if (pp != NULL && vm_page_tryxbusy(pp) == 0) { 1816 vm_page_xunbusy(p); 1817 /* 1818 * The page in the parent is busy and possibly not 1819 * (yet) valid. Until its state is finalized by the 1820 * busy bit owner, we can't tell whether it shadows the 1821 * original page. 1822 */ 1823 next = vm_object_collapse_scan_wait(object, pp); 1824 continue; 1825 } 1826 1827 if (pp != NULL && vm_page_none_valid(pp)) { 1828 /* 1829 * The page was invalid in the parent. Likely placed 1830 * there by an incomplete fault. Just remove and 1831 * ignore. p can replace it. 1832 */ 1833 if (vm_page_remove(pp)) 1834 vm_page_free(pp); 1835 pp = NULL; 1836 } 1837 1838 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, 1839 NULL)) { 1840 /* 1841 * The page already exists in the parent OR swap exists 1842 * for this location in the parent. Leave the parent's 1843 * page alone. Destroy the original page from the 1844 * backing object. 1845 */ 1846 if (backing_object->type == OBJT_SWAP) 1847 swap_pager_freespace(backing_object, p->pindex, 1848 1); 1849 KASSERT(!pmap_page_is_mapped(p), 1850 ("freeing mapped page %p", p)); 1851 if (vm_page_remove(p)) 1852 vm_page_free(p); 1853 if (pp != NULL) 1854 vm_page_xunbusy(pp); 1855 continue; 1856 } 1857 1858 /* 1859 * Page does not exist in parent, rename the page from the 1860 * backing object to the main object. 1861 * 1862 * If the page was mapped to a process, it can remain mapped 1863 * through the rename. vm_page_rename() will dirty the page. 
1864 */ 1865 if (vm_page_rename(p, object, new_pindex)) { 1866 vm_page_xunbusy(p); 1867 next = vm_object_collapse_scan_wait(object, NULL); 1868 continue; 1869 } 1870 1871 /* Use the old pindex to free the right page. */ 1872 if (backing_object->type == OBJT_SWAP) 1873 swap_pager_freespace(backing_object, 1874 new_pindex + backing_offset_index, 1); 1875 1876 #if VM_NRESERVLEVEL > 0 1877 /* 1878 * Rename the reservation. 1879 */ 1880 vm_reserv_rename(p, object, backing_object, 1881 backing_offset_index); 1882 #endif 1883 vm_page_xunbusy(p); 1884 } 1885 return; 1886 } 1887 1888 /* 1889 * vm_object_collapse: 1890 * 1891 * Collapse an object with the object backing it. 1892 * Pages in the backing object are moved into the 1893 * parent, and the backing object is deallocated. 1894 */ 1895 void 1896 vm_object_collapse(vm_object_t object) 1897 { 1898 vm_object_t backing_object, new_backing_object; 1899 1900 VM_OBJECT_ASSERT_WLOCKED(object); 1901 1902 while (TRUE) { 1903 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON, 1904 ("collapsing invalid object")); 1905 1906 /* 1907 * Wait for the backing_object to finish any pending 1908 * collapse so that the caller sees the shortest possible 1909 * shadow chain. 1910 */ 1911 backing_object = vm_object_backing_collapse_wait(object); 1912 if (backing_object == NULL) 1913 return; 1914 1915 KASSERT(object->ref_count > 0 && 1916 object->ref_count > object->shadow_count, 1917 ("collapse with invalid ref %d or shadow %d count.", 1918 object->ref_count, object->shadow_count)); 1919 KASSERT((backing_object->flags & 1920 (OBJ_COLLAPSING | OBJ_DEAD)) == 0, 1921 ("vm_object_collapse: Backing object already collapsing.")); 1922 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0, 1923 ("vm_object_collapse: object is already collapsing.")); 1924 1925 /* 1926 * We know that we can either collapse the backing object if 1927 * the parent is the only reference to it, or (perhaps) have 1928 * the parent bypass the object if the parent happens to shadow 1929 * all the resident pages in the entire backing object. 1930 */ 1931 if (backing_object->ref_count == 1) { 1932 KASSERT(backing_object->shadow_count == 1, 1933 ("vm_object_collapse: shadow_count: %d", 1934 backing_object->shadow_count)); 1935 vm_object_pip_add(object, 1); 1936 vm_object_set_flag(object, OBJ_COLLAPSING); 1937 vm_object_pip_add(backing_object, 1); 1938 vm_object_set_flag(backing_object, OBJ_DEAD); 1939 1940 /* 1941 * If there is exactly one reference to the backing 1942 * object, we can collapse it into the parent. 1943 */ 1944 vm_object_collapse_scan(object); 1945 1946 #if VM_NRESERVLEVEL > 0 1947 /* 1948 * Break any reservations from backing_object. 1949 */ 1950 if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) 1951 vm_reserv_break_all(backing_object); 1952 #endif 1953 1954 /* 1955 * Move the pager from backing_object to object. 1956 */ 1957 if (backing_object->type == OBJT_SWAP) { 1958 /* 1959 * swap_pager_copy() can sleep, in which case 1960 * the backing_object's and object's locks are 1961 * released and reacquired. 1962 * Since swap_pager_copy() is being asked to 1963 * destroy backing_object, it will change the 1964 * type to OBJT_DEFAULT. 1965 */ 1966 swap_pager_copy( 1967 backing_object, 1968 object, 1969 OFF_TO_IDX(object->backing_object_offset), TRUE); 1970 } 1971 1972 /* 1973 * Object now shadows whatever backing_object did. 
			vm_object_clear_flag(object, OBJ_COLLAPSING);
			vm_object_backing_transfer(object, backing_object);
			object->backing_object_offset +=
			    backing_object->backing_object_offset;
			VM_OBJECT_WUNLOCK(object);
			vm_object_pip_wakeup(object);

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			vm_object_pip_wakeup(backing_object);
			(void)refcount_release(&backing_object->ref_count);
			vm_object_terminate(backing_object);
			counter_u64_add(object_collapses, 1);
			VM_OBJECT_WLOCK(object);
		} else {
			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 *
			 * The object lock and backing_object lock must not
			 * be dropped during this sequence.
			 */
			if (!vm_object_scan_all_shadowed(object)) {
				VM_OBJECT_WUNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			vm_object_backing_remove_locked(object);
			new_backing_object = backing_object->backing_object;
			if (new_backing_object != NULL) {
				vm_object_backing_insert_ref(object,
				    new_backing_object);
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			(void)refcount_release(&backing_object->ref_count);
			KASSERT(backing_object->ref_count >= 1, (
"backing_object %p was somehow dereferenced during collapse!",
			    backing_object));
			VM_OBJECT_WUNLOCK(backing_object);
			counter_u64_add(object_bypasses, 1);
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove:
 *
 *	For the given object, either frees or invalidates each of the
 *	specified pages.  In general, a page is freed.  However, if a page is
 *	wired for any reason other than the existence of a managed, wired
 *	mapping, then it may be invalidated but not removed from the object.
 *	Pages are specified by the given range ["start", "end") and the option
 *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
 *	extends from "start" to the end of the object.  If the option
 *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
 *	specified range are affected.  If the option OBJPR_NOTMAPPED is
 *	specified, then the pages within the specified range must have no
 *	mappings.  Otherwise, if this option is not specified, any mappings to
 *	the specified pages are removed before the pages are freed or
 *	invalidated.
 *
 *	In general, this operation should only be performed on objects that
 *	contain managed pages.  There are, however, two exceptions.  First, it
 *	is performed on the kernel and kmem objects by vm_map_entry_delete().
 *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
 *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
 *	not be specified and the option OBJPR_NOTMAPPED must be specified.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int options)
{
	vm_page_t p, next;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
	    ("vm_object_page_remove: illegal options for object %p", object));
	if (object->resident_page_count == 0)
		return;
	vm_object_pip_add(object, 1);
again:
	p = vm_page_find_least(object, start);

	/*
	 * Here, the variable "p" is either (1) the page with the least pindex
	 * greater than or equal to the parameter "start" or (2) NULL.
	 */
	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
		next = TAILQ_NEXT(p, listq);

		/*
		 * If the page is wired for any reason besides the existence
		 * of managed, wired mappings, then it cannot be freed.  For
		 * example, fictitious pages, which represent device memory,
		 * are inherently wired and cannot be freed.  They can,
		 * however, be invalidated if the option OBJPR_CLEANONLY is
		 * not specified.
		 */
		if (vm_page_tryxbusy(p) == 0) {
			vm_page_sleep_if_busy(p, "vmopar");
			goto again;
		}
		if (vm_page_wired(p)) {
wired:
			if ((options & OBJPR_NOTMAPPED) == 0 &&
			    object->ref_count != 0)
				pmap_remove_all(p);
			if ((options & OBJPR_CLEANONLY) == 0) {
				vm_page_invalid(p);
				vm_page_undirty(p);
			}
			vm_page_xunbusy(p);
			continue;
		}
		KASSERT((p->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_page_remove: page %p is fictitious", p));
		if ((options & OBJPR_CLEANONLY) != 0 &&
		    !vm_page_none_valid(p)) {
			if ((options & OBJPR_NOTMAPPED) == 0 &&
			    object->ref_count != 0 &&
			    !vm_page_try_remove_write(p))
				goto wired;
			if (p->dirty != 0) {
				vm_page_xunbusy(p);
				continue;
			}
		}
		if ((options & OBJPR_NOTMAPPED) == 0 &&
		    object->ref_count != 0 && !vm_page_try_remove_all(p))
			goto wired;
		vm_page_free(p);
	}
	vm_object_pip_wakeup(object);
}
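
/*
 * Illustrative use only (not lifted from an actual caller; "obj" and "off"
 * are hypothetical): with the object write-locked, a caller that knows the
 * pages are unmapped could free everything from byte offset "off" to the end
 * of the object with:
 *
 *	VM_OBJECT_WLOCK(obj);
 *	vm_object_page_remove(obj, OFF_TO_IDX(off), 0, OBJPR_NOTMAPPED);
 *	VM_OBJECT_WUNLOCK(obj);
 *
 * Passing "end" as zero selects the special case described above.
 */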

/*
 *	vm_object_page_noreuse:
 *
 *	For the given object, attempt to move the specified pages to
 *	the head of the inactive queue.  This bypasses regular LRU
 *	operation and allows the pages to be reused quickly under memory
 *	pressure.  If a page is wired for any reason, then it will not
 *	be queued.  Pages are specified by the range ["start", "end").
 *	As a special case, if "end" is zero, then the range extends from
 *	"start" to the end of the object.
 *
 *	This operation should only be performed on objects that
 *	contain non-fictitious, managed pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t p, next;

	VM_OBJECT_ASSERT_LOCKED(object);
	KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0,
	    ("vm_object_page_noreuse: illegal object %p", object));
	if (object->resident_page_count == 0)
		return;
	p = vm_page_find_least(object, start);

	/*
	 * Here, the variable "p" is either (1) the page with the least pindex
	 * greater than or equal to the parameter "start" or (2) NULL.
	 */
	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate_noreuse(p);
	}
}

/*
 * Populate the specified range of the object with valid pages.  Returns
 * TRUE if the range is successfully populated and FALSE otherwise.
 *
 * Note: This function should be optimized to pass a larger array of
 * pages to vm_pager_get_pages() before it is applied to a non-
 * OBJT_DEVICE object.
 *
 * The object must be locked.
 */
boolean_t
vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m;
	vm_pindex_t pindex;
	int rv;

	VM_OBJECT_ASSERT_WLOCKED(object);
	for (pindex = start; pindex < end; pindex++) {
		rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL);
		if (rv != VM_PAGER_OK)
			break;

		/*
		 * Keep "m" busy because a subsequent iteration may unlock
		 * the object.
		 */
	}
	if (pindex > start) {
		m = vm_page_lookup(object, start);
		while (m != NULL && m->pindex < pindex) {
			vm_page_xunbusy(m);
			m = TAILQ_NEXT(m, listq);
		}
	}
	return (pindex == end);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to the second object
 *		reserved	Indicator that extension region has
 *				swap accounted for
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);
	if ((prev_object->flags & OBJ_ANON) == 0)
		return (FALSE);

	VM_OBJECT_WLOCK(prev_object);
	/*
	 * Try to collapse the object first.
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 *  . more than one reference
	 *  . paged out
	 *  . shadows another object
	 *  . has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		VM_OBJECT_WUNLOCK(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;

	if (prev_object->ref_count > 1 &&
	    prev_object->size != next_pindex &&
	    (prev_object->flags & OBJ_ONEMAPPING) == 0) {
		VM_OBJECT_WUNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Account for the charge.
	 */
	if (prev_object->cred != NULL) {
		/*
		 * If prev_object was charged, then this mapping,
		 * although not charged now, may become writable
		 * later.  Non-NULL cred in the object would prevent
		 * swap reservation during enabling of the write
		 * access, so reserve swap now.  A failed reservation
		 * causes allocation of a separate object for the map
		 * entry, and swap reservation for that entry is
		 * managed at the appropriate time.
		 */
		if (!reserved && !swap_reserve_by_cred(ptoa(next_size),
		    prev_object->cred)) {
			VM_OBJECT_WUNLOCK(prev_object);
			return (FALSE);
		}
		prev_object->charge += ptoa(next_size);
	}
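
	/*
	 * For example (illustrative numbers only): with 4 KB pages, a
	 * 16-page extension that is not already reserved reserves
	 * ptoa(16) = 64 KB of swap against prev_object->cred above and
	 * grows prev_object->charge by the same 64 KB.
	 */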

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object, next_pindex, next_pindex +
		    next_size, 0);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
#if 0
		if (prev_object->cred != NULL) {
			KASSERT(prev_object->charge >=
			    ptoa(prev_object->size - next_pindex),
			    ("object %p overcharged 1 %jx %jx", prev_object,
			    (uintmax_t)next_pindex, (uintmax_t)next_size));
			prev_object->charge -= ptoa(prev_object->size -
			    next_pindex);
		}
#endif
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	VM_OBJECT_WUNLOCK(prev_object);
	return (TRUE);
}

void
vm_object_set_writeable_dirty(vm_object_t object)
{

	/* Only set for vnodes & tmpfs */
	if (object->type != OBJT_VNODE &&
	    (object->flags & OBJ_TMPFS_NODE) == 0)
		return;
	atomic_add_int(&object->generation, 1);
}

/*
 *	vm_object_unwire:
 *
 *	For each page offset within the specified range of the given object,
 *	find the highest-level page in the shadow chain and unwire it.  A page
 *	must exist at every page offset, and the highest-level page must be
 *	wired.
 */
void
vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length,
    uint8_t queue)
{
	vm_object_t tobject, t1object;
	vm_page_t m, tm;
	vm_pindex_t end_pindex, pindex, tpindex;
	int depth, locked_depth;

	KASSERT((offset & PAGE_MASK) == 0,
	    ("vm_object_unwire: offset is not page aligned"));
	KASSERT((length & PAGE_MASK) == 0,
	    ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));
	/* The wired count of a fictitious page never changes. */
	if ((object->flags & OBJ_FICTITIOUS) != 0)
		return;
	pindex = OFF_TO_IDX(offset);
	end_pindex = pindex + atop(length);
again:
	locked_depth = 1;
	VM_OBJECT_RLOCK(object);
	m = vm_page_find_least(object, pindex);
	while (pindex < end_pindex) {
		if (m == NULL || pindex < m->pindex) {
			/*
			 * The first object in the shadow chain doesn't
			 * contain a page at the current index.  Therefore,
			 * the page must exist in a backing object.
			 */
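			/*
			 * Walk down the shadow chain to find the page,
			 * read-locking each backing object as it is first
			 * visited; "locked_depth" records how many objects
			 * in the chain are currently locked so that all of
			 * them can be unlocked later.
			 */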
			tobject = object;
			tpindex = pindex;
			depth = 0;
			do {
				tpindex +=
				    OFF_TO_IDX(tobject->backing_object_offset);
				tobject = tobject->backing_object;
				KASSERT(tobject != NULL,
				    ("vm_object_unwire: missing page"));
				if ((tobject->flags & OBJ_FICTITIOUS) != 0)
					goto next_page;
				depth++;
				if (depth == locked_depth) {
					locked_depth++;
					VM_OBJECT_RLOCK(tobject);
				}
			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
			    NULL);
		} else {
			tm = m;
			m = TAILQ_NEXT(m, listq);
		}
		if (vm_page_trysbusy(tm) == 0) {
			for (tobject = object; locked_depth >= 1;
			    locked_depth--) {
				t1object = tobject->backing_object;
				if (tm->object != tobject)
					VM_OBJECT_RUNLOCK(tobject);
				tobject = t1object;
			}
			vm_page_busy_sleep(tm, "unwbo", true);
			goto again;
		}
		vm_page_unwire(tm, queue);
		vm_page_sunbusy(tm);
next_page:
		pindex++;
	}
	/* Release the accumulated object locks. */
	for (tobject = object; locked_depth >= 1; locked_depth--) {
		t1object = tobject->backing_object;
		VM_OBJECT_RUNLOCK(tobject);
		tobject = t1object;
	}
}

/*
 * Return the vnode for the given object, or NULL if none exists.
 * For tmpfs objects, the function may return NULL if there is
 * no vnode allocated at the time of the call.
 */
struct vnode *
vm_object_vnode(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_ASSERT_LOCKED(object);
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		KASSERT(vp != NULL, ("%s: OBJT_VNODE has no vnode", __func__));
	} else if (object->type == OBJT_SWAP &&
	    (object->flags & OBJ_TMPFS) != 0) {
		vp = object->un_pager.swp.swp_tmpfs;
		KASSERT(vp != NULL, ("%s: OBJT_TMPFS has no vnode", __func__));
	} else {
		vp = NULL;
	}
	return (vp);
}

/*
 * Busy the vm object.  This prevents new pages belonging to the object from
 * becoming busy.  Existing pages persist as busy.  Callers are responsible
 * for checking page state before proceeding.
 */
void
vm_object_busy(vm_object_t obj)
{

	VM_OBJECT_ASSERT_LOCKED(obj);

	blockcount_acquire(&obj->busy, 1);
	/* The fence is required to order loads of page busy. */
	atomic_thread_fence_acq_rel();
}

void
vm_object_unbusy(vm_object_t obj)
{

	blockcount_release(&obj->busy, 1);
}

void
vm_object_busy_wait(vm_object_t obj, const char *wmesg)
{

	VM_OBJECT_ASSERT_UNLOCKED(obj);

	(void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM);
}
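
/*
 * A minimal sketch of the intended pattern (illustrative only; "obj" and "m"
 * are hypothetical): with the object lock held, a reader busies the object,
 * then checks the page it cares about and backs off if that page is busy:
 *
 *	vm_object_busy(obj);
 *	if (vm_page_busied(m))
 *		... retry later ...
 *	vm_object_unbusy(obj);
 */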

/*
 * Return the kvme type of the given object.
 * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL.
 */
int
vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
{

	VM_OBJECT_ASSERT_LOCKED(object);
	if (vpp != NULL)
		*vpp = vm_object_vnode(object);
	switch (object->type) {
	case OBJT_DEFAULT:
		return (KVME_TYPE_DEFAULT);
	case OBJT_VNODE:
		return (KVME_TYPE_VNODE);
	case OBJT_SWAP:
		if ((object->flags & OBJ_TMPFS_NODE) != 0)
			return (KVME_TYPE_VNODE);
		return (KVME_TYPE_SWAP);
	case OBJT_DEVICE:
		return (KVME_TYPE_DEVICE);
	case OBJT_PHYS:
		return (KVME_TYPE_PHYS);
	case OBJT_DEAD:
		return (KVME_TYPE_DEAD);
	case OBJT_SG:
		return (KVME_TYPE_SG);
	case OBJT_MGTDEVICE:
		return (KVME_TYPE_MGTDEVICE);
	default:
		return (KVME_TYPE_UNKNOWN);
	}
}

static int
sysctl_vm_object_list(SYSCTL_HANDLER_ARGS)
{
	struct kinfo_vmobject *kvo;
	char *fullpath, *freepath;
	struct vnode *vp;
	struct vattr va;
	vm_object_t obj;
	vm_page_t m;
	int count, error;

	if (req->oldptr == NULL) {
		/*
		 * If an old buffer has not been provided, generate an
		 * estimate of the space needed for a subsequent call.
		 * The count is padded by 10% to allow for objects that
		 * are created before the caller issues that call.
		 */
		mtx_lock(&vm_object_list_mtx);
		count = 0;
		TAILQ_FOREACH(obj, &vm_object_list, object_list) {
			if (obj->type == OBJT_DEAD)
				continue;
			count++;
		}
		mtx_unlock(&vm_object_list_mtx);
		return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *
		    count * 11 / 10));
	}

	kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK);
	error = 0;

	/*
	 * VM objects are type stable and are never removed from the
	 * list once added.  This allows us to safely read obj->object_list
	 * after reacquiring the VM object lock.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_FOREACH(obj, &vm_object_list, object_list) {
		if (obj->type == OBJT_DEAD)
			continue;
		VM_OBJECT_RLOCK(obj);
		if (obj->type == OBJT_DEAD) {
			VM_OBJECT_RUNLOCK(obj);
			continue;
		}
		mtx_unlock(&vm_object_list_mtx);
		kvo->kvo_size = ptoa(obj->size);
		kvo->kvo_resident = obj->resident_page_count;
		kvo->kvo_ref_count = obj->ref_count;
		kvo->kvo_shadow_count = obj->shadow_count;
		kvo->kvo_memattr = obj->memattr;
		kvo->kvo_active = 0;
		kvo->kvo_inactive = 0;
		TAILQ_FOREACH(m, &obj->memq, listq) {
			/*
			 * A page may belong to the object but be
			 * dequeued and set to PQ_NONE while the
			 * object lock is not held.  This makes the
			 * reads of m->queue below racy, and we do not
			 * count pages set to PQ_NONE.  However, this
			 * sysctl is only meant to give an
			 * approximation of the system anyway.
			 */
			if (m->a.queue == PQ_ACTIVE)
				kvo->kvo_active++;
			else if (m->a.queue == PQ_INACTIVE)
				kvo->kvo_inactive++;
		}

		kvo->kvo_vn_fileid = 0;
		kvo->kvo_vn_fsid = 0;
		kvo->kvo_vn_fsid_freebsd11 = 0;
		freepath = NULL;
		fullpath = "";
		kvo->kvo_type = vm_object_kvme_type(obj, &vp);
		if (vp != NULL)
			vref(vp);
		VM_OBJECT_RUNLOCK(obj);
		if (vp != NULL) {
			vn_fullpath(curthread, vp, &fullpath, &freepath);
			vn_lock(vp, LK_SHARED | LK_RETRY);
			if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
				kvo->kvo_vn_fileid = va.va_fileid;
				kvo->kvo_vn_fsid = va.va_fsid;
				kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;
				/* truncate */
			}
			vput(vp);
		}

		strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
		if (freepath != NULL)
			free(freepath, M_TEMP);

		/* Pack record size down */
		kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
		    + strlen(kvo->kvo_path) + 1;
		kvo->kvo_structsize = roundup(kvo->kvo_structsize,
		    sizeof(uint64_t));
		error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);
		mtx_lock(&vm_object_list_mtx);
		if (error)
			break;
	}
	mtx_unlock(&vm_object_list_mtx);
	free(kvo, M_TEMP);
	return (error);
}
SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
    "List of VM objects");

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;

	if (map == 0)
		return 0;

	if (entry == 0) {
		VM_MAP_ENTRY_FOREACH(tmpe, map) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		VM_MAP_ENTRY_FOREACH(tmpe, tmpm) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if ((object->flags & OBJ_ANON) != 0) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
				    (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
				    "vmochk: internal obj is not in a map: "
				    "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags,
	    object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	vm_pindex_t fidx;
	vm_paddr_t pa;
	vm_page_t m, prev_m;
	int rcount, nl, c;

	nl = 0;
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		pa = -1;
		TAILQ_FOREACH(m, &object->memq, listq) {
			if (m->pindex > 128)
				break;
			if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
			    prev_m->pindex + 1 != m->pindex) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					    (long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
			}
			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				    (long)fidx, rcount, (long)pa);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = m->pindex;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
			    (long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */