1 /*- 2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU) 3 * 4 * Copyright (c) 1991, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * The Mach Operating System project at Carnegie-Mellon University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 35 * 36 * 37 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 38 * All rights reserved. 39 * 40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 41 * 42 * Permission to use, copy, modify and distribute this software and 43 * its documentation is hereby granted, provided that both the copyright 44 * notice and this permission notice appear in all copies of the 45 * software, derivative works or modified versions, and any portions 46 * thereof, and that both notices appear in supporting documentation. 47 * 48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 51 * 52 * Carnegie Mellon requests users of this software to return to 53 * 54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 55 * School of Computer Science 56 * Carnegie Mellon University 57 * Pittsburgh PA 15213-3890 58 * 59 * any improvements or extensions that they make and grant Carnegie the 60 * rights to redistribute these changes. 61 */ 62 63 /* 64 * Virtual memory object module. 
65 */ 66 67 #include <sys/cdefs.h> 68 __FBSDID("$FreeBSD$"); 69 70 #include "opt_vm.h" 71 72 #include <sys/param.h> 73 #include <sys/systm.h> 74 #include <sys/cpuset.h> 75 #include <sys/lock.h> 76 #include <sys/mman.h> 77 #include <sys/mount.h> 78 #include <sys/kernel.h> 79 #include <sys/pctrie.h> 80 #include <sys/sysctl.h> 81 #include <sys/mutex.h> 82 #include <sys/proc.h> /* for curproc, pageproc */ 83 #include <sys/refcount.h> 84 #include <sys/socket.h> 85 #include <sys/resourcevar.h> 86 #include <sys/refcount.h> 87 #include <sys/rwlock.h> 88 #include <sys/user.h> 89 #include <sys/vnode.h> 90 #include <sys/vmmeter.h> 91 #include <sys/sx.h> 92 93 #include <vm/vm.h> 94 #include <vm/vm_param.h> 95 #include <vm/pmap.h> 96 #include <vm/vm_map.h> 97 #include <vm/vm_object.h> 98 #include <vm/vm_page.h> 99 #include <vm/vm_pageout.h> 100 #include <vm/vm_pager.h> 101 #include <vm/vm_phys.h> 102 #include <vm/vm_pagequeue.h> 103 #include <vm/swap_pager.h> 104 #include <vm/vm_kern.h> 105 #include <vm/vm_extern.h> 106 #include <vm/vm_radix.h> 107 #include <vm/vm_reserv.h> 108 #include <vm/uma.h> 109 110 static int old_msync; 111 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0, 112 "Use old (insecure) msync behavior"); 113 114 static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p, 115 int pagerflags, int flags, boolean_t *allclean, 116 boolean_t *eio); 117 static boolean_t vm_object_page_remove_write(vm_page_t p, int flags, 118 boolean_t *allclean); 119 static void vm_object_backing_remove(vm_object_t object); 120 121 /* 122 * Virtual memory objects maintain the actual data 123 * associated with allocated virtual memory. A given 124 * page of memory exists within exactly one object. 125 * 126 * An object is only deallocated when all "references" 127 * are given up. Only one "reference" to a given 128 * region of an object should be writeable. 129 * 130 * Associated with each object is a list of all resident 131 * memory pages belonging to that object; this list is 132 * maintained by the "vm_page" module, and locked by the object's 133 * lock. 134 * 135 * Each object also records a "pager" routine which is 136 * used to retrieve (and store) pages to the proper backing 137 * storage. In addition, objects may be backed by other 138 * objects from which they were virtual-copied. 
139 * 140 * The only items within the object structure which are 141 * modified after time of creation are: 142 * reference count locked by object's lock 143 * pager routine locked by object's lock 144 * 145 */ 146 147 struct object_q vm_object_list; 148 struct mtx vm_object_list_mtx; /* lock for object list and count */ 149 150 struct vm_object kernel_object_store; 151 152 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, 153 "VM object stats"); 154 155 static counter_u64_t object_collapses = EARLY_COUNTER; 156 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD, 157 &object_collapses, 158 "VM object collapses"); 159 160 static counter_u64_t object_bypasses = EARLY_COUNTER; 161 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD, 162 &object_bypasses, 163 "VM object bypasses"); 164 165 static counter_u64_t object_collapse_waits = EARLY_COUNTER; 166 SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD, 167 &object_collapse_waits, 168 "Number of sleeps for collapse"); 169 170 static void 171 counter_startup(void) 172 { 173 174 object_collapses = counter_u64_alloc(M_WAITOK); 175 object_bypasses = counter_u64_alloc(M_WAITOK); 176 object_collapse_waits = counter_u64_alloc(M_WAITOK); 177 } 178 SYSINIT(object_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL); 179 180 static uma_zone_t obj_zone; 181 182 static int vm_object_zinit(void *mem, int size, int flags); 183 184 #ifdef INVARIANTS 185 static void vm_object_zdtor(void *mem, int size, void *arg); 186 187 static void 188 vm_object_zdtor(void *mem, int size, void *arg) 189 { 190 vm_object_t object; 191 192 object = (vm_object_t)mem; 193 KASSERT(object->ref_count == 0, 194 ("object %p ref_count = %d", object, object->ref_count)); 195 KASSERT(TAILQ_EMPTY(&object->memq), 196 ("object %p has resident pages in its memq", object)); 197 KASSERT(vm_radix_is_empty(&object->rtree), 198 ("object %p has resident pages in its trie", object)); 199 #if VM_NRESERVLEVEL > 0 200 KASSERT(LIST_EMPTY(&object->rvq), 201 ("object %p has reservations", 202 object)); 203 #endif 204 KASSERT(REFCOUNT_COUNT(object->paging_in_progress) == 0, 205 ("object %p paging_in_progress = %d", 206 object, REFCOUNT_COUNT(object->paging_in_progress))); 207 KASSERT(object->busy == 0, 208 ("object %p busy = %d", 209 object, object->busy)); 210 KASSERT(object->resident_page_count == 0, 211 ("object %p resident_page_count = %d", 212 object, object->resident_page_count)); 213 KASSERT(object->shadow_count == 0, 214 ("object %p shadow_count = %d", 215 object, object->shadow_count)); 216 KASSERT(object->type == OBJT_DEAD, 217 ("object %p has non-dead type %d", 218 object, object->type)); 219 } 220 #endif 221 222 static int 223 vm_object_zinit(void *mem, int size, int flags) 224 { 225 vm_object_t object; 226 227 object = (vm_object_t)mem; 228 rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW); 229 230 /* These are true for any object that has been freed */ 231 object->type = OBJT_DEAD; 232 vm_radix_init(&object->rtree); 233 refcount_init(&object->ref_count, 0); 234 refcount_init(&object->paging_in_progress, 0); 235 refcount_init(&object->busy, 0); 236 object->resident_page_count = 0; 237 object->shadow_count = 0; 238 object->flags = OBJ_DEAD; 239 240 mtx_lock(&vm_object_list_mtx); 241 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); 242 mtx_unlock(&vm_object_list_mtx); 243 return (0); 244 } 245 246 static void 247 _vm_object_allocate(objtype_t type, vm_pindex_t size, u_short flags, 248 vm_object_t 
object, void *handle) 249 { 250 251 TAILQ_INIT(&object->memq); 252 LIST_INIT(&object->shadow_head); 253 254 object->type = type; 255 if (type == OBJT_SWAP) 256 pctrie_init(&object->un_pager.swp.swp_blks); 257 258 /* 259 * Ensure that swap_pager_swapoff() iteration over object_list 260 * sees up to date type and pctrie head if it observed 261 * non-dead object. 262 */ 263 atomic_thread_fence_rel(); 264 265 object->pg_color = 0; 266 object->flags = flags; 267 object->size = size; 268 object->domain.dr_policy = NULL; 269 object->generation = 1; 270 object->cleangeneration = 1; 271 refcount_init(&object->ref_count, 1); 272 object->memattr = VM_MEMATTR_DEFAULT; 273 object->cred = NULL; 274 object->charge = 0; 275 object->handle = handle; 276 object->backing_object = NULL; 277 object->backing_object_offset = (vm_ooffset_t) 0; 278 #if VM_NRESERVLEVEL > 0 279 LIST_INIT(&object->rvq); 280 #endif 281 umtx_shm_object_init(object); 282 } 283 284 /* 285 * vm_object_init: 286 * 287 * Initialize the VM objects module. 288 */ 289 void 290 vm_object_init(void) 291 { 292 TAILQ_INIT(&vm_object_list); 293 mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF); 294 295 rw_init(&kernel_object->lock, "kernel vm object"); 296 _vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS - 297 VM_MIN_KERNEL_ADDRESS), OBJ_UNMANAGED, kernel_object, NULL); 298 #if VM_NRESERVLEVEL > 0 299 kernel_object->flags |= OBJ_COLORED; 300 kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS); 301 #endif 302 303 /* 304 * The lock portion of struct vm_object must be type stable due 305 * to vm_pageout_fallback_object_lock locking a vm object 306 * without holding any references to it. 307 */ 308 obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL, 309 #ifdef INVARIANTS 310 vm_object_zdtor, 311 #else 312 NULL, 313 #endif 314 vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE); 315 316 vm_radix_zinit(); 317 } 318 319 void 320 vm_object_clear_flag(vm_object_t object, u_short bits) 321 { 322 323 VM_OBJECT_ASSERT_WLOCKED(object); 324 object->flags &= ~bits; 325 } 326 327 /* 328 * Sets the default memory attribute for the specified object. Pages 329 * that are allocated to this object are by default assigned this memory 330 * attribute. 331 * 332 * Presently, this function must be called before any pages are allocated 333 * to the object. In the future, this requirement may be relaxed for 334 * "default" and "swap" objects. 335 */ 336 int 337 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr) 338 { 339 340 VM_OBJECT_ASSERT_WLOCKED(object); 341 switch (object->type) { 342 case OBJT_DEFAULT: 343 case OBJT_DEVICE: 344 case OBJT_MGTDEVICE: 345 case OBJT_PHYS: 346 case OBJT_SG: 347 case OBJT_SWAP: 348 case OBJT_VNODE: 349 if (!TAILQ_EMPTY(&object->memq)) 350 return (KERN_FAILURE); 351 break; 352 case OBJT_DEAD: 353 return (KERN_INVALID_ARGUMENT); 354 default: 355 panic("vm_object_set_memattr: object %p is of undefined type", 356 object); 357 } 358 object->memattr = memattr; 359 return (KERN_SUCCESS); 360 } 361 362 void 363 vm_object_pip_add(vm_object_t object, short i) 364 { 365 366 refcount_acquiren(&object->paging_in_progress, i); 367 } 368 369 void 370 vm_object_pip_wakeup(vm_object_t object) 371 { 372 373 refcount_release(&object->paging_in_progress); 374 } 375 376 void 377 vm_object_pip_wakeupn(vm_object_t object, short i) 378 { 379 380 refcount_releasen(&object->paging_in_progress, i); 381 } 382 383 /* 384 * Atomically drop the interlock and wait for pip to drain. 
This protects 385 * from sleep/wakeup races due to identity changes. The lock is not 386 * re-acquired on return. 387 */ 388 static void 389 vm_object_pip_sleep(vm_object_t object, char *waitid) 390 { 391 392 refcount_sleep_interlock(&object->paging_in_progress, 393 &object->lock, waitid, PVM); 394 } 395 396 void 397 vm_object_pip_wait(vm_object_t object, char *waitid) 398 { 399 400 VM_OBJECT_ASSERT_WLOCKED(object); 401 402 while (REFCOUNT_COUNT(object->paging_in_progress) > 0) { 403 vm_object_pip_sleep(object, waitid); 404 VM_OBJECT_WLOCK(object); 405 } 406 } 407 408 void 409 vm_object_pip_wait_unlocked(vm_object_t object, char *waitid) 410 { 411 412 VM_OBJECT_ASSERT_UNLOCKED(object); 413 414 while (REFCOUNT_COUNT(object->paging_in_progress) > 0) 415 refcount_wait(&object->paging_in_progress, waitid, PVM); 416 } 417 418 /* 419 * vm_object_allocate: 420 * 421 * Returns a new object with the given size. 422 */ 423 vm_object_t 424 vm_object_allocate(objtype_t type, vm_pindex_t size) 425 { 426 vm_object_t object; 427 u_short flags; 428 429 switch (type) { 430 case OBJT_DEAD: 431 panic("vm_object_allocate: can't create OBJT_DEAD"); 432 case OBJT_DEFAULT: 433 case OBJT_SWAP: 434 flags = OBJ_COLORED; 435 break; 436 case OBJT_DEVICE: 437 case OBJT_SG: 438 flags = OBJ_FICTITIOUS | OBJ_UNMANAGED; 439 break; 440 case OBJT_MGTDEVICE: 441 flags = OBJ_FICTITIOUS; 442 break; 443 case OBJT_PHYS: 444 flags = OBJ_UNMANAGED; 445 break; 446 case OBJT_VNODE: 447 flags = 0; 448 break; 449 default: 450 panic("vm_object_allocate: type %d is undefined", type); 451 } 452 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); 453 _vm_object_allocate(type, size, flags, object, NULL); 454 455 return (object); 456 } 457 458 /* 459 * vm_object_allocate_anon: 460 * 461 * Returns a new default object of the given size and marked as 462 * anonymous memory for special split/collapse handling. Color 463 * to be initialized by the caller. 464 */ 465 vm_object_t 466 vm_object_allocate_anon(vm_pindex_t size, vm_object_t backing_object, 467 struct ucred *cred, vm_size_t charge) 468 { 469 vm_object_t handle, object; 470 471 if (backing_object == NULL) 472 handle = NULL; 473 else if ((backing_object->flags & OBJ_ANON) != 0) 474 handle = backing_object->handle; 475 else 476 handle = backing_object; 477 object = uma_zalloc(obj_zone, M_WAITOK); 478 _vm_object_allocate(OBJT_DEFAULT, size, OBJ_ANON | OBJ_ONEMAPPING, 479 object, handle); 480 object->cred = cred; 481 object->charge = cred != NULL ? charge : 0; 482 return (object); 483 } 484 485 static void 486 vm_object_reference_vnode(vm_object_t object) 487 { 488 struct vnode *vp; 489 u_int old; 490 491 /* 492 * vnode objects need the lock for the first reference 493 * to serialize with vnode_object_deallocate(). 494 */ 495 if (!refcount_acquire_if_gt(&object->ref_count, 0)) { 496 VM_OBJECT_RLOCK(object); 497 old = refcount_acquire(&object->ref_count); 498 if (object->type == OBJT_VNODE && old == 0) { 499 vp = object->handle; 500 vref(vp); 501 } 502 VM_OBJECT_RUNLOCK(object); 503 } 504 } 505 506 /* 507 * vm_object_reference: 508 * 509 * Acquires a reference to the given object. 
510 */ 511 void 512 vm_object_reference(vm_object_t object) 513 { 514 515 if (object == NULL) 516 return; 517 518 if (object->type == OBJT_VNODE) 519 vm_object_reference_vnode(object); 520 else 521 refcount_acquire(&object->ref_count); 522 KASSERT((object->flags & OBJ_DEAD) == 0, 523 ("vm_object_reference: Referenced dead object.")); 524 } 525 526 /* 527 * vm_object_reference_locked: 528 * 529 * Gets another reference to the given object. 530 * 531 * The object must be locked. 532 */ 533 void 534 vm_object_reference_locked(vm_object_t object) 535 { 536 struct vnode *vp; 537 u_int old; 538 539 VM_OBJECT_ASSERT_LOCKED(object); 540 old = refcount_acquire(&object->ref_count); 541 if (object->type == OBJT_VNODE && old == 0) { 542 vp = object->handle; vref(vp); } 543 KASSERT((object->flags & OBJ_DEAD) == 0, 544 ("vm_object_reference: Referenced dead object.")); 545 } 546 547 /* 548 * Handle deallocating an object of type OBJT_VNODE. 549 */ 550 static void 551 vm_object_deallocate_vnode(vm_object_t object) 552 { 553 struct vnode *vp = (struct vnode *) object->handle; 554 bool last; 555 556 KASSERT(object->type == OBJT_VNODE, 557 ("vm_object_deallocate_vnode: not a vnode object")); 558 KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp")); 559 560 /* Object lock to protect handle lookup. */ 561 last = refcount_release(&object->ref_count); 562 VM_OBJECT_RUNLOCK(object); 563 564 if (!last) 565 return; 566 567 if (!umtx_shm_vnobj_persistent) 568 umtx_shm_object_terminated(object); 569 570 /* vrele may need the vnode lock. */ 571 vrele(vp); 572 } 573 574 575 /* 576 * We dropped a reference on an object and discovered that it had a 577 * single remaining shadow. This is a sibling of the reference we 578 * dropped. Attempt to collapse the sibling and backing object. 579 */ 580 static vm_object_t 581 vm_object_deallocate_anon(vm_object_t backing_object) 582 { 583 vm_object_t object; 584 585 /* Fetch the final shadow. */ 586 object = LIST_FIRST(&backing_object->shadow_head); 587 KASSERT(object != NULL && backing_object->shadow_count == 1, 588 ("vm_object_anon_deallocate: ref_count: %d, shadow_count: %d", 589 backing_object->ref_count, backing_object->shadow_count)); 590 KASSERT((object->flags & (OBJ_TMPFS_NODE | OBJ_ANON)) == OBJ_ANON, 591 ("invalid shadow object %p", object)); 592 593 if (!VM_OBJECT_TRYWLOCK(object)) { 594 /* 595 * Prevent object from disappearing since we do not have a 596 * reference. 597 */ 598 vm_object_pip_add(object, 1); 599 VM_OBJECT_WUNLOCK(backing_object); 600 VM_OBJECT_WLOCK(object); 601 vm_object_pip_wakeup(object); 602 } else 603 VM_OBJECT_WUNLOCK(backing_object); 604 605 /* 606 * Check for a collapse/terminate race with the last reference holder. 607 */ 608 if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 || 609 !refcount_acquire_if_not_zero(&object->ref_count)) { 610 VM_OBJECT_WUNLOCK(object); 611 return (NULL); 612 } 613 backing_object = object->backing_object; 614 if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0) 615 vm_object_collapse(object); 616 VM_OBJECT_WUNLOCK(object); 617 618 return (object); 619 } 620 621 /* 622 * vm_object_deallocate: 623 * 624 * Release a reference to the specified object, 625 * gained either through a vm_object_allocate 626 * or a vm_object_reference call. When all references 627 * are gone, storage associated with this object 628 * may be relinquished. 629 * 630 * No object may be locked. 
631 */ 632 void 633 vm_object_deallocate(vm_object_t object) 634 { 635 vm_object_t temp; 636 bool released; 637 638 while (object != NULL) { 639 /* 640 * If the reference count goes to 0 we start calling 641 * vm_object_terminate() on the object chain. A ref count 642 * of 1 may be a special case depending on the shadow count 643 * being 0 or 1. These cases require a write lock on the 644 * object. 645 */ 646 if ((object->flags & OBJ_ANON) == 0) 647 released = refcount_release_if_gt(&object->ref_count, 1); 648 else 649 released = refcount_release_if_gt(&object->ref_count, 2); 650 if (released) 651 return; 652 653 if (object->type == OBJT_VNODE) { 654 VM_OBJECT_RLOCK(object); 655 if (object->type == OBJT_VNODE) { 656 vm_object_deallocate_vnode(object); 657 return; 658 } 659 VM_OBJECT_RUNLOCK(object); 660 } 661 662 VM_OBJECT_WLOCK(object); 663 KASSERT(object->ref_count > 0, 664 ("vm_object_deallocate: object deallocated too many times: %d", 665 object->type)); 666 667 /* 668 * If this is not the final reference to an anonymous 669 * object we may need to collapse the shadow chain. 670 */ 671 if (!refcount_release(&object->ref_count)) { 672 if (object->ref_count > 1 || 673 object->shadow_count == 0) { 674 if ((object->flags & OBJ_ANON) != 0 && 675 object->ref_count == 1) 676 vm_object_set_flag(object, 677 OBJ_ONEMAPPING); 678 VM_OBJECT_WUNLOCK(object); 679 return; 680 } 681 682 /* Handle collapsing last ref on anonymous objects. */ 683 object = vm_object_deallocate_anon(object); 684 continue; 685 } 686 687 /* 688 * Handle the final reference to an object. We restart 689 * the loop with the backing object to avoid recursion. 690 */ 691 umtx_shm_object_terminated(object); 692 temp = object->backing_object; 693 if (temp != NULL) { 694 KASSERT((object->flags & OBJ_TMPFS_NODE) == 0, 695 ("shadowed tmpfs v_object 2 %p", object)); 696 vm_object_backing_remove(object); 697 } 698 699 KASSERT((object->flags & OBJ_DEAD) == 0, 700 ("vm_object_deallocate: Terminating dead object.")); 701 vm_object_set_flag(object, OBJ_DEAD); 702 vm_object_terminate(object); 703 object = temp; 704 } 705 } 706 707 /* 708 * vm_object_destroy removes the object from the global object list 709 * and frees the space for the object. 710 */ 711 void 712 vm_object_destroy(vm_object_t object) 713 { 714 715 /* 716 * Release the allocation charge. 717 */ 718 if (object->cred != NULL) { 719 swap_release_by_cred(object->charge, object->cred); 720 object->charge = 0; 721 crfree(object->cred); 722 object->cred = NULL; 723 } 724 725 /* 726 * Free the space for the object. 
727 */ 728 uma_zfree(obj_zone, object); 729 } 730 731 static void 732 vm_object_backing_remove_locked(vm_object_t object) 733 { 734 vm_object_t backing_object; 735 736 backing_object = object->backing_object; 737 VM_OBJECT_ASSERT_WLOCKED(object); 738 VM_OBJECT_ASSERT_WLOCKED(backing_object); 739 740 KASSERT((object->flags & OBJ_COLLAPSING) == 0, 741 ("vm_object_backing_remove: Removing collapsing object.")); 742 743 if ((object->flags & OBJ_SHADOWLIST) != 0) { 744 LIST_REMOVE(object, shadow_list); 745 backing_object->shadow_count--; 746 object->flags &= ~OBJ_SHADOWLIST; 747 } 748 object->backing_object = NULL; 749 } 750 751 static void 752 vm_object_backing_remove(vm_object_t object) 753 { 754 vm_object_t backing_object; 755 756 VM_OBJECT_ASSERT_WLOCKED(object); 757 758 if ((object->flags & OBJ_SHADOWLIST) != 0) { 759 backing_object = object->backing_object; 760 VM_OBJECT_WLOCK(backing_object); 761 vm_object_backing_remove_locked(object); 762 VM_OBJECT_WUNLOCK(backing_object); 763 } else 764 object->backing_object = NULL; 765 } 766 767 static void 768 vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object) 769 { 770 771 VM_OBJECT_ASSERT_WLOCKED(object); 772 773 if ((backing_object->flags & OBJ_ANON) != 0) { 774 VM_OBJECT_ASSERT_WLOCKED(backing_object); 775 LIST_INSERT_HEAD(&backing_object->shadow_head, object, 776 shadow_list); 777 backing_object->shadow_count++; 778 object->flags |= OBJ_SHADOWLIST; 779 } 780 object->backing_object = backing_object; 781 } 782 783 static void 784 vm_object_backing_insert(vm_object_t object, vm_object_t backing_object) 785 { 786 787 VM_OBJECT_ASSERT_WLOCKED(object); 788 789 if ((backing_object->flags & OBJ_ANON) != 0) { 790 VM_OBJECT_WLOCK(backing_object); 791 vm_object_backing_insert_locked(object, backing_object); 792 VM_OBJECT_WUNLOCK(backing_object); 793 } else 794 object->backing_object = backing_object; 795 } 796 797 /* 798 * Insert an object into a backing_object's shadow list with an additional 799 * reference to the backing_object added. 800 */ 801 static void 802 vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object) 803 { 804 805 VM_OBJECT_ASSERT_WLOCKED(object); 806 807 if ((backing_object->flags & OBJ_ANON) != 0) { 808 VM_OBJECT_WLOCK(backing_object); 809 KASSERT((backing_object->flags & OBJ_DEAD) == 0, 810 ("shadowing dead anonymous object")); 811 vm_object_reference_locked(backing_object); 812 vm_object_backing_insert_locked(object, backing_object); 813 vm_object_clear_flag(backing_object, OBJ_ONEMAPPING); 814 VM_OBJECT_WUNLOCK(backing_object); 815 } else { 816 vm_object_reference(backing_object); 817 object->backing_object = backing_object; 818 } 819 } 820 821 /* 822 * Transfer a backing reference from backing_object to object. 823 */ 824 static void 825 vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object) 826 { 827 vm_object_t new_backing_object; 828 829 /* 830 * Note that the reference to backing_object->backing_object 831 * moves from within backing_object to within object. 
832 */ 833 vm_object_backing_remove_locked(object); 834 new_backing_object = backing_object->backing_object; 835 if (new_backing_object == NULL) 836 return; 837 if ((new_backing_object->flags & OBJ_ANON) != 0) { 838 VM_OBJECT_WLOCK(new_backing_object); 839 vm_object_backing_remove_locked(backing_object); 840 vm_object_backing_insert_locked(object, new_backing_object); 841 VM_OBJECT_WUNLOCK(new_backing_object); 842 } else { 843 object->backing_object = new_backing_object; 844 backing_object->backing_object = NULL; 845 } 846 } 847 848 /* 849 * Wait for a concurrent collapse to settle. 850 */ 851 static void 852 vm_object_collapse_wait(vm_object_t object) 853 { 854 855 VM_OBJECT_ASSERT_WLOCKED(object); 856 857 while ((object->flags & OBJ_COLLAPSING) != 0) { 858 vm_object_pip_wait(object, "vmcolwait"); 859 counter_u64_add(object_collapse_waits, 1); 860 } 861 } 862 863 /* 864 * Waits for a backing object to clear a pending collapse and returns 865 * it locked if it is an ANON object. 866 */ 867 static vm_object_t 868 vm_object_backing_collapse_wait(vm_object_t object) 869 { 870 vm_object_t backing_object; 871 872 VM_OBJECT_ASSERT_WLOCKED(object); 873 874 for (;;) { 875 backing_object = object->backing_object; 876 if (backing_object == NULL || 877 (backing_object->flags & OBJ_ANON) == 0) 878 return (NULL); 879 VM_OBJECT_WLOCK(backing_object); 880 if ((backing_object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) == 0) 881 break; 882 VM_OBJECT_WUNLOCK(object); 883 vm_object_pip_sleep(backing_object, "vmbckwait"); 884 counter_u64_add(object_collapse_waits, 1); 885 VM_OBJECT_WLOCK(object); 886 } 887 return (backing_object); 888 } 889 890 /* 891 * vm_object_terminate_pages removes any remaining pageable pages 892 * from the object and resets the object to an empty state. 893 */ 894 static void 895 vm_object_terminate_pages(vm_object_t object) 896 { 897 vm_page_t p, p_next; 898 899 VM_OBJECT_ASSERT_WLOCKED(object); 900 901 /* 902 * Free any remaining pageable pages. This also removes them from the 903 * paging queues. However, don't free wired pages, just remove them 904 * from the object. Rather than incrementally removing each page from 905 * the object, the page and object are reset to any empty state. 906 */ 907 TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) { 908 vm_page_assert_unbusied(p); 909 KASSERT(p->object == object && 910 (p->ref_count & VPRC_OBJREF) != 0, 911 ("vm_object_terminate_pages: page %p is inconsistent", p)); 912 913 p->object = NULL; 914 if (vm_page_drop(p, VPRC_OBJREF) == VPRC_OBJREF) { 915 VM_CNT_INC(v_pfree); 916 vm_page_free(p); 917 } 918 } 919 920 /* 921 * If the object contained any pages, then reset it to an empty state. 922 * None of the object's fields, including "resident_page_count", were 923 * modified by the preceding loop. 924 */ 925 if (object->resident_page_count != 0) { 926 vm_radix_reclaim_allnodes(&object->rtree); 927 TAILQ_INIT(&object->memq); 928 object->resident_page_count = 0; 929 if (object->type == OBJT_VNODE) 930 vdrop(object->handle); 931 } 932 } 933 934 /* 935 * vm_object_terminate actually destroys the specified object, freeing 936 * up all previously used resources. 937 * 938 * The object must be locked. 939 * This routine may block. 
940 */ 941 void 942 vm_object_terminate(vm_object_t object) 943 { 944 945 VM_OBJECT_ASSERT_WLOCKED(object); 946 KASSERT((object->flags & OBJ_DEAD) != 0, 947 ("terminating non-dead obj %p", object)); 948 KASSERT((object->flags & OBJ_COLLAPSING) == 0, 949 ("terminating collapsing obj %p", object)); 950 KASSERT(object->backing_object == NULL, 951 ("terminating shadow obj %p", object)); 952 953 /* 954 * wait for the pageout daemon to be done with the object 955 */ 956 vm_object_pip_wait(object, "objtrm"); 957 958 KASSERT(!REFCOUNT_COUNT(object->paging_in_progress), 959 ("vm_object_terminate: pageout in progress")); 960 961 KASSERT(object->ref_count == 0, 962 ("vm_object_terminate: object with references, ref_count=%d", 963 object->ref_count)); 964 965 if ((object->flags & OBJ_PG_DTOR) == 0) 966 vm_object_terminate_pages(object); 967 968 #if VM_NRESERVLEVEL > 0 969 if (__predict_false(!LIST_EMPTY(&object->rvq))) 970 vm_reserv_break_all(object); 971 #endif 972 973 KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT || 974 object->type == OBJT_SWAP, 975 ("%s: non-swap obj %p has cred", __func__, object)); 976 977 /* 978 * Let the pager know object is dead. 979 */ 980 vm_pager_deallocate(object); 981 VM_OBJECT_WUNLOCK(object); 982 983 vm_object_destroy(object); 984 } 985 986 /* 987 * Make the page read-only so that we can clear the object flags. However, if 988 * this is a nosync mmap then the object is likely to stay dirty so do not 989 * mess with the page and do not clear the object flags. Returns TRUE if the 990 * page should be flushed, and FALSE otherwise. 991 */ 992 static boolean_t 993 vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *allclean) 994 { 995 996 vm_page_assert_busied(p); 997 998 /* 999 * If we have been asked to skip nosync pages and this is a 1000 * nosync page, skip it. Note that the object flags were not 1001 * cleared in this case so we do not have to set them. 1002 */ 1003 if ((flags & OBJPC_NOSYNC) != 0 && (p->a.flags & PGA_NOSYNC) != 0) { 1004 *allclean = FALSE; 1005 return (FALSE); 1006 } else { 1007 pmap_remove_write(p); 1008 return (p->dirty != 0); 1009 } 1010 } 1011 1012 /* 1013 * vm_object_page_clean 1014 * 1015 * Clean all dirty pages in the specified range of object. Leaves page 1016 * on whatever queue it is currently on. If NOSYNC is set then do not 1017 * write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC), 1018 * leaving the object dirty. 1019 * 1020 * For swap objects backing tmpfs regular files, do not flush anything, 1021 * but remove write protection on the mapped pages to update mtime through 1022 * mmaped writes. 1023 * 1024 * When stuffing pages asynchronously, allow clustering. XXX we need a 1025 * synchronous clustering mode implementation. 1026 * 1027 * Odd semantics: if start == end, we clean everything. 1028 * 1029 * The object must be locked. 1030 * 1031 * Returns FALSE if some page from the range was not written, as 1032 * reported by the pager, and TRUE otherwise. 1033 */ 1034 boolean_t 1035 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, 1036 int flags) 1037 { 1038 vm_page_t np, p; 1039 vm_pindex_t pi, tend, tstart; 1040 int curgeneration, n, pagerflags; 1041 boolean_t eio, res, allclean; 1042 1043 VM_OBJECT_ASSERT_WLOCKED(object); 1044 1045 if (!vm_object_mightbedirty(object) || object->resident_page_count == 0) 1046 return (TRUE); 1047 1048 pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ? 
1049 VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK; 1050 pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0; 1051 1052 tstart = OFF_TO_IDX(start); 1053 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); 1054 allclean = tstart == 0 && tend >= object->size; 1055 res = TRUE; 1056 1057 rescan: 1058 curgeneration = object->generation; 1059 1060 for (p = vm_page_find_least(object, tstart); p != NULL; p = np) { 1061 pi = p->pindex; 1062 if (pi >= tend) 1063 break; 1064 np = TAILQ_NEXT(p, listq); 1065 if (vm_page_none_valid(p)) 1066 continue; 1067 if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0) { 1068 if (object->generation != curgeneration && 1069 (flags & OBJPC_SYNC) != 0) 1070 goto rescan; 1071 np = vm_page_find_least(object, pi); 1072 continue; 1073 } 1074 if (!vm_object_page_remove_write(p, flags, &allclean)) { 1075 vm_page_xunbusy(p); 1076 continue; 1077 } 1078 if (object->type == OBJT_VNODE) { 1079 n = vm_object_page_collect_flush(object, p, pagerflags, 1080 flags, &allclean, &eio); 1081 if (eio) { 1082 res = FALSE; 1083 allclean = FALSE; 1084 } 1085 if (object->generation != curgeneration && 1086 (flags & OBJPC_SYNC) != 0) 1087 goto rescan; 1088 1089 /* 1090 * If the VOP_PUTPAGES() did a truncated write, so 1091 * that even the first page of the run is not fully 1092 * written, vm_pageout_flush() returns 0 as the run 1093 * length. Since the condition that caused truncated 1094 * write may be permanent, e.g. exhausted free space, 1095 * accepting n == 0 would cause an infinite loop. 1096 * 1097 * Forwarding the iterator leaves the unwritten page 1098 * behind, but there is not much we can do there if 1099 * filesystem refuses to write it. 1100 */ 1101 if (n == 0) { 1102 n = 1; 1103 allclean = FALSE; 1104 } 1105 } else { 1106 n = 1; 1107 vm_page_xunbusy(p); 1108 } 1109 np = vm_page_find_least(object, pi + n); 1110 } 1111 #if 0 1112 VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0); 1113 #endif 1114 1115 /* 1116 * Leave updating cleangeneration for tmpfs objects to tmpfs 1117 * scan. It needs to update mtime, which happens for other 1118 * filesystems during page writeouts. 
1119 */ 1120 if (allclean && object->type == OBJT_VNODE) 1121 object->cleangeneration = curgeneration; 1122 return (res); 1123 } 1124 1125 static int 1126 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, 1127 int flags, boolean_t *allclean, boolean_t *eio) 1128 { 1129 vm_page_t ma[vm_pageout_page_count], p_first, tp; 1130 int count, i, mreq, runlen; 1131 1132 vm_page_lock_assert(p, MA_NOTOWNED); 1133 vm_page_assert_xbusied(p); 1134 VM_OBJECT_ASSERT_WLOCKED(object); 1135 1136 count = 1; 1137 mreq = 0; 1138 1139 for (tp = p; count < vm_pageout_page_count; count++) { 1140 tp = vm_page_next(tp); 1141 if (tp == NULL || vm_page_tryxbusy(tp) == 0) 1142 break; 1143 if (!vm_object_page_remove_write(tp, flags, allclean)) { 1144 vm_page_xunbusy(tp); 1145 break; 1146 } 1147 } 1148 1149 for (p_first = p; count < vm_pageout_page_count; count++) { 1150 tp = vm_page_prev(p_first); 1151 if (tp == NULL || vm_page_tryxbusy(tp) == 0) 1152 break; 1153 if (!vm_object_page_remove_write(tp, flags, allclean)) { 1154 vm_page_xunbusy(tp); 1155 break; 1156 } 1157 p_first = tp; 1158 mreq++; 1159 } 1160 1161 for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++) 1162 ma[i] = tp; 1163 1164 vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio); 1165 return (runlen); 1166 } 1167 1168 /* 1169 * Note that there is absolutely no sense in writing out 1170 * anonymous objects, so we track down the vnode object 1171 * to write out. 1172 * We invalidate (remove) all pages from the address space 1173 * for semantic correctness. 1174 * 1175 * If the backing object is a device object with unmanaged pages, then any 1176 * mappings to the specified range of pages must be removed before this 1177 * function is called. 1178 * 1179 * Note: certain anonymous maps, such as MAP_NOSYNC maps, 1180 * may start out with a NULL object. 1181 */ 1182 boolean_t 1183 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, 1184 boolean_t syncio, boolean_t invalidate) 1185 { 1186 vm_object_t backing_object; 1187 struct vnode *vp; 1188 struct mount *mp; 1189 int error, flags, fsync_after; 1190 boolean_t res; 1191 1192 if (object == NULL) 1193 return (TRUE); 1194 res = TRUE; 1195 error = 0; 1196 VM_OBJECT_WLOCK(object); 1197 while ((backing_object = object->backing_object) != NULL) { 1198 VM_OBJECT_WLOCK(backing_object); 1199 offset += object->backing_object_offset; 1200 VM_OBJECT_WUNLOCK(object); 1201 object = backing_object; 1202 if (object->size < OFF_TO_IDX(offset + size)) 1203 size = IDX_TO_OFF(object->size) - offset; 1204 } 1205 /* 1206 * Flush pages if writing is allowed, invalidate them 1207 * if invalidation requested. Pages undergoing I/O 1208 * will be ignored by vm_object_page_remove(). 1209 * 1210 * We cannot lock the vnode and then wait for paging 1211 * to complete without deadlocking against vm_fault. 1212 * Instead we simply call vm_object_page_remove() and 1213 * allow it to block internally on a page-by-page 1214 * basis when it encounters pages undergoing async 1215 * I/O. 
1216 */ 1217 if (object->type == OBJT_VNODE && 1218 vm_object_mightbedirty(object) != 0 && 1219 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) { 1220 VM_OBJECT_WUNLOCK(object); 1221 (void) vn_start_write(vp, &mp, V_WAIT); 1222 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1223 if (syncio && !invalidate && offset == 0 && 1224 atop(size) == object->size) { 1225 /* 1226 * If syncing the whole mapping of the file, 1227 * it is faster to schedule all the writes in 1228 * async mode, also allowing the clustering, 1229 * and then wait for i/o to complete. 1230 */ 1231 flags = 0; 1232 fsync_after = TRUE; 1233 } else { 1234 flags = (syncio || invalidate) ? OBJPC_SYNC : 0; 1235 flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0; 1236 fsync_after = FALSE; 1237 } 1238 VM_OBJECT_WLOCK(object); 1239 res = vm_object_page_clean(object, offset, offset + size, 1240 flags); 1241 VM_OBJECT_WUNLOCK(object); 1242 if (fsync_after) 1243 error = VOP_FSYNC(vp, MNT_WAIT, curthread); 1244 VOP_UNLOCK(vp); 1245 vn_finished_write(mp); 1246 if (error != 0) 1247 res = FALSE; 1248 VM_OBJECT_WLOCK(object); 1249 } 1250 if ((object->type == OBJT_VNODE || 1251 object->type == OBJT_DEVICE) && invalidate) { 1252 if (object->type == OBJT_DEVICE) 1253 /* 1254 * The option OBJPR_NOTMAPPED must be passed here 1255 * because vm_object_page_remove() cannot remove 1256 * unmanaged mappings. 1257 */ 1258 flags = OBJPR_NOTMAPPED; 1259 else if (old_msync) 1260 flags = 0; 1261 else 1262 flags = OBJPR_CLEANONLY; 1263 vm_object_page_remove(object, OFF_TO_IDX(offset), 1264 OFF_TO_IDX(offset + size + PAGE_MASK), flags); 1265 } 1266 VM_OBJECT_WUNLOCK(object); 1267 return (res); 1268 } 1269 1270 /* 1271 * Determine whether the given advice can be applied to the object. Advice is 1272 * not applied to unmanaged pages since they never belong to page queues, and 1273 * since MADV_FREE is destructive, it can apply only to anonymous pages that 1274 * have been mapped at most once. 1275 */ 1276 static bool 1277 vm_object_advice_applies(vm_object_t object, int advice) 1278 { 1279 1280 if ((object->flags & OBJ_UNMANAGED) != 0) 1281 return (false); 1282 if (advice != MADV_FREE) 1283 return (true); 1284 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) == 1285 (OBJ_ONEMAPPING | OBJ_ANON)); 1286 } 1287 1288 static void 1289 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, 1290 vm_size_t size) 1291 { 1292 1293 if (advice == MADV_FREE && object->type == OBJT_SWAP) 1294 swap_pager_freespace(object, pindex, size); 1295 } 1296 1297 /* 1298 * vm_object_madvise: 1299 * 1300 * Implements the madvise function at the object/page level. 1301 * 1302 * MADV_WILLNEED (any object) 1303 * 1304 * Activate the specified pages if they are resident. 1305 * 1306 * MADV_DONTNEED (any object) 1307 * 1308 * Deactivate the specified pages if they are resident. 1309 * 1310 * MADV_FREE (OBJT_DEFAULT/OBJT_SWAP objects, 1311 * OBJ_ONEMAPPING only) 1312 * 1313 * Deactivate and clean the specified pages if they are 1314 * resident. This permits the process to reuse the pages 1315 * without faulting or the kernel to reclaim the pages 1316 * without I/O. 
1317 */ 1318 void 1319 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, 1320 int advice) 1321 { 1322 vm_pindex_t tpindex; 1323 vm_object_t backing_object, tobject; 1324 vm_page_t m, tm; 1325 1326 if (object == NULL) 1327 return; 1328 1329 relookup: 1330 VM_OBJECT_WLOCK(object); 1331 if (!vm_object_advice_applies(object, advice)) { 1332 VM_OBJECT_WUNLOCK(object); 1333 return; 1334 } 1335 for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { 1336 tobject = object; 1337 1338 /* 1339 * If the next page isn't resident in the top-level object, we 1340 * need to search the shadow chain. When applying MADV_FREE, we 1341 * take care to release any swap space used to store 1342 * non-resident pages. 1343 */ 1344 if (m == NULL || pindex < m->pindex) { 1345 /* 1346 * Optimize a common case: if the top-level object has 1347 * no backing object, we can skip over the non-resident 1348 * range in constant time. 1349 */ 1350 if (object->backing_object == NULL) { 1351 tpindex = (m != NULL && m->pindex < end) ? 1352 m->pindex : end; 1353 vm_object_madvise_freespace(object, advice, 1354 pindex, tpindex - pindex); 1355 if ((pindex = tpindex) == end) 1356 break; 1357 goto next_page; 1358 } 1359 1360 tpindex = pindex; 1361 do { 1362 vm_object_madvise_freespace(tobject, advice, 1363 tpindex, 1); 1364 /* 1365 * Prepare to search the next object in the 1366 * chain. 1367 */ 1368 backing_object = tobject->backing_object; 1369 if (backing_object == NULL) 1370 goto next_pindex; 1371 VM_OBJECT_WLOCK(backing_object); 1372 tpindex += 1373 OFF_TO_IDX(tobject->backing_object_offset); 1374 if (tobject != object) 1375 VM_OBJECT_WUNLOCK(tobject); 1376 tobject = backing_object; 1377 if (!vm_object_advice_applies(tobject, advice)) 1378 goto next_pindex; 1379 } while ((tm = vm_page_lookup(tobject, tpindex)) == 1380 NULL); 1381 } else { 1382 next_page: 1383 tm = m; 1384 m = TAILQ_NEXT(m, listq); 1385 } 1386 1387 /* 1388 * If the page is not in a normal state, skip it. The page 1389 * can not be invalidated while the object lock is held. 1390 */ 1391 if (!vm_page_all_valid(tm) || vm_page_wired(tm)) 1392 goto next_pindex; 1393 KASSERT((tm->flags & PG_FICTITIOUS) == 0, 1394 ("vm_object_madvise: page %p is fictitious", tm)); 1395 KASSERT((tm->oflags & VPO_UNMANAGED) == 0, 1396 ("vm_object_madvise: page %p is not managed", tm)); 1397 if (vm_page_tryxbusy(tm) == 0) { 1398 if (object != tobject) 1399 VM_OBJECT_WUNLOCK(object); 1400 if (advice == MADV_WILLNEED) { 1401 /* 1402 * Reference the page before unlocking and 1403 * sleeping so that the page daemon is less 1404 * likely to reclaim it. 1405 */ 1406 vm_page_aflag_set(tm, PGA_REFERENCED); 1407 } 1408 vm_page_busy_sleep(tm, "madvpo", false); 1409 goto relookup; 1410 } 1411 vm_page_advise(tm, advice); 1412 vm_page_xunbusy(tm); 1413 vm_object_madvise_freespace(tobject, advice, tm->pindex, 1); 1414 next_pindex: 1415 if (tobject != object) 1416 VM_OBJECT_WUNLOCK(tobject); 1417 } 1418 VM_OBJECT_WUNLOCK(object); 1419 } 1420 1421 /* 1422 * vm_object_shadow: 1423 * 1424 * Create a new object which is backed by the 1425 * specified existing object range. The source 1426 * object reference is deallocated. 1427 * 1428 * The new object and offset into that object 1429 * are returned in the source parameters. 
1430 */ 1431 void 1432 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length, 1433 struct ucred *cred, bool shared) 1434 { 1435 vm_object_t source; 1436 vm_object_t result; 1437 1438 source = *object; 1439 1440 /* 1441 * Don't create the new object if the old object isn't shared. 1442 * 1443 * If we hold the only reference we can guarantee that it won't 1444 * increase while we have the map locked. Otherwise the race is 1445 * harmless and we will end up with an extra shadow object that 1446 * will be collapsed later. 1447 */ 1448 if (source != NULL && source->ref_count == 1 && 1449 (source->flags & OBJ_ANON) != 0) 1450 return; 1451 1452 /* 1453 * Allocate a new object with the given length. 1454 */ 1455 result = vm_object_allocate_anon(atop(length), source, cred, length); 1456 1457 /* 1458 * Store the offset into the source object, and fix up the offset into 1459 * the new object. 1460 */ 1461 result->backing_object_offset = *offset; 1462 1463 if (shared || source != NULL) { 1464 VM_OBJECT_WLOCK(result); 1465 1466 /* 1467 * The new object shadows the source object, adding a 1468 * reference to it. Our caller changes his reference 1469 * to point to the new object, removing a reference to 1470 * the source object. Net result: no change of 1471 * reference count, unless the caller needs to add one 1472 * more reference due to forking a shared map entry. 1473 */ 1474 if (shared) { 1475 vm_object_reference_locked(result); 1476 vm_object_clear_flag(result, OBJ_ONEMAPPING); 1477 } 1478 1479 /* 1480 * Try to optimize the result object's page color when 1481 * shadowing in order to maintain page coloring 1482 * consistency in the combined shadowed object. 1483 */ 1484 if (source != NULL) { 1485 vm_object_backing_insert(result, source); 1486 result->domain = source->domain; 1487 #if VM_NRESERVLEVEL > 0 1488 result->flags |= source->flags & OBJ_COLORED; 1489 result->pg_color = (source->pg_color + 1490 OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER - 1491 1)) - 1); 1492 #endif 1493 } 1494 VM_OBJECT_WUNLOCK(result); 1495 } 1496 1497 /* 1498 * Return the new things 1499 */ 1500 *offset = 0; 1501 *object = result; 1502 } 1503 1504 /* 1505 * vm_object_split: 1506 * 1507 * Split the pages in a map entry into a new object. This affords 1508 * easier removal of unused pages, and keeps object inheritance from 1509 * being a negative impact on memory usage. 1510 */ 1511 void 1512 vm_object_split(vm_map_entry_t entry) 1513 { 1514 vm_page_t m, m_next; 1515 vm_object_t orig_object, new_object, backing_object; 1516 vm_pindex_t idx, offidxstart; 1517 vm_size_t size; 1518 1519 orig_object = entry->object.vm_object; 1520 KASSERT((orig_object->flags & OBJ_ONEMAPPING) != 0, 1521 ("vm_object_split: Splitting object with multiple mappings.")); 1522 if ((orig_object->flags & OBJ_ANON) == 0) 1523 return; 1524 if (orig_object->ref_count <= 1) 1525 return; 1526 VM_OBJECT_WUNLOCK(orig_object); 1527 1528 offidxstart = OFF_TO_IDX(entry->offset); 1529 size = atop(entry->end - entry->start); 1530 1531 /* 1532 * If swap_pager_copy() is later called, it will convert new_object 1533 * into a swap object. 1534 */ 1535 new_object = vm_object_allocate_anon(size, orig_object, 1536 orig_object->cred, ptoa(size)); 1537 1538 /* 1539 * We must wait for the orig_object to complete any in-progress 1540 * collapse so that the swap blocks are stable below. The 1541 * additional reference on backing_object by new object will 1542 * prevent further collapse operations until split completes. 
1543 */ 1544 VM_OBJECT_WLOCK(orig_object); 1545 vm_object_collapse_wait(orig_object); 1546 1547 /* 1548 * At this point, the new object is still private, so the order in 1549 * which the original and new objects are locked does not matter. 1550 */ 1551 VM_OBJECT_WLOCK(new_object); 1552 new_object->domain = orig_object->domain; 1553 backing_object = orig_object->backing_object; 1554 if (backing_object != NULL) { 1555 vm_object_backing_insert_ref(new_object, backing_object); 1556 new_object->backing_object_offset = 1557 orig_object->backing_object_offset + entry->offset; 1558 } 1559 if (orig_object->cred != NULL) { 1560 crhold(orig_object->cred); 1561 KASSERT(orig_object->charge >= ptoa(size), 1562 ("orig_object->charge < 0")); 1563 orig_object->charge -= ptoa(size); 1564 } 1565 1566 /* 1567 * Mark the split operation so that swap_pager_getpages() knows 1568 * that the object is in transition. 1569 */ 1570 vm_object_set_flag(orig_object, OBJ_SPLIT); 1571 retry: 1572 m = vm_page_find_least(orig_object, offidxstart); 1573 for (; m != NULL && (idx = m->pindex - offidxstart) < size; 1574 m = m_next) { 1575 m_next = TAILQ_NEXT(m, listq); 1576 1577 /* 1578 * We must wait for pending I/O to complete before we can 1579 * rename the page. 1580 * 1581 * We do not have to VM_PROT_NONE the page as mappings should 1582 * not be changed by this operation. 1583 */ 1584 if (vm_page_tryxbusy(m) == 0) { 1585 VM_OBJECT_WUNLOCK(new_object); 1586 vm_page_sleep_if_busy(m, "spltwt"); 1587 VM_OBJECT_WLOCK(new_object); 1588 goto retry; 1589 } 1590 1591 /* 1592 * The page was left invalid. Likely placed there by 1593 * an incomplete fault. Just remove and ignore. 1594 */ 1595 if (vm_page_none_valid(m)) { 1596 if (vm_page_remove(m)) 1597 vm_page_free(m); 1598 continue; 1599 } 1600 1601 /* vm_page_rename() will dirty the page. */ 1602 if (vm_page_rename(m, new_object, idx)) { 1603 vm_page_xunbusy(m); 1604 VM_OBJECT_WUNLOCK(new_object); 1605 VM_OBJECT_WUNLOCK(orig_object); 1606 vm_radix_wait(); 1607 VM_OBJECT_WLOCK(orig_object); 1608 VM_OBJECT_WLOCK(new_object); 1609 goto retry; 1610 } 1611 1612 #if VM_NRESERVLEVEL > 0 1613 /* 1614 * If some of the reservation's allocated pages remain with 1615 * the original object, then transferring the reservation to 1616 * the new object is neither particularly beneficial nor 1617 * particularly harmful as compared to leaving the reservation 1618 * with the original object. If, however, all of the 1619 * reservation's allocated pages are transferred to the new 1620 * object, then transferring the reservation is typically 1621 * beneficial. Determining which of these two cases applies 1622 * would be more costly than unconditionally renaming the 1623 * reservation. 1624 */ 1625 vm_reserv_rename(m, new_object, orig_object, offidxstart); 1626 #endif 1627 if (orig_object->type != OBJT_SWAP) 1628 vm_page_xunbusy(m); 1629 } 1630 if (orig_object->type == OBJT_SWAP) { 1631 /* 1632 * swap_pager_copy() can sleep, in which case the orig_object's 1633 * and new_object's locks are released and reacquired. 
1634 */ 1635 swap_pager_copy(orig_object, new_object, offidxstart, 0); 1636 TAILQ_FOREACH(m, &new_object->memq, listq) 1637 vm_page_xunbusy(m); 1638 } 1639 vm_object_clear_flag(orig_object, OBJ_SPLIT); 1640 VM_OBJECT_WUNLOCK(orig_object); 1641 VM_OBJECT_WUNLOCK(new_object); 1642 entry->object.vm_object = new_object; 1643 entry->offset = 0LL; 1644 vm_object_deallocate(orig_object); 1645 VM_OBJECT_WLOCK(new_object); 1646 } 1647 1648 static vm_page_t 1649 vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p) 1650 { 1651 vm_object_t backing_object; 1652 1653 VM_OBJECT_ASSERT_WLOCKED(object); 1654 backing_object = object->backing_object; 1655 VM_OBJECT_ASSERT_WLOCKED(backing_object); 1656 1657 KASSERT(p == NULL || p->object == object || p->object == backing_object, 1658 ("invalid ownership %p %p %p", p, object, backing_object)); 1659 /* The page is only NULL when rename fails. */ 1660 if (p == NULL) { 1661 VM_OBJECT_WUNLOCK(object); 1662 VM_OBJECT_WUNLOCK(backing_object); 1663 vm_radix_wait(); 1664 } else { 1665 if (p->object == object) 1666 VM_OBJECT_WUNLOCK(backing_object); 1667 else 1668 VM_OBJECT_WUNLOCK(object); 1669 vm_page_busy_sleep(p, "vmocol", false); 1670 } 1671 VM_OBJECT_WLOCK(object); 1672 VM_OBJECT_WLOCK(backing_object); 1673 return (TAILQ_FIRST(&backing_object->memq)); 1674 } 1675 1676 static bool 1677 vm_object_scan_all_shadowed(vm_object_t object) 1678 { 1679 vm_object_t backing_object; 1680 vm_page_t p, pp; 1681 vm_pindex_t backing_offset_index, new_pindex, pi, ps; 1682 1683 VM_OBJECT_ASSERT_WLOCKED(object); 1684 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 1685 1686 backing_object = object->backing_object; 1687 1688 if ((backing_object->flags & OBJ_ANON) == 0) 1689 return (false); 1690 1691 pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 1692 p = vm_page_find_least(backing_object, pi); 1693 ps = swap_pager_find_least(backing_object, pi); 1694 1695 /* 1696 * Only check pages inside the parent object's range and 1697 * inside the parent object's mapping of the backing object. 1698 */ 1699 for (;; pi++) { 1700 if (p != NULL && p->pindex < pi) 1701 p = TAILQ_NEXT(p, listq); 1702 if (ps < pi) 1703 ps = swap_pager_find_least(backing_object, pi); 1704 if (p == NULL && ps >= backing_object->size) 1705 break; 1706 else if (p == NULL) 1707 pi = ps; 1708 else 1709 pi = MIN(p->pindex, ps); 1710 1711 new_pindex = pi - backing_offset_index; 1712 if (new_pindex >= object->size) 1713 break; 1714 1715 if (p != NULL) { 1716 /* 1717 * If the backing object page is busy a 1718 * grandparent or older page may still be 1719 * undergoing CoW. It is not safe to collapse 1720 * the backing object until it is quiesced. 1721 */ 1722 if (vm_page_tryxbusy(p) == 0) 1723 return (false); 1724 1725 /* 1726 * We raced with the fault handler that left 1727 * newly allocated invalid page on the object 1728 * queue and retried. 1729 */ 1730 if (!vm_page_all_valid(p)) 1731 goto unbusy_ret; 1732 } 1733 1734 /* 1735 * See if the parent has the page or if the parent's object 1736 * pager has the page. If the parent has the page but the page 1737 * is not valid, the parent's object pager must have the page. 1738 * 1739 * If this fails, the parent does not completely shadow the 1740 * object and we might as well give up now. 1741 */ 1742 pp = vm_page_lookup(object, new_pindex); 1743 1744 /* 1745 * The valid check here is stable due to object lock 1746 * being required to clear valid and initiate paging. 1747 * Busy of p disallows fault handler to validate pp. 
1748 */ 1749 if ((pp == NULL || vm_page_none_valid(pp)) && 1750 !vm_pager_has_page(object, new_pindex, NULL, NULL)) 1751 goto unbusy_ret; 1752 if (p != NULL) 1753 vm_page_xunbusy(p); 1754 } 1755 return (true); 1756 1757 unbusy_ret: 1758 if (p != NULL) 1759 vm_page_xunbusy(p); 1760 return (false); 1761 } 1762 1763 static void 1764 vm_object_collapse_scan(vm_object_t object) 1765 { 1766 vm_object_t backing_object; 1767 vm_page_t next, p, pp; 1768 vm_pindex_t backing_offset_index, new_pindex; 1769 1770 VM_OBJECT_ASSERT_WLOCKED(object); 1771 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); 1772 1773 backing_object = object->backing_object; 1774 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 1775 1776 /* 1777 * Our scan 1778 */ 1779 for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) { 1780 next = TAILQ_NEXT(p, listq); 1781 new_pindex = p->pindex - backing_offset_index; 1782 1783 /* 1784 * Check for busy page 1785 */ 1786 if (vm_page_tryxbusy(p) == 0) { 1787 next = vm_object_collapse_scan_wait(object, p); 1788 continue; 1789 } 1790 1791 KASSERT(object->backing_object == backing_object, 1792 ("vm_object_collapse_scan: backing object mismatch %p != %p", 1793 object->backing_object, backing_object)); 1794 KASSERT(p->object == backing_object, 1795 ("vm_object_collapse_scan: object mismatch %p != %p", 1796 p->object, backing_object)); 1797 1798 if (p->pindex < backing_offset_index || 1799 new_pindex >= object->size) { 1800 if (backing_object->type == OBJT_SWAP) 1801 swap_pager_freespace(backing_object, p->pindex, 1802 1); 1803 1804 KASSERT(!pmap_page_is_mapped(p), 1805 ("freeing mapped page %p", p)); 1806 if (vm_page_remove(p)) 1807 vm_page_free(p); 1808 continue; 1809 } 1810 1811 if (!vm_page_all_valid(p)) { 1812 KASSERT(!pmap_page_is_mapped(p), 1813 ("freeing mapped page %p", p)); 1814 if (vm_page_remove(p)) 1815 vm_page_free(p); 1816 continue; 1817 } 1818 1819 pp = vm_page_lookup(object, new_pindex); 1820 if (pp != NULL && vm_page_tryxbusy(pp) == 0) { 1821 vm_page_xunbusy(p); 1822 /* 1823 * The page in the parent is busy and possibly not 1824 * (yet) valid. Until its state is finalized by the 1825 * busy bit owner, we can't tell whether it shadows the 1826 * original page. 1827 */ 1828 next = vm_object_collapse_scan_wait(object, pp); 1829 continue; 1830 } 1831 1832 if (pp != NULL && vm_page_none_valid(pp)) { 1833 /* 1834 * The page was invalid in the parent. Likely placed 1835 * there by an incomplete fault. Just remove and 1836 * ignore. p can replace it. 1837 */ 1838 if (vm_page_remove(pp)) 1839 vm_page_free(pp); 1840 pp = NULL; 1841 } 1842 1843 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, 1844 NULL)) { 1845 /* 1846 * The page already exists in the parent OR swap exists 1847 * for this location in the parent. Leave the parent's 1848 * page alone. Destroy the original page from the 1849 * backing object. 1850 */ 1851 if (backing_object->type == OBJT_SWAP) 1852 swap_pager_freespace(backing_object, p->pindex, 1853 1); 1854 KASSERT(!pmap_page_is_mapped(p), 1855 ("freeing mapped page %p", p)); 1856 if (vm_page_remove(p)) 1857 vm_page_free(p); 1858 if (pp != NULL) 1859 vm_page_xunbusy(pp); 1860 continue; 1861 } 1862 1863 /* 1864 * Page does not exist in parent, rename the page from the 1865 * backing object to the main object. 1866 * 1867 * If the page was mapped to a process, it can remain mapped 1868 * through the rename. vm_page_rename() will dirty the page. 
1869 */ 1870 if (vm_page_rename(p, object, new_pindex)) { 1871 vm_page_xunbusy(p); 1872 next = vm_object_collapse_scan_wait(object, NULL); 1873 continue; 1874 } 1875 1876 /* Use the old pindex to free the right page. */ 1877 if (backing_object->type == OBJT_SWAP) 1878 swap_pager_freespace(backing_object, 1879 new_pindex + backing_offset_index, 1); 1880 1881 #if VM_NRESERVLEVEL > 0 1882 /* 1883 * Rename the reservation. 1884 */ 1885 vm_reserv_rename(p, object, backing_object, 1886 backing_offset_index); 1887 #endif 1888 vm_page_xunbusy(p); 1889 } 1890 return; 1891 } 1892 1893 /* 1894 * vm_object_collapse: 1895 * 1896 * Collapse an object with the object backing it. 1897 * Pages in the backing object are moved into the 1898 * parent, and the backing object is deallocated. 1899 */ 1900 void 1901 vm_object_collapse(vm_object_t object) 1902 { 1903 vm_object_t backing_object, new_backing_object; 1904 1905 VM_OBJECT_ASSERT_WLOCKED(object); 1906 1907 while (TRUE) { 1908 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON, 1909 ("collapsing invalid object")); 1910 1911 /* 1912 * Wait for the backing_object to finish any pending 1913 * collapse so that the caller sees the shortest possible 1914 * shadow chain. 1915 */ 1916 backing_object = vm_object_backing_collapse_wait(object); 1917 if (backing_object == NULL) 1918 return; 1919 1920 KASSERT(object->ref_count > 0 && 1921 object->ref_count > object->shadow_count, 1922 ("collapse with invalid ref %d or shadow %d count.", 1923 object->ref_count, object->shadow_count)); 1924 KASSERT((backing_object->flags & 1925 (OBJ_COLLAPSING | OBJ_DEAD)) == 0, 1926 ("vm_object_collapse: Backing object already collapsing.")); 1927 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0, 1928 ("vm_object_collapse: object is already collapsing.")); 1929 1930 /* 1931 * We know that we can either collapse the backing object if 1932 * the parent is the only reference to it, or (perhaps) have 1933 * the parent bypass the object if the parent happens to shadow 1934 * all the resident pages in the entire backing object. 1935 */ 1936 if (backing_object->ref_count == 1) { 1937 KASSERT(backing_object->shadow_count == 1, 1938 ("vm_object_collapse: shadow_count: %d", 1939 backing_object->shadow_count)); 1940 vm_object_pip_add(object, 1); 1941 vm_object_set_flag(object, OBJ_COLLAPSING); 1942 vm_object_pip_add(backing_object, 1); 1943 vm_object_set_flag(backing_object, OBJ_DEAD); 1944 1945 /* 1946 * If there is exactly one reference to the backing 1947 * object, we can collapse it into the parent. 1948 */ 1949 vm_object_collapse_scan(object); 1950 1951 #if VM_NRESERVLEVEL > 0 1952 /* 1953 * Break any reservations from backing_object. 1954 */ 1955 if (__predict_false(!LIST_EMPTY(&backing_object->rvq))) 1956 vm_reserv_break_all(backing_object); 1957 #endif 1958 1959 /* 1960 * Move the pager from backing_object to object. 1961 */ 1962 if (backing_object->type == OBJT_SWAP) { 1963 /* 1964 * swap_pager_copy() can sleep, in which case 1965 * the backing_object's and object's locks are 1966 * released and reacquired. 1967 * Since swap_pager_copy() is being asked to 1968 * destroy backing_object, it will change the 1969 * type to OBJT_DEFAULT. 1970 */ 1971 swap_pager_copy( 1972 backing_object, 1973 object, 1974 OFF_TO_IDX(object->backing_object_offset), TRUE); 1975 } 1976 1977 /* 1978 * Object now shadows whatever backing_object did. 
1979 */ 1980 vm_object_clear_flag(object, OBJ_COLLAPSING); 1981 vm_object_backing_transfer(object, backing_object); 1982 object->backing_object_offset += 1983 backing_object->backing_object_offset; 1984 VM_OBJECT_WUNLOCK(object); 1985 vm_object_pip_wakeup(object); 1986 1987 /* 1988 * Discard backing_object. 1989 * 1990 * Since the backing object has no pages, no pager left, 1991 * and no object references within it, all that is 1992 * necessary is to dispose of it. 1993 */ 1994 KASSERT(backing_object->ref_count == 1, ( 1995 "backing_object %p was somehow re-referenced during collapse!", 1996 backing_object)); 1997 vm_object_pip_wakeup(backing_object); 1998 (void)refcount_release(&backing_object->ref_count); 1999 vm_object_terminate(backing_object); 2000 counter_u64_add(object_collapses, 1); 2001 VM_OBJECT_WLOCK(object); 2002 } else { 2003 /* 2004 * If we do not entirely shadow the backing object, 2005 * there is nothing we can do so we give up. 2006 * 2007 * The object lock and backing_object lock must not 2008 * be dropped during this sequence. 2009 */ 2010 if (!vm_object_scan_all_shadowed(object)) { 2011 VM_OBJECT_WUNLOCK(backing_object); 2012 break; 2013 } 2014 2015 /* 2016 * Make the parent shadow the next object in the 2017 * chain. Deallocating backing_object will not remove 2018 * it, since its reference count is at least 2. 2019 */ 2020 vm_object_backing_remove_locked(object); 2021 new_backing_object = backing_object->backing_object; 2022 if (new_backing_object != NULL) { 2023 vm_object_backing_insert_ref(object, 2024 new_backing_object); 2025 object->backing_object_offset += 2026 backing_object->backing_object_offset; 2027 } 2028 2029 /* 2030 * Drop the reference count on backing_object. Since 2031 * its ref_count was at least 2, it will not vanish. 2032 */ 2033 (void)refcount_release(&backing_object->ref_count); 2034 KASSERT(backing_object->ref_count >= 1, ( 2035 "backing_object %p was somehow dereferenced during collapse!", 2036 backing_object)); 2037 VM_OBJECT_WUNLOCK(backing_object); 2038 counter_u64_add(object_bypasses, 1); 2039 } 2040 2041 /* 2042 * Try again with this object's new backing object. 2043 */ 2044 } 2045 } 2046 2047 /* 2048 * vm_object_page_remove: 2049 * 2050 * For the given object, either frees or invalidates each of the 2051 * specified pages. In general, a page is freed. However, if a page is 2052 * wired for any reason other than the existence of a managed, wired 2053 * mapping, then it may be invalidated but not removed from the object. 2054 * Pages are specified by the given range ["start", "end") and the option 2055 * OBJPR_CLEANONLY. As a special case, if "end" is zero, then the range 2056 * extends from "start" to the end of the object. If the option 2057 * OBJPR_CLEANONLY is specified, then only the non-dirty pages within the 2058 * specified range are affected. If the option OBJPR_NOTMAPPED is 2059 * specified, then the pages within the specified range must have no 2060 * mappings. Otherwise, if this option is not specified, any mappings to 2061 * the specified pages are removed before the pages are freed or 2062 * invalidated. 2063 * 2064 * In general, this operation should only be performed on objects that 2065 * contain managed pages. There are, however, two exceptions. First, it 2066 * is performed on the kernel and kmem objects by vm_map_entry_delete(). 2067 * Second, it is used by msync(..., MS_INVALIDATE) to invalidate device- 2068 * backed pages. 
In both of these cases, the option OBJPR_CLEANONLY must 2069 * not be specified and the option OBJPR_NOTMAPPED must be specified. 2070 * 2071 * The object must be locked. 2072 */ 2073 void 2074 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, 2075 int options) 2076 { 2077 vm_page_t p, next; 2078 2079 VM_OBJECT_ASSERT_WLOCKED(object); 2080 KASSERT((object->flags & OBJ_UNMANAGED) == 0 || 2081 (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED, 2082 ("vm_object_page_remove: illegal options for object %p", object)); 2083 if (object->resident_page_count == 0) 2084 return; 2085 vm_object_pip_add(object, 1); 2086 again: 2087 p = vm_page_find_least(object, start); 2088 2089 /* 2090 * Here, the variable "p" is either (1) the page with the least pindex 2091 * greater than or equal to the parameter "start" or (2) NULL. 2092 */ 2093 for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2094 next = TAILQ_NEXT(p, listq); 2095 2096 /* 2097 * If the page is wired for any reason besides the existence 2098 * of managed, wired mappings, then it cannot be freed. For 2099 * example, fictitious pages, which represent device memory, 2100 * are inherently wired and cannot be freed. They can, 2101 * however, be invalidated if the option OBJPR_CLEANONLY is 2102 * not specified. 2103 */ 2104 if (vm_page_tryxbusy(p) == 0) { 2105 vm_page_sleep_if_busy(p, "vmopar"); 2106 goto again; 2107 } 2108 if (vm_page_wired(p)) { 2109 wired: 2110 if ((options & OBJPR_NOTMAPPED) == 0 && 2111 object->ref_count != 0) 2112 pmap_remove_all(p); 2113 if ((options & OBJPR_CLEANONLY) == 0) { 2114 vm_page_invalid(p); 2115 vm_page_undirty(p); 2116 } 2117 vm_page_xunbusy(p); 2118 continue; 2119 } 2120 KASSERT((p->flags & PG_FICTITIOUS) == 0, 2121 ("vm_object_page_remove: page %p is fictitious", p)); 2122 if ((options & OBJPR_CLEANONLY) != 0 && 2123 !vm_page_none_valid(p)) { 2124 if ((options & OBJPR_NOTMAPPED) == 0 && 2125 object->ref_count != 0 && 2126 !vm_page_try_remove_write(p)) 2127 goto wired; 2128 if (p->dirty != 0) { 2129 vm_page_xunbusy(p); 2130 continue; 2131 } 2132 } 2133 if ((options & OBJPR_NOTMAPPED) == 0 && 2134 object->ref_count != 0 && !vm_page_try_remove_all(p)) 2135 goto wired; 2136 vm_page_free(p); 2137 } 2138 vm_object_pip_wakeup(object); 2139 } 2140 2141 /* 2142 * vm_object_page_noreuse: 2143 * 2144 * For the given object, attempt to move the specified pages to 2145 * the head of the inactive queue. This bypasses regular LRU 2146 * operation and allows the pages to be reused quickly under memory 2147 * pressure. If a page is wired for any reason, then it will not 2148 * be queued. Pages are specified by the range ["start", "end"). 2149 * As a special case, if "end" is zero, then the range extends from 2150 * "start" to the end of the object. 2151 * 2152 * This operation should only be performed on objects that 2153 * contain non-fictitious, managed pages. 2154 * 2155 * The object must be locked. 2156 */ 2157 void 2158 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2159 { 2160 vm_page_t p, next; 2161 2162 VM_OBJECT_ASSERT_LOCKED(object); 2163 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, 2164 ("vm_object_page_noreuse: illegal object %p", object)); 2165 if (object->resident_page_count == 0) 2166 return; 2167 p = vm_page_find_least(object, start); 2168 2169 /* 2170 * Here, the variable "p" is either (1) the page with the least pindex 2171 * greater than or equal to the parameter "start" or (2) NULL. 
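 *
 * As noted above, wired pages are not requeued; they are left alone by
 * vm_page_deactivate_noreuse().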
2172 */ 2173 for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2174 next = TAILQ_NEXT(p, listq); 2175 vm_page_deactivate_noreuse(p); 2176 } 2177 } 2178 2179 /* 2180 * Populate the specified range of the object with valid pages. Returns 2181 * TRUE if the range is successfully populated and FALSE otherwise. 2182 * 2183 * Note: This function should be optimized to pass a larger array of 2184 * pages to vm_pager_get_pages() before it is applied to a non- 2185 * OBJT_DEVICE object. 2186 * 2187 * The object must be locked. 2188 */ 2189 boolean_t 2190 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2191 { 2192 vm_page_t m; 2193 vm_pindex_t pindex; 2194 int rv; 2195 2196 VM_OBJECT_ASSERT_WLOCKED(object); 2197 for (pindex = start; pindex < end; pindex++) { 2198 rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL); 2199 if (rv != VM_PAGER_OK) 2200 break; 2201 2202 /* 2203 * Keep "m" busy because a subsequent iteration may unlock 2204 * the object. 2205 */ 2206 } 2207 if (pindex > start) { 2208 m = vm_page_lookup(object, start); 2209 while (m != NULL && m->pindex < pindex) { 2210 vm_page_xunbusy(m); 2211 m = TAILQ_NEXT(m, listq); 2212 } 2213 } 2214 return (pindex == end); 2215 } 2216 2217 /* 2218 * Routine: vm_object_coalesce 2219 * Function: Coalesces two objects backing up adjoining 2220 * regions of memory into a single object. 2221 * 2222 * returns TRUE if objects were combined. 2223 * 2224 * NOTE: Only works at the moment if the second object is NULL - 2225 * if it's not, which object do we lock first? 2226 * 2227 * Parameters: 2228 * prev_object First object to coalesce 2229 * prev_offset Offset into prev_object 2230 * prev_size Size of reference to prev_object 2231 * next_size Size of reference to the second object 2232 * reserved Indicator that extension region has 2233 * swap accounted for 2234 * 2235 * Conditions: 2236 * The object must *not* be locked. 2237 */ 2238 boolean_t 2239 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, 2240 vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) 2241 { 2242 vm_pindex_t next_pindex; 2243 2244 if (prev_object == NULL) 2245 return (TRUE); 2246 if ((prev_object->flags & OBJ_ANON) == 0) 2247 return (FALSE); 2248 2249 VM_OBJECT_WLOCK(prev_object); 2250 /* 2251 * Try to collapse the object first. 2252 */ 2253 vm_object_collapse(prev_object); 2254 2255 /* 2256 * Can't coalesce if: . more than one reference . paged out . shadows 2257 * another object . has a copy elsewhere (any of which mean that the 2258 * pages not mapped to prev_entry may be in use anyway) 2259 */ 2260 if (prev_object->backing_object != NULL) { 2261 VM_OBJECT_WUNLOCK(prev_object); 2262 return (FALSE); 2263 } 2264 2265 prev_size >>= PAGE_SHIFT; 2266 next_size >>= PAGE_SHIFT; 2267 next_pindex = OFF_TO_IDX(prev_offset) + prev_size; 2268 2269 if (prev_object->ref_count > 1 && 2270 prev_object->size != next_pindex && 2271 (prev_object->flags & OBJ_ONEMAPPING) == 0) { 2272 VM_OBJECT_WUNLOCK(prev_object); 2273 return (FALSE); 2274 } 2275 2276 /* 2277 * Account for the charge. 2278 */ 2279 if (prev_object->cred != NULL) { 2280 2281 /* 2282 * If prev_object was charged, then this mapping, 2283 * although not charged now, may become writable 2284 * later. Non-NULL cred in the object would prevent 2285 * swap reservation during enabling of the write 2286 * access, so reserve swap now. 
A failed reservation 2287 * causes allocation of a separate object for the map 2288 * entry, and swap reservation for this entry is 2289 * managed at the appropriate time. 2290 */ 2291 if (!reserved && !swap_reserve_by_cred(ptoa(next_size), 2292 prev_object->cred)) { 2293 VM_OBJECT_WUNLOCK(prev_object); 2294 return (FALSE); 2295 } 2296 prev_object->charge += ptoa(next_size); 2297 } 2298 2299 /* 2300 * Remove any pages that may still be in the object from a previous 2301 * deallocation. 2302 */ 2303 if (next_pindex < prev_object->size) { 2304 vm_object_page_remove(prev_object, next_pindex, next_pindex + 2305 next_size, 0); 2306 if (prev_object->type == OBJT_SWAP) 2307 swap_pager_freespace(prev_object, 2308 next_pindex, next_size); 2309 #if 0 2310 if (prev_object->cred != NULL) { 2311 KASSERT(prev_object->charge >= 2312 ptoa(prev_object->size - next_pindex), 2313 ("object %p overcharged 1 %jx %jx", prev_object, 2314 (uintmax_t)next_pindex, (uintmax_t)next_size)); 2315 prev_object->charge -= ptoa(prev_object->size - 2316 next_pindex); 2317 } 2318 #endif 2319 } 2320 2321 /* 2322 * Extend the object if necessary. 2323 */ 2324 if (next_pindex + next_size > prev_object->size) 2325 prev_object->size = next_pindex + next_size; 2326 2327 VM_OBJECT_WUNLOCK(prev_object); 2328 return (TRUE); 2329 } 2330 2331 void 2332 vm_object_set_writeable_dirty(vm_object_t object) 2333 { 2334 2335 /* Only set for vnodes & tmpfs */ 2336 if (object->type != OBJT_VNODE && 2337 (object->flags & OBJ_TMPFS_NODE) == 0) 2338 return; 2339 atomic_add_int(&object->generation, 1); 2340 } 2341 2342 /* 2343 * vm_object_unwire: 2344 * 2345 * For each page offset within the specified range of the given object, 2346 * find the highest-level page in the shadow chain and unwire it. A page 2347 * must exist at every page offset, and the highest-level page must be 2348 * wired. 2349 */ 2350 void 2351 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, 2352 uint8_t queue) 2353 { 2354 vm_object_t tobject, t1object; 2355 vm_page_t m, tm; 2356 vm_pindex_t end_pindex, pindex, tpindex; 2357 int depth, locked_depth; 2358 2359 KASSERT((offset & PAGE_MASK) == 0, 2360 ("vm_object_unwire: offset is not page aligned")); 2361 KASSERT((length & PAGE_MASK) == 0, 2362 ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); 2363 /* The wired count of a fictitious page never changes. */ 2364 if ((object->flags & OBJ_FICTITIOUS) != 0) 2365 return; 2366 pindex = OFF_TO_IDX(offset); 2367 end_pindex = pindex + atop(length); 2368 again: 2369 locked_depth = 1; 2370 VM_OBJECT_RLOCK(object); 2371 m = vm_page_find_least(object, pindex); 2372 while (pindex < end_pindex) { 2373 if (m == NULL || pindex < m->pindex) { 2374 /* 2375 * The first object in the shadow chain doesn't 2376 * contain a page at the current index. Therefore, 2377 * the page must exist in a backing object.
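 *
 * Walk down the shadow chain, read-locking each newly reached level,
 * until the page is found.  If a fictitious object is reached, skip
 * this offset, since the wired count of a fictitious page never
 * changes.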
2378 */ 2379 tobject = object; 2380 tpindex = pindex; 2381 depth = 0; 2382 do { 2383 tpindex += 2384 OFF_TO_IDX(tobject->backing_object_offset); 2385 tobject = tobject->backing_object; 2386 KASSERT(tobject != NULL, 2387 ("vm_object_unwire: missing page")); 2388 if ((tobject->flags & OBJ_FICTITIOUS) != 0) 2389 goto next_page; 2390 depth++; 2391 if (depth == locked_depth) { 2392 locked_depth++; 2393 VM_OBJECT_RLOCK(tobject); 2394 } 2395 } while ((tm = vm_page_lookup(tobject, tpindex)) == 2396 NULL); 2397 } else { 2398 tm = m; 2399 m = TAILQ_NEXT(m, listq); 2400 } 2401 if (vm_page_trysbusy(tm) == 0) { 2402 for (tobject = object; locked_depth >= 1; 2403 locked_depth--) { 2404 t1object = tobject->backing_object; 2405 if (tm->object != tobject) 2406 VM_OBJECT_RUNLOCK(tobject); 2407 tobject = t1object; 2408 } 2409 vm_page_busy_sleep(tm, "unwbo", true); 2410 goto again; 2411 } 2412 vm_page_unwire(tm, queue); 2413 vm_page_sunbusy(tm); 2414 next_page: 2415 pindex++; 2416 } 2417 /* Release the accumulated object locks. */ 2418 for (tobject = object; locked_depth >= 1; locked_depth--) { 2419 t1object = tobject->backing_object; 2420 VM_OBJECT_RUNLOCK(tobject); 2421 tobject = t1object; 2422 } 2423 } 2424 2425 /* 2426 * Return the vnode for the given object, or NULL if none exists. 2427 * For tmpfs objects, the function may return NULL if there is 2428 * no vnode allocated at the time of the call. 2429 */ 2430 struct vnode * 2431 vm_object_vnode(vm_object_t object) 2432 { 2433 struct vnode *vp; 2434 2435 VM_OBJECT_ASSERT_LOCKED(object); 2436 if (object->type == OBJT_VNODE) { 2437 vp = object->handle; 2438 KASSERT(vp != NULL, ("%s: OBJT_VNODE has no vnode", __func__)); 2439 } else if (object->type == OBJT_SWAP && 2440 (object->flags & OBJ_TMPFS) != 0) { 2441 vp = object->un_pager.swp.swp_tmpfs; 2442 KASSERT(vp != NULL, ("%s: OBJT_TMPFS has no vnode", __func__)); 2443 } else { 2444 vp = NULL; 2445 } 2446 return (vp); 2447 } 2448 2449 2450 /* 2451 * Busy the vm object. This prevents new pages belonging to the object from 2452 * becoming busy. Existing pages persist as busy. Callers are responsible 2453 * for checking page state before proceeding. 2454 */ 2455 void 2456 vm_object_busy(vm_object_t obj) 2457 { 2458 2459 VM_OBJECT_ASSERT_LOCKED(obj); 2460 2461 refcount_acquire(&obj->busy); 2462 /* The fence is required to order loads of page busy. */ 2463 atomic_thread_fence_acq_rel(); 2464 } 2465 2466 void 2467 vm_object_unbusy(vm_object_t obj) 2468 { 2469 2470 2471 refcount_release(&obj->busy); 2472 } 2473 2474 void 2475 vm_object_busy_wait(vm_object_t obj, const char *wmesg) 2476 { 2477 2478 VM_OBJECT_ASSERT_UNLOCKED(obj); 2479 2480 if (obj->busy) 2481 refcount_sleep(&obj->busy, wmesg, PVM); 2482 } 2483 2484 /* 2485 * Return the kvme type of the given object. 2486 * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL. 
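 *
 * For example, sysctl_vm_object_list() below obtains both the type and
 * the associated vnode with a single call:
 *
 *	kvo->kvo_type = vm_object_kvme_type(obj, &vp);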
2487 */ 2488 int 2489 vm_object_kvme_type(vm_object_t object, struct vnode **vpp) 2490 { 2491 2492 VM_OBJECT_ASSERT_LOCKED(object); 2493 if (vpp != NULL) 2494 *vpp = vm_object_vnode(object); 2495 switch (object->type) { 2496 case OBJT_DEFAULT: 2497 return (KVME_TYPE_DEFAULT); 2498 case OBJT_VNODE: 2499 return (KVME_TYPE_VNODE); 2500 case OBJT_SWAP: 2501 if ((object->flags & OBJ_TMPFS_NODE) != 0) 2502 return (KVME_TYPE_VNODE); 2503 return (KVME_TYPE_SWAP); 2504 case OBJT_DEVICE: 2505 return (KVME_TYPE_DEVICE); 2506 case OBJT_PHYS: 2507 return (KVME_TYPE_PHYS); 2508 case OBJT_DEAD: 2509 return (KVME_TYPE_DEAD); 2510 case OBJT_SG: 2511 return (KVME_TYPE_SG); 2512 case OBJT_MGTDEVICE: 2513 return (KVME_TYPE_MGTDEVICE); 2514 default: 2515 return (KVME_TYPE_UNKNOWN); 2516 } 2517 } 2518 2519 static int 2520 sysctl_vm_object_list(SYSCTL_HANDLER_ARGS) 2521 { 2522 struct kinfo_vmobject *kvo; 2523 char *fullpath, *freepath; 2524 struct vnode *vp; 2525 struct vattr va; 2526 vm_object_t obj; 2527 vm_page_t m; 2528 int count, error; 2529 2530 if (req->oldptr == NULL) { 2531 /* 2532 * If an old buffer has not been provided, generate an 2533 * estimate of the space needed for a subsequent call. 2534 */ 2535 mtx_lock(&vm_object_list_mtx); 2536 count = 0; 2537 TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2538 if (obj->type == OBJT_DEAD) 2539 continue; 2540 count++; 2541 } 2542 mtx_unlock(&vm_object_list_mtx); 2543 return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) * 2544 count * 11 / 10)); 2545 } 2546 2547 kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK); 2548 error = 0; 2549 2550 /* 2551 * VM objects are type stable and are never removed from the 2552 * list once added. This allows us to safely read obj->object_list 2553 * after reacquiring the VM object lock. 2554 */ 2555 mtx_lock(&vm_object_list_mtx); 2556 TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2557 if (obj->type == OBJT_DEAD) 2558 continue; 2559 VM_OBJECT_RLOCK(obj); 2560 if (obj->type == OBJT_DEAD) { 2561 VM_OBJECT_RUNLOCK(obj); 2562 continue; 2563 } 2564 mtx_unlock(&vm_object_list_mtx); 2565 kvo->kvo_size = ptoa(obj->size); 2566 kvo->kvo_resident = obj->resident_page_count; 2567 kvo->kvo_ref_count = obj->ref_count; 2568 kvo->kvo_shadow_count = obj->shadow_count; 2569 kvo->kvo_memattr = obj->memattr; 2570 kvo->kvo_active = 0; 2571 kvo->kvo_inactive = 0; 2572 TAILQ_FOREACH(m, &obj->memq, listq) { 2573 /* 2574 * A page may belong to the object but be 2575 * dequeued and set to PQ_NONE while the 2576 * object lock is not held. This makes the 2577 * reads of m->queue below racy, and we do not 2578 * count pages set to PQ_NONE. However, this 2579 * sysctl is only meant to give an 2580 * approximation of the system anyway. 
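 * Pages in queues other than PQ_ACTIVE and PQ_INACTIVE are likewise
 * left out of the counts.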
2581 */ 2582 if (m->a.queue == PQ_ACTIVE) 2583 kvo->kvo_active++; 2584 else if (m->a.queue == PQ_INACTIVE) 2585 kvo->kvo_inactive++; 2586 } 2587 2588 kvo->kvo_vn_fileid = 0; 2589 kvo->kvo_vn_fsid = 0; 2590 kvo->kvo_vn_fsid_freebsd11 = 0; 2591 freepath = NULL; 2592 fullpath = ""; 2593 kvo->kvo_type = vm_object_kvme_type(obj, &vp); 2594 if (vp != NULL) 2595 vref(vp); 2596 VM_OBJECT_RUNLOCK(obj); 2597 if (vp != NULL) { 2598 vn_fullpath(curthread, vp, &fullpath, &freepath); 2599 vn_lock(vp, LK_SHARED | LK_RETRY); 2600 if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) { 2601 kvo->kvo_vn_fileid = va.va_fileid; 2602 kvo->kvo_vn_fsid = va.va_fsid; 2603 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid; 2604 /* truncate */ 2605 } 2606 vput(vp); 2607 } 2608 2609 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path)); 2610 if (freepath != NULL) 2611 free(freepath, M_TEMP); 2612 2613 /* Pack record size down */ 2614 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) 2615 + strlen(kvo->kvo_path) + 1; 2616 kvo->kvo_structsize = roundup(kvo->kvo_structsize, 2617 sizeof(uint64_t)); 2618 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize); 2619 mtx_lock(&vm_object_list_mtx); 2620 if (error) 2621 break; 2622 } 2623 mtx_unlock(&vm_object_list_mtx); 2624 free(kvo, M_TEMP); 2625 return (error); 2626 } 2627 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | 2628 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject", 2629 "List of VM objects"); 2630 2631 #include "opt_ddb.h" 2632 #ifdef DDB 2633 #include <sys/kernel.h> 2634 2635 #include <sys/cons.h> 2636 2637 #include <ddb/ddb.h> 2638 2639 static int 2640 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2641 { 2642 vm_map_t tmpm; 2643 vm_map_entry_t tmpe; 2644 vm_object_t obj; 2645 2646 if (map == 0) 2647 return 0; 2648 2649 if (entry == 0) { 2650 VM_MAP_ENTRY_FOREACH(tmpe, map) { 2651 if (_vm_object_in_map(map, object, tmpe)) { 2652 return 1; 2653 } 2654 } 2655 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2656 tmpm = entry->object.sub_map; 2657 VM_MAP_ENTRY_FOREACH(tmpe, tmpm) { 2658 if (_vm_object_in_map(tmpm, object, tmpe)) { 2659 return 1; 2660 } 2661 } 2662 } else if ((obj = entry->object.vm_object) != NULL) { 2663 for (; obj; obj = obj->backing_object) 2664 if (obj == object) { 2665 return 1; 2666 } 2667 } 2668 return 0; 2669 } 2670 2671 static int 2672 vm_object_in_map(vm_object_t object) 2673 { 2674 struct proc *p; 2675 2676 /* sx_slock(&allproc_lock); */ 2677 FOREACH_PROC_IN_SYSTEM(p) { 2678 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 2679 continue; 2680 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { 2681 /* sx_sunlock(&allproc_lock); */ 2682 return 1; 2683 } 2684 } 2685 /* sx_sunlock(&allproc_lock); */ 2686 if (_vm_object_in_map(kernel_map, object, 0)) 2687 return 1; 2688 return 0; 2689 } 2690 2691 DB_SHOW_COMMAND(vmochk, vm_object_check) 2692 { 2693 vm_object_t object; 2694 2695 /* 2696 * make sure that internal objs are in a map somewhere 2697 * and none have zero ref counts. 
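 *
 * Invoked from the kernel debugger prompt as:
 *
 *	db> show vmochk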
2698 */ 2699 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2700 if ((object->flags & OBJ_ANON) != 0) { 2701 if (object->ref_count == 0) { 2702 db_printf("vmochk: internal obj has zero ref count: %ld\n", 2703 (long)object->size); 2704 } 2705 if (!vm_object_in_map(object)) { 2706 db_printf( 2707 "vmochk: internal obj is not in a map: " 2708 "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", 2709 object->ref_count, (u_long)object->size, 2710 (u_long)object->size, 2711 (void *)object->backing_object); 2712 } 2713 } 2714 } 2715 } 2716 2717 /* 2718 * vm_object_print: [ debug ] 2719 */ 2720 DB_SHOW_COMMAND(object, vm_object_print_static) 2721 { 2722 /* XXX convert args. */ 2723 vm_object_t object = (vm_object_t)addr; 2724 boolean_t full = have_addr; 2725 2726 vm_page_t p; 2727 2728 /* XXX count is an (unused) arg. Avoid shadowing it. */ 2729 #define count was_count 2730 2731 int count; 2732 2733 if (object == NULL) 2734 return; 2735 2736 db_iprintf( 2737 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", 2738 object, (int)object->type, (uintmax_t)object->size, 2739 object->resident_page_count, object->ref_count, object->flags, 2740 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); 2741 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", 2742 object->shadow_count, 2743 object->backing_object ? object->backing_object->ref_count : 0, 2744 object->backing_object, (uintmax_t)object->backing_object_offset); 2745 2746 if (!full) 2747 return; 2748 2749 db_indent += 2; 2750 count = 0; 2751 TAILQ_FOREACH(p, &object->memq, listq) { 2752 if (count == 0) 2753 db_iprintf("memory:="); 2754 else if (count == 6) { 2755 db_printf("\n"); 2756 db_iprintf(" ..."); 2757 count = 0; 2758 } else 2759 db_printf(","); 2760 count++; 2761 2762 db_printf("(off=0x%jx,page=0x%jx)", 2763 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); 2764 } 2765 if (count != 0) 2766 db_printf("\n"); 2767 db_indent -= 2; 2768 } 2769 2770 /* XXX. */ 2771 #undef count 2772 2773 /* XXX need this non-static entry for calling from vm_map_print. 
*/ 2774 void 2775 vm_object_print( 2776 /* db_expr_t */ long addr, 2777 boolean_t have_addr, 2778 /* db_expr_t */ long count, 2779 char *modif) 2780 { 2781 vm_object_print_static(addr, have_addr, count, modif); 2782 } 2783 2784 DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 2785 { 2786 vm_object_t object; 2787 vm_pindex_t fidx; 2788 vm_paddr_t pa; 2789 vm_page_t m, prev_m; 2790 int rcount, nl, c; 2791 2792 nl = 0; 2793 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2794 db_printf("new object: %p\n", (void *)object); 2795 if (nl > 18) { 2796 c = cngetc(); 2797 if (c != ' ') 2798 return; 2799 nl = 0; 2800 } 2801 nl++; 2802 rcount = 0; 2803 fidx = 0; 2804 pa = -1; 2805 TAILQ_FOREACH(m, &object->memq, listq) { 2806 if (m->pindex > 128) 2807 break; 2808 if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL && 2809 prev_m->pindex + 1 != m->pindex) { 2810 if (rcount) { 2811 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2812 (long)fidx, rcount, (long)pa); 2813 if (nl > 18) { 2814 c = cngetc(); 2815 if (c != ' ') 2816 return; 2817 nl = 0; 2818 } 2819 nl++; 2820 rcount = 0; 2821 } 2822 } 2823 if (rcount && 2824 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 2825 ++rcount; 2826 continue; 2827 } 2828 if (rcount) { 2829 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2830 (long)fidx, rcount, (long)pa); 2831 if (nl > 18) { 2832 c = cngetc(); 2833 if (c != ' ') 2834 return; 2835 nl = 0; 2836 } 2837 nl++; 2838 } 2839 fidx = m->pindex; 2840 pa = VM_PAGE_TO_PHYS(m); 2841 rcount = 1; 2842 } 2843 if (rcount) { 2844 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2845 (long)fidx, rcount, (long)pa); 2846 if (nl > 18) { 2847 c = cngetc(); 2848 if (c != ' ') 2849 return; 2850 nl = 0; 2851 } 2852 nl++; 2853 } 2854 } 2855 } 2856 #endif /* DDB */ 2857
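
#if 0
/*
 * Illustrative sketch only (not compiled): one way a caller might apply
 * vm_object_page_remove() to free the clean pages backing a page-aligned
 * byte range of an object, following the locking rules and OBJPR_*
 * option semantics documented above.  The helper is hypothetical and is
 * not part of this file's interface.
 */
static void
example_discard_clean_range(vm_object_t object, vm_ooffset_t offset,
    vm_size_t length)
{

	VM_OBJECT_WLOCK(object);
	/* OBJPR_CLEANONLY frees only the non-dirty pages in the range. */
	vm_object_page_remove(object, OFF_TO_IDX(offset),
	    OFF_TO_IDX(offset + length), OBJPR_CLEANONLY);
	VM_OBJECT_WUNLOCK(object);
}
#endif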