/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/pctrie.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, boolean_t *clearobjflags,
		    boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    boolean_t *clearobjflags);
static void	vm_object_qcollapse(vm_object_t object);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */
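
/*
 * Illustrative sketch (editor's addition, not part of the original code):
 * the reference-counted life cycle described above, for a hypothetical
 * caller that needs "len" bytes of anonymous, pageable backing store:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, atop(len));
 *	vm_object_reference(obj);	ref_count is now 2
 *	...
 *	vm_object_deallocate(obj);	drop the extra reference
 *	vm_object_deallocate(obj);	last reference; the object is terminated
 */
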
struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
    "VM object stats");

static counter_u64_t object_collapses = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses,
    "VM object collapses");

static counter_u64_t object_bypasses = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses,
    "VM object bypasses");

static void
counter_startup(void)
{

	object_collapses = counter_u64_alloc(M_WAITOK);
	object_bypasses = counter_u64_alloc(M_WAITOK);
}
SYSINIT(object_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL);

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->ref_count == 0,
	    ("object %p ref_count = %d", object, object->ref_count));
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages in its memq", object));
	KASSERT(vm_radix_is_empty(&object->rtree),
	    ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
	KASSERT(object->type == OBJT_DEAD,
	    ("object %p has non-dead type %d",
	    object, object->type));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);

	/* These are true for any object that has been freed */
	object->type = OBJT_DEAD;
	object->ref_count = 0;
	vm_radix_init(&object->rtree);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->flags = OBJ_DEAD;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
	return (0);
}

static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	if (type == OBJT_SWAP)
		pctrie_init(&object->un_pager.swp.swp_blks);

	/*
	 * Ensure that swap_pager_swapoff() iteration over object_list
	 * sees up to date type and pctrie head if it observed
	 * non-dead object.
	 */
	atomic_thread_fence_rel();

	switch (type) {
	case OBJT_DEAD:
		panic("_vm_object_allocate: can't create OBJT_DEAD");
	case OBJT_DEFAULT:
	case OBJT_SWAP:
		object->flags = OBJ_ONEMAPPING;
		break;
	case OBJT_DEVICE:
	case OBJT_SG:
		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
		break;
	case OBJT_MGTDEVICE:
		object->flags = OBJ_FICTITIOUS;
		break;
	case OBJT_PHYS:
		object->flags = OBJ_UNMANAGED;
		break;
	case OBJT_VNODE:
		object->flags = 0;
		break;
	default:
		panic("_vm_object_allocate: type %d is undefined", type);
	}
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->cred = NULL;
	object->charge = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	umtx_shm_object_init(object);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	rw_init(&kernel_object->lock, "kernel vm object");
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);

	vm_radix_zinit();
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->flags &= ~bits;
}
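
/*
 * Illustrative sketch (editor's addition): object flags are only read and
 * written with the object write-locked, as the assertion above requires.
 * A hypothetical caller clearing a flag would therefore look like:
 *
 *	VM_OBJECT_WLOCK(obj);
 *	vm_object_clear_flag(obj, OBJ_ONEMAPPING);
 *	VM_OBJECT_WUNLOCK(obj);
 */
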
/*
 *	Sets the default memory attribute for the specified object.  Pages
 *	that are allocated to this object are by default assigned this memory
 *	attribute.
 *
 *	Presently, this function must be called before any pages are allocated
 *	to the object.  In the future, this requirement may be relaxed for
 *	"default" and "swap" objects.
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_MGTDEVICE:
	case OBJT_PHYS:
	case OBJT_SG:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	default:
		panic("vm_object_set_memattr: object %p is of undefined type",
		    object);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
	}
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}
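
/*
 * Illustrative sketch (editor's addition): code that already holds the
 * object lock takes additional references with the *_locked variant, e.g.
 *
 *	VM_OBJECT_WLOCK(obj);
 *	vm_object_reference_locked(obj);
 *	VM_OBJECT_WUNLOCK(obj);
 *	...
 *	vm_object_deallocate(obj);
 *
 * while lockless callers simply use vm_object_reference().
 */
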
/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vn_printf(vp, "vm_object_vndeallocate ");
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
		umtx_shm_object_terminated(object);

	/*
	 * The test for VV_TEXT does not need a bypass to reach the right
	 * vnode, since vp is obtained from object->handle.
	 */
	if (object->ref_count > 1 || (vp->v_vflag & VV_TEXT) == 0) {
		object->ref_count--;
		VM_OBJECT_WUNLOCK(object);
		/* vrele may need the vnode lock. */
		vrele(vp);
	} else {
		vhold(vp);
		VM_OBJECT_WUNLOCK(object);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vdrop(vp);
		VM_OBJECT_WLOCK(object);
		object->ref_count--;
		if (object->type == OBJT_DEAD) {
			VM_OBJECT_WUNLOCK(object);
			VOP_UNLOCK(vp, 0);
		} else {
			if (object->ref_count == 0)
				VOP_UNSET_TEXT(vp);
			VM_OBJECT_WUNLOCK(object);
			vput(vp);
		}
	}
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	struct vnode *vp;

	while (object != NULL) {
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_WUNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS) != 0) {
				vp = object->un_pager.swp.swp_tmpfs;
				vhold(vp);
				VM_OBJECT_WUNLOCK(object);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				if (object->type == OBJT_DEAD ||
				    object->ref_count != 1) {
					VM_OBJECT_WUNLOCK(object);
					VOP_UNLOCK(vp, 0);
					vdrop(vp);
					return;
				}
				if ((object->flags & OBJ_TMPFS) != 0)
					VOP_UNSET_TEXT(vp);
				VOP_UNLOCK(vp, 0);
				vdrop(vp);
			}
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			    (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS_NODE) == 0))) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
				    ("shadowed tmpfs v_object %p", object));
				if (!VM_OBJECT_TRYWLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_WUNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				    robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_WUNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_WUNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						VM_OBJECT_SLEEP(object, object,
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_WLOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_WUNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_WUNLOCK(object);
					continue;
				}
				VM_OBJECT_WUNLOCK(robject);
			}
			VM_OBJECT_WUNLOCK(object);
			return;
		}
doterm:
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
			    ("shadowed tmpfs v_object 2 %p", object));
			VM_OBJECT_WLOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			VM_OBJECT_WUNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_WUNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_destroy removes the object from the global object list
 *	and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_terminate_pages removes any remaining pageable pages
 *	from the object and resets the object to an empty state.
 */
static void
vm_object_terminate_pages(vm_object_t object)
{
	vm_page_t p, p_next;
	struct mtx *mtx;

	VM_OBJECT_ASSERT_WLOCKED(object);

	mtx = NULL;

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to an empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		vm_page_assert_unbusied(p);
		if ((object->flags & OBJ_UNMANAGED) == 0)
			/*
			 * vm_page_free_prep() only needs the page
			 * lock for managed pages.
			 */
			vm_page_change_lock(p, &mtx);
		p->object = NULL;
		if (p->wire_count != 0)
			continue;
		VM_CNT_INC(v_pfree);
		vm_page_free(p);
	}
	if (mtx != NULL)
		mtx_unlock(mtx);

	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
	 */
	if (object->resident_page_count != 0) {
		vm_radix_reclaim_allnodes(&object->rtree);
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	if ((object->flags & OBJ_PG_DTOR) == 0)
		vm_object_terminate_pages(object);

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif

	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
{

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
		*clearobjflags = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns FALSE if some page from the range was not written, as
 *	reported by the pager, and TRUE otherwise.
 */
boolean_t
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int curgeneration, n, pagerflags;
	boolean_t clearobjflags, eio, res;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
	 * objects.  The check below prevents the function from
	 * operating on non-vnode objects.
	 */
	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
	    object->resident_page_count == 0)
		return (TRUE);

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;

	tstart = OFF_TO_IDX(start);
	tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK);
	clearobjflags = tstart == 0 && tend >= object->size;
	res = TRUE;

rescan:
	curgeneration = object->generation;

	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
		pi = p->pindex;
		if (pi >= tend)
			break;
		np = TAILQ_NEXT(p, listq);
		if (p->valid == 0)
			continue;
		if (vm_page_sleep_if_busy(p, "vpcwai")) {
			if (object->generation != curgeneration) {
				if ((flags & OBJPC_SYNC) != 0)
					goto rescan;
				else
					clearobjflags = FALSE;
			}
			np = vm_page_find_least(object, pi);
			continue;
		}
		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
			continue;

		n = vm_object_page_collect_flush(object, p, pagerflags,
		    flags, &clearobjflags, &eio);
		if (eio) {
			res = FALSE;
			clearobjflags = FALSE;
		}
		if (object->generation != curgeneration) {
			if ((flags & OBJPC_SYNC) != 0)
				goto rescan;
			else
				clearobjflags = FALSE;
		}

		/*
		 * If the VOP_PUTPAGES() did a truncated write, so
		 * that even the first page of the run is not fully
		 * written, vm_pageout_flush() returns 0 as the run
		 * length.  Since the condition that caused the truncated
		 * write may be permanent, e.g. exhausted free space,
		 * accepting n == 0 would cause an infinite loop.
		 *
		 * Forwarding the iterator leaves the unwritten page
		 * behind, but there is not much we can do there if
		 * the filesystem refuses to write it.
		 */
		if (n == 0) {
			n = 1;
			clearobjflags = FALSE;
		}
		np = vm_page_find_least(object, pi + n);
	}
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
#endif

	if (clearobjflags)
		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
	return (res);
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, boolean_t *clearobjflags, boolean_t *eio)
{
	vm_page_t ma[vm_pageout_page_count], p_first, tp;
	int count, i, mreq, runlen;

	vm_page_lock_assert(p, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	count = 1;
	mreq = 0;

	for (tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_next(tp);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
	}

	for (p_first = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_prev(p_first);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
		p_first = tp;
		mreq++;
	}

	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
		ma[i] = tp;

	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
	return (runlen);
}
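
/*
 * Illustrative sketch (editor's addition): vm_object_sync() below drives
 * vm_object_page_clean() in essentially this way when flushing a dirty,
 * mapped vnode object:
 *
 *	VM_OBJECT_WLOCK(object);
 *	res = vm_object_page_clean(object, offset, offset + size, OBJPC_SYNC);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * Passing start == end == 0 cleans the entire object.
 */
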
/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * If the backing object is a device object with unmanaged pages, then any
 * mappings to the specified range of pages must be removed before this
 * function is called.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
 */
boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int error, flags, fsync_after;
	boolean_t res;

	if (object == NULL)
		return (TRUE);
	res = TRUE;
	error = 0;
	VM_OBJECT_WLOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_WLOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_WUNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0 &&
	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
		VM_OBJECT_WUNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (syncio && !invalidate && offset == 0 &&
		    atop(size) == object->size) {
			/*
			 * If syncing the whole mapping of the file,
			 * it is faster to schedule all the writes in
			 * async mode, also allowing the clustering,
			 * and then wait for i/o to complete.
			 */
			flags = 0;
			fsync_after = TRUE;
		} else {
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
			fsync_after = FALSE;
		}
		VM_OBJECT_WLOCK(object);
		res = vm_object_page_clean(object, offset, offset + size,
		    flags);
		VM_OBJECT_WUNLOCK(object);
		if (fsync_after)
			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error != 0)
			res = FALSE;
		VM_OBJECT_WLOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	     object->type == OBJT_DEVICE) && invalidate) {
		if (object->type == OBJT_DEVICE)
			/*
			 * The option OBJPR_NOTMAPPED must be passed here
			 * because vm_object_page_remove() cannot remove
			 * unmanaged mappings.
			 */
			flags = OBJPR_NOTMAPPED;
		else if (old_msync)
			flags = 0;
		else
			flags = OBJPR_CLEANONLY;
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
	}
	VM_OBJECT_WUNLOCK(object);
	return (res);
}
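
/*
 * Illustrative sketch (editor's addition, simplified): the msync(2) path
 * ends up calling vm_object_sync() for each map entry in the synced range,
 * roughly as
 *
 *	vm_object_sync(entry->object.vm_object, offset, size,
 *	    (flags & MS_SYNC) != 0, (flags & MS_INVALIDATE) != 0);
 *
 * so "syncio" requests synchronous completion and "invalidate" requests
 * that the cleaned pages also be removed from the object.
 */
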
/*
 * Determine whether the given advice can be applied to the object.  Advice is
 * not applied to unmanaged pages since they never belong to page queues, and
 * since MADV_FREE is destructive, it can apply only to anonymous pages that
 * have been mapped at most once.
 */
static bool
vm_object_advice_applies(vm_object_t object, int advice)
{

	if ((object->flags & OBJ_UNMANAGED) != 0)
		return (false);
	if (advice != MADV_FREE)
		return (true);
	return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) &&
	    (object->flags & OBJ_ONEMAPPING) != 0);
}

static void
vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
    vm_size_t size)
{

	if (advice == MADV_FREE && object->type == OBJT_SWAP)
		swap_pager_freespace(object, pindex, size);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
    int advice)
{
	vm_pindex_t tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m, tm;

	if (object == NULL)
		return;

relookup:
	VM_OBJECT_WLOCK(object);
	if (!vm_object_advice_applies(object, advice)) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
		tobject = object;

		/*
		 * If the next page isn't resident in the top-level object, we
		 * need to search the shadow chain.  When applying MADV_FREE, we
		 * take care to release any swap space used to store
		 * non-resident pages.
		 */
		if (m == NULL || pindex < m->pindex) {
			/*
			 * Optimize a common case: if the top-level object has
			 * no backing object, we can skip over the non-resident
			 * range in constant time.
			 */
			if (object->backing_object == NULL) {
				tpindex = (m != NULL && m->pindex < end) ?
				    m->pindex : end;
				vm_object_madvise_freespace(object, advice,
				    pindex, tpindex - pindex);
				if ((pindex = tpindex) == end)
					break;
				goto next_page;
			}

			tpindex = pindex;
			do {
				vm_object_madvise_freespace(tobject, advice,
				    tpindex, 1);
				/*
				 * Prepare to search the next object in the
				 * chain.
				 */
				backing_object = tobject->backing_object;
				if (backing_object == NULL)
					goto next_pindex;
				VM_OBJECT_WLOCK(backing_object);
				tpindex +=
				    OFF_TO_IDX(tobject->backing_object_offset);
				if (tobject != object)
					VM_OBJECT_WUNLOCK(tobject);
				tobject = backing_object;
				if (!vm_object_advice_applies(tobject, advice))
					goto next_pindex;
			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
			    NULL);
		} else {
next_page:
			tm = m;
			m = TAILQ_NEXT(m, listq);
		}

		/*
		 * If the page is not in a normal state, skip it.
		 */
		if (tm->valid != VM_PAGE_BITS_ALL)
			goto next_pindex;
		vm_page_lock(tm);
		if (vm_page_held(tm)) {
			vm_page_unlock(tm);
			goto next_pindex;
		}
		KASSERT((tm->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_madvise: page %p is fictitious", tm));
		KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
		    ("vm_object_madvise: page %p is not managed", tm));
		if (vm_page_busied(tm)) {
			if (object != tobject)
				VM_OBJECT_WUNLOCK(tobject);
			VM_OBJECT_WUNLOCK(object);
			if (advice == MADV_WILLNEED) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_aflag_set(tm, PGA_REFERENCED);
			}
			vm_page_busy_sleep(tm, "madvpo", false);
			goto relookup;
		}
		vm_page_advise(tm, advice);
		vm_page_unlock(tm);
		vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
next_pindex:
		if (tobject != object)
			VM_OBJECT_WUNLOCK(tobject);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		    source->type == OBJT_SWAP)) {
			VM_OBJECT_WUNLOCK(source);
			return;
		}
		VM_OBJECT_WUNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, atop(length));

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		result->domain = source->domain;
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
#if VM_NRESERVLEVEL > 0
		result->flags |= source->flags & OBJ_COLORED;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
		    ((1 << (VM_NFREEORDER - 1)) - 1);
#endif
		VM_OBJECT_WUNLOCK(source);
	}


	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}
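
/*
 * Illustrative sketch (editor's addition, simplified): when the VM map code
 * forces a copy-on-write mapping, it replaces the entry's object with a
 * shadow in essentially this way:
 *
 *	vm_object_shadow(&entry->object.vm_object, &entry->offset,
 *	    entry->end - entry->start);
 *
 * On return the entry points at the new shadow object and its offset has
 * been reset to 0.
 */
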
/*
 *	vm_object_split:
 *
 *	Split the pages in a map entry into a new object.  This affords
 *	easier removal of unused pages, and keeps object inheritance from
 *	being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m, m_next;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t idx, offidxstart;
	vm_size_t size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_WUNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	size = atop(entry->end - entry->start);

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	/*
	 * At this point, the new object is still private, so the order in
	 * which the original and new objects are locked does not matter.
	 */
	VM_OBJECT_WLOCK(new_object);
	VM_OBJECT_WLOCK(orig_object);
	new_object->domain = orig_object->domain;
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		if ((source->flags & OBJ_DEAD) != 0) {
			VM_OBJECT_WUNLOCK(source);
			VM_OBJECT_WUNLOCK(orig_object);
			VM_OBJECT_WUNLOCK(new_object);
			vm_object_deallocate(new_object);
			VM_OBJECT_WLOCK(orig_object);
			return;
		}
		LIST_INSERT_HEAD(&source->shadow_head,
		    new_object, shadow_list);
		source->shadow_count++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(source);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	if (orig_object->cred != NULL) {
		new_object->cred = orig_object->cred;
		crhold(orig_object->cred);
		new_object->charge = ptoa(size);
		KASSERT(orig_object->charge >= ptoa(size),
		    ("orig_object->charge < 0"));
		orig_object->charge -= ptoa(size);
	}
retry:
	m = vm_page_find_least(orig_object, offidxstart);
	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
	    m = m_next) {
		m_next = TAILQ_NEXT(m, listq);

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if (vm_page_busied(m)) {
			VM_OBJECT_WUNLOCK(new_object);
			vm_page_lock(m);
			VM_OBJECT_WUNLOCK(orig_object);
			vm_page_busy_sleep(m, "spltwt", false);
			VM_OBJECT_WLOCK(orig_object);
			VM_OBJECT_WLOCK(new_object);
			goto retry;
		}

		/* vm_page_rename() will dirty the page. */
		if (vm_page_rename(m, new_object, idx)) {
			VM_OBJECT_WUNLOCK(new_object);
			VM_OBJECT_WUNLOCK(orig_object);
			vm_radix_wait();
			VM_OBJECT_WLOCK(orig_object);
			VM_OBJECT_WLOCK(new_object);
			goto retry;
		}
#if VM_NRESERVLEVEL > 0
		/*
		 * If some of the reservation's allocated pages remain with
		 * the original object, then transferring the reservation to
		 * the new object is neither particularly beneficial nor
		 * particularly harmful as compared to leaving the reservation
		 * with the original object.  If, however, all of the
		 * reservation's allocated pages are transferred to the new
		 * object, then transferring the reservation is typically
		 * beneficial.  Determining which of these two cases applies
		 * would be more costly than unconditionally renaming the
		 * reservation.
		 */
		vm_reserv_rename(m, new_object, orig_object, offidxstart);
#endif
		if (orig_object->type == OBJT_SWAP)
			vm_page_xbusy(m);
	}
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
		TAILQ_FOREACH(m, &new_object->memq, listq)
			vm_page_xunbusy(m);
	}
	VM_OBJECT_WUNLOCK(orig_object);
	VM_OBJECT_WUNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_WLOCK(new_object);
}

#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static vm_page_t
vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next,
    int op)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);
	backing_object = object->backing_object;
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p));
	KASSERT(p == NULL || p->object == object || p->object == backing_object,
	    ("invalid ownership %p %p %p", p, object, backing_object));
	if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
		return (next);
	if (p != NULL)
		vm_page_lock(p);
	VM_OBJECT_WUNLOCK(object);
	VM_OBJECT_WUNLOCK(backing_object);
	/* The page is only NULL when rename fails. */
	if (p == NULL)
		vm_radix_wait();
	else
		vm_page_busy_sleep(p, "vmocol", false);
	VM_OBJECT_WLOCK(object);
	VM_OBJECT_WLOCK(backing_object);
	return (TAILQ_FIRST(&backing_object->memq));
}

static bool
vm_object_scan_all_shadowed(vm_object_t object)
{
	vm_object_t backing_object;
	vm_page_t p, pp;
	vm_pindex_t backing_offset_index, new_pindex, pi, ps;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);

	backing_object = object->backing_object;

	if (backing_object->type != OBJT_DEFAULT &&
	    backing_object->type != OBJT_SWAP)
		return (false);

	pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	p = vm_page_find_least(backing_object, pi);
	ps = swap_pager_find_least(backing_object, pi);

	/*
	 * Only check pages inside the parent object's range and
	 * inside the parent object's mapping of the backing object.
	 */
	for (;; pi++) {
		if (p != NULL && p->pindex < pi)
			p = TAILQ_NEXT(p, listq);
		if (ps < pi)
			ps = swap_pager_find_least(backing_object, pi);
		if (p == NULL && ps >= backing_object->size)
			break;
		else if (p == NULL)
			pi = ps;
		else
			pi = MIN(p->pindex, ps);

		new_pindex = pi - backing_offset_index;
		if (new_pindex >= object->size)
			break;

		/*
		 * See if the parent has the page or if the parent's object
		 * pager has the page.  If the parent has the page but the page
		 * is not valid, the parent's object pager must have the page.
		 *
		 * If this fails, the parent does not completely shadow the
		 * object and we might as well give up now.
		 */
		pp = vm_page_lookup(object, new_pindex);
		if ((pp == NULL || pp->valid == 0) &&
		    !vm_pager_has_page(object, new_pindex, NULL, NULL))
			return (false);
	}
	return (true);
}

static bool
vm_object_collapse_scan(vm_object_t object, int op)
{
	vm_object_t backing_object;
	vm_page_t next, p, pp;
	vm_pindex_t backing_offset_index, new_pindex;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if ((op & OBSC_COLLAPSE_WAIT) != 0)
		vm_object_set_flag(backing_object, OBJ_DEAD);

	/*
	 * Our scan
	 */
	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		new_pindex = p->pindex - backing_offset_index;

		/*
		 * Check for busy page
		 */
		if (vm_page_busied(p)) {
			next = vm_object_collapse_scan_wait(object, p, next, op);
			continue;
		}

		KASSERT(p->object == backing_object,
		    ("vm_object_collapse_scan: object mismatch"));

		if (p->pindex < backing_offset_index ||
		    new_pindex >= object->size) {
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object, p->pindex,
				    1);

			/*
			 * Page is out of the parent object's range, we can
			 * simply destroy it.
			 */
			vm_page_lock(p);
			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (p->wire_count == 0)
				vm_page_free(p);
			else
				vm_page_remove(p);
			vm_page_unlock(p);
			continue;
		}

		pp = vm_page_lookup(object, new_pindex);
		if (pp != NULL && vm_page_busied(pp)) {
			/*
			 * The page in the parent is busy and possibly not
			 * (yet) valid.  Until its state is finalized by the
			 * busy bit owner, we can't tell whether it shadows the
			 * original page.  Therefore, we must either skip it
			 * and the original (backing_object) page or wait for
			 * its state to be finalized.
			 *
			 * This is due to a race with vm_fault() where we must
			 * unbusy the original (backing_obj) page before we can
			 * (re)lock the parent.  Hence we can get here.
			 */
			next = vm_object_collapse_scan_wait(object, pp, next,
			    op);
			continue;
		}

		KASSERT(pp == NULL || pp->valid != 0,
		    ("unbusy invalid page %p", pp));

		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
		    NULL)) {
			/*
			 * The page already exists in the parent OR swap exists
			 * for this location in the parent.  Leave the parent's
			 * page alone.  Destroy the original page from the
			 * backing object.
			 */
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object, p->pindex,
				    1);
			vm_page_lock(p);
			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (p->wire_count == 0)
				vm_page_free(p);
			else
				vm_page_remove(p);
			vm_page_unlock(p);
			continue;
		}

		/*
		 * Page does not exist in parent, rename the page from the
		 * backing object to the main object.
		 *
		 * If the page was mapped to a process, it can remain mapped
		 * through the rename.  vm_page_rename() will dirty the page.
		 */
		if (vm_page_rename(p, object, new_pindex)) {
			next = vm_object_collapse_scan_wait(object, NULL, next,
			    op);
			continue;
		}

		/* Use the old pindex to free the right page. */
		if (backing_object->type == OBJT_SWAP)
			swap_pager_freespace(backing_object,
			    new_pindex + backing_offset_index, 1);

#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(p, object, backing_object,
		    backing_offset_index);
#endif
	}
	return (true);
}


/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	if (backing_object->ref_count != 1)
		return;

	vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT);
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	vm_object_t backing_object, new_backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsable.
		 */
		VM_OBJECT_WLOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		    backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		    object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_WUNLOCK(backing_object);
			break;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			VM_OBJECT_WUNLOCK(backing_object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_collapse_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			vm_object_pip_add(object, 1);
			vm_object_pip_add(backing_object, 1);

			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT);

#if VM_NRESERVLEVEL > 0
			/*
			 * Break any reservations from backing_object.
			 */
			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
				vm_reserv_break_all(backing_object);
#endif

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 * Since swap_pager_copy() is being asked to
				 * destroy the source, it will change the
				 * backing_object's type to OBJT_DEFAULT.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			if (backing_object->backing_object) {
				VM_OBJECT_WLOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				VM_OBJECT_WUNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1, (
"backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			vm_object_pip_wakeup(backing_object);
			backing_object->type = OBJT_DEAD;
			backing_object->ref_count = 0;
			VM_OBJECT_WUNLOCK(backing_object);
			vm_object_destroy(backing_object);

			vm_object_pip_wakeup(object);
			counter_u64_add(object_collapses, 1);
		} else {
			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (object->resident_page_count != object->size &&
			    !vm_object_scan_all_shadowed(object)) {
				VM_OBJECT_WUNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_WLOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_WUNLOCK(new_backing_object);
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_WUNLOCK(backing_object);
			counter_u64_add(object_bypasses, 1);
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}
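
/*
 * Illustrative sketch (editor's addition): callers such as
 * vm_object_coalesce() below invoke the collapse pass opportunistically
 * while holding the object's write lock:
 *
 *	VM_OBJECT_WLOCK(prev_object);
 *	vm_object_collapse(prev_object);
 *	...
 *	VM_OBJECT_WUNLOCK(prev_object);
 */
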
/*
 *	vm_object_page_remove:
 *
 *	For the given object, either frees or invalidates each of the
 *	specified pages.  In general, a page is freed.  However, if a page is
 *	wired for any reason other than the existence of a managed, wired
 *	mapping, then it may be invalidated but not removed from the object.
 *	Pages are specified by the given range ["start", "end") and the option
 *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
 *	extends from "start" to the end of the object.  If the option
 *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
 *	specified range are affected.  If the option OBJPR_NOTMAPPED is
 *	specified, then the pages within the specified range must have no
 *	mappings.  Otherwise, if this option is not specified, any mappings to
 *	the specified pages are removed before the pages are freed or
 *	invalidated.
 *
 *	In general, this operation should only be performed on objects that
 *	contain managed pages.  There are, however, two exceptions.  First, it
 *	is performed on the kernel and kmem objects by vm_map_entry_delete().
 *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
 *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
 *	not be specified and the option OBJPR_NOTMAPPED must be specified.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int options)
{
	vm_page_t p, next;
	struct mtx *mtx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
	    ("vm_object_page_remove: illegal options for object %p", object));
	if (object->resident_page_count == 0)
		return;
	vm_object_pip_add(object, 1);
again:
	p = vm_page_find_least(object, start);
	mtx = NULL;

	/*
	 * Here, the variable "p" is either (1) the page with the least pindex
	 * greater than or equal to the parameter "start" or (2) NULL.
	 */
	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
		next = TAILQ_NEXT(p, listq);

		/*
		 * If the page is wired for any reason besides the existence
		 * of managed, wired mappings, then it cannot be freed.  For
		 * example, fictitious pages, which represent device memory,
		 * are inherently wired and cannot be freed.  They can,
		 * however, be invalidated if the option OBJPR_CLEANONLY is
		 * not specified.
1960 */ 1961 vm_page_change_lock(p, &mtx); 1962 if (vm_page_xbusied(p)) { 1963 VM_OBJECT_WUNLOCK(object); 1964 vm_page_busy_sleep(p, "vmopax", true); 1965 VM_OBJECT_WLOCK(object); 1966 goto again; 1967 } 1968 if (p->wire_count != 0) { 1969 if ((options & OBJPR_NOTMAPPED) == 0 && 1970 object->ref_count != 0) 1971 pmap_remove_all(p); 1972 if ((options & OBJPR_CLEANONLY) == 0) { 1973 p->valid = 0; 1974 vm_page_undirty(p); 1975 } 1976 continue; 1977 } 1978 if (vm_page_busied(p)) { 1979 VM_OBJECT_WUNLOCK(object); 1980 vm_page_busy_sleep(p, "vmopar", false); 1981 VM_OBJECT_WLOCK(object); 1982 goto again; 1983 } 1984 KASSERT((p->flags & PG_FICTITIOUS) == 0, 1985 ("vm_object_page_remove: page %p is fictitious", p)); 1986 if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) { 1987 if ((options & OBJPR_NOTMAPPED) == 0 && 1988 object->ref_count != 0) 1989 pmap_remove_write(p); 1990 if (p->dirty != 0) 1991 continue; 1992 } 1993 if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0) 1994 pmap_remove_all(p); 1995 vm_page_free(p); 1996 } 1997 if (mtx != NULL) 1998 mtx_unlock(mtx); 1999 vm_object_pip_wakeup(object); 2000 } 2001 2002 /* 2003 * vm_object_page_noreuse: 2004 * 2005 * For the given object, attempt to move the specified pages to 2006 * the head of the inactive queue. This bypasses regular LRU 2007 * operation and allows the pages to be reused quickly under memory 2008 * pressure. If a page is wired for any reason, then it will not 2009 * be queued. Pages are specified by the range ["start", "end"). 2010 * As a special case, if "end" is zero, then the range extends from 2011 * "start" to the end of the object. 2012 * 2013 * This operation should only be performed on objects that 2014 * contain non-fictitious, managed pages. 2015 * 2016 * The object must be locked. 2017 */ 2018 void 2019 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2020 { 2021 struct mtx *mtx; 2022 vm_page_t p, next; 2023 2024 VM_OBJECT_ASSERT_LOCKED(object); 2025 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, 2026 ("vm_object_page_noreuse: illegal object %p", object)); 2027 if (object->resident_page_count == 0) 2028 return; 2029 p = vm_page_find_least(object, start); 2030 2031 /* 2032 * Here, the variable "p" is either (1) the page with the least pindex 2033 * greater than or equal to the parameter "start" or (2) NULL. 2034 */ 2035 mtx = NULL; 2036 for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2037 next = TAILQ_NEXT(p, listq); 2038 vm_page_change_lock(p, &mtx); 2039 vm_page_deactivate_noreuse(p); 2040 } 2041 if (mtx != NULL) 2042 mtx_unlock(mtx); 2043 } 2044 2045 /* 2046 * Populate the specified range of the object with valid pages. Returns 2047 * TRUE if the range is successfully populated and FALSE otherwise. 2048 * 2049 * Note: This function should be optimized to pass a larger array of 2050 * pages to vm_pager_get_pages() before it is applied to a non- 2051 * OBJT_DEVICE object. 2052 * 2053 * The object must be locked. 
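 *
 * A minimal usage sketch, assuming a hypothetical caller ("obj" and
 * "npages" are illustrative names, not taken from this file):
 *
 *	VM_OBJECT_WLOCK(obj);
 *	if (!vm_object_populate(obj, 0, npages))
 *		error = EIO;
 *	VM_OBJECT_WUNLOCK(obj);
 *
 * On a FALSE return, the pages that were read in successfully remain
 * resident and valid in the object; only the failing page was freed.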
2054 */ 2055 boolean_t 2056 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2057 { 2058 vm_page_t m; 2059 vm_pindex_t pindex; 2060 int rv; 2061 2062 VM_OBJECT_ASSERT_WLOCKED(object); 2063 for (pindex = start; pindex < end; pindex++) { 2064 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); 2065 if (m->valid != VM_PAGE_BITS_ALL) { 2066 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); 2067 if (rv != VM_PAGER_OK) { 2068 vm_page_lock(m); 2069 vm_page_free(m); 2070 vm_page_unlock(m); 2071 break; 2072 } 2073 } 2074 /* 2075 * Keep "m" busy because a subsequent iteration may unlock 2076 * the object. 2077 */ 2078 } 2079 if (pindex > start) { 2080 m = vm_page_lookup(object, start); 2081 while (m != NULL && m->pindex < pindex) { 2082 vm_page_xunbusy(m); 2083 m = TAILQ_NEXT(m, listq); 2084 } 2085 } 2086 return (pindex == end); 2087 } 2088 2089 /* 2090 * Routine: vm_object_coalesce 2091 * Function: Coalesces two objects backing up adjoining 2092 * regions of memory into a single object. 2093 * 2094 * returns TRUE if objects were combined. 2095 * 2096 * NOTE: Only works at the moment if the second object is NULL - 2097 * if it's not, which object do we lock first? 2098 * 2099 * Parameters: 2100 * prev_object First object to coalesce 2101 * prev_offset Offset into prev_object 2102 * prev_size Size of reference to prev_object 2103 * next_size Size of reference to the second object 2104 * reserved Indicator that extension region has 2105 * swap accounted for 2106 * 2107 * Conditions: 2108 * The object must *not* be locked. 2109 */ 2110 boolean_t 2111 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, 2112 vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) 2113 { 2114 vm_pindex_t next_pindex; 2115 2116 if (prev_object == NULL) 2117 return (TRUE); 2118 VM_OBJECT_WLOCK(prev_object); 2119 if ((prev_object->type != OBJT_DEFAULT && 2120 prev_object->type != OBJT_SWAP) || 2121 (prev_object->flags & OBJ_TMPFS_NODE) != 0) { 2122 VM_OBJECT_WUNLOCK(prev_object); 2123 return (FALSE); 2124 } 2125 2126 /* 2127 * Try to collapse the object first. 2128 */ 2129 vm_object_collapse(prev_object); 2130 2131 /* 2132 * Can't coalesce if the object has more than one reference, is paged 2133 * out, shadows another object, or has a copy elsewhere (any of which 2134 * mean that the pages not mapped to prev_entry may be in use anyway). 2135 */ 2136 if (prev_object->backing_object != NULL) { 2137 VM_OBJECT_WUNLOCK(prev_object); 2138 return (FALSE); 2139 } 2140 2141 prev_size >>= PAGE_SHIFT; 2142 next_size >>= PAGE_SHIFT; 2143 next_pindex = OFF_TO_IDX(prev_offset) + prev_size; 2144 2145 if (prev_object->ref_count > 1 && 2146 prev_object->size != next_pindex && 2147 (prev_object->flags & OBJ_ONEMAPPING) == 0) { 2148 VM_OBJECT_WUNLOCK(prev_object); 2149 return (FALSE); 2150 } 2151 2152 /* 2153 * Account for the charge. 2154 */ 2155 if (prev_object->cred != NULL) { 2156 2157 /* 2158 * If prev_object was charged, then this mapping, 2159 * although not charged now, may become writable 2160 * later. A non-NULL cred in the object would prevent 2161 * swap reservation when write access is later 2162 * enabled, so reserve swap now. A failed reservation 2163 * causes a separate object to be allocated for the 2164 * map entry, and swap reservation for that entry is 2165 * managed at the appropriate time.
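 *
 * For example (illustrative only), extending the mapping by 16 pages
 * reserves ptoa(16) bytes of swap against prev_object->cred, unless the
 * caller indicated that the extension is already accounted for, and on
 * success prev_object->charge grows by the same amount.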
2166 */ 2167 if (!reserved && !swap_reserve_by_cred(ptoa(next_size), 2168 prev_object->cred)) { 2169 VM_OBJECT_WUNLOCK(prev_object); 2170 return (FALSE); 2171 } 2172 prev_object->charge += ptoa(next_size); 2173 } 2174 2175 /* 2176 * Remove any pages that may still be in the object from a previous 2177 * deallocation. 2178 */ 2179 if (next_pindex < prev_object->size) { 2180 vm_object_page_remove(prev_object, next_pindex, next_pindex + 2181 next_size, 0); 2182 if (prev_object->type == OBJT_SWAP) 2183 swap_pager_freespace(prev_object, 2184 next_pindex, next_size); 2185 #if 0 2186 if (prev_object->cred != NULL) { 2187 KASSERT(prev_object->charge >= 2188 ptoa(prev_object->size - next_pindex), 2189 ("object %p overcharged 1 %jx %jx", prev_object, 2190 (uintmax_t)next_pindex, (uintmax_t)next_size)); 2191 prev_object->charge -= ptoa(prev_object->size - 2192 next_pindex); 2193 } 2194 #endif 2195 } 2196 2197 /* 2198 * Extend the object if necessary. 2199 */ 2200 if (next_pindex + next_size > prev_object->size) 2201 prev_object->size = next_pindex + next_size; 2202 2203 VM_OBJECT_WUNLOCK(prev_object); 2204 return (TRUE); 2205 } 2206 2207 void 2208 vm_object_set_writeable_dirty(vm_object_t object) 2209 { 2210 2211 VM_OBJECT_ASSERT_WLOCKED(object); 2212 if (object->type != OBJT_VNODE) { 2213 if ((object->flags & OBJ_TMPFS_NODE) != 0) { 2214 KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs")); 2215 vm_object_set_flag(object, OBJ_TMPFS_DIRTY); 2216 } 2217 return; 2218 } 2219 object->generation++; 2220 if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) 2221 return; 2222 vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); 2223 } 2224 2225 /* 2226 * vm_object_unwire: 2227 * 2228 * For each page offset within the specified range of the given object, 2229 * find the highest-level page in the shadow chain and unwire it. A page 2230 * must exist at every page offset, and the highest-level page must be 2231 * wired. 2232 */ 2233 void 2234 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, 2235 uint8_t queue) 2236 { 2237 vm_object_t tobject, t1object; 2238 vm_page_t m, tm; 2239 vm_pindex_t end_pindex, pindex, tpindex; 2240 int depth, locked_depth; 2241 2242 KASSERT((offset & PAGE_MASK) == 0, 2243 ("vm_object_unwire: offset is not page aligned")); 2244 KASSERT((length & PAGE_MASK) == 0, 2245 ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); 2246 /* The wired count of a fictitious page never changes. */ 2247 if ((object->flags & OBJ_FICTITIOUS) != 0) 2248 return; 2249 pindex = OFF_TO_IDX(offset); 2250 end_pindex = pindex + atop(length); 2251 again: 2252 locked_depth = 1; 2253 VM_OBJECT_RLOCK(object); 2254 m = vm_page_find_least(object, pindex); 2255 while (pindex < end_pindex) { 2256 if (m == NULL || pindex < m->pindex) { 2257 /* 2258 * The first object in the shadow chain doesn't 2259 * contain a page at the current index. Therefore, 2260 * the page must exist in a backing object. 
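 * For example (illustrative numbers only), if the current pindex is 5
 * and the object's backing_object_offset covers one page, the lookup
 * below is retried at pindex 6 in the backing object, and so on down
 * the chain until a resident page is found.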
2261 */ 2262 tobject = object; 2263 tpindex = pindex; 2264 depth = 0; 2265 do { 2266 tpindex += 2267 OFF_TO_IDX(tobject->backing_object_offset); 2268 tobject = tobject->backing_object; 2269 KASSERT(tobject != NULL, 2270 ("vm_object_unwire: missing page")); 2271 if ((tobject->flags & OBJ_FICTITIOUS) != 0) 2272 goto next_page; 2273 depth++; 2274 if (depth == locked_depth) { 2275 locked_depth++; 2276 VM_OBJECT_RLOCK(tobject); 2277 } 2278 } while ((tm = vm_page_lookup(tobject, tpindex)) == 2279 NULL); 2280 } else { 2281 tm = m; 2282 m = TAILQ_NEXT(m, listq); 2283 } 2284 vm_page_lock(tm); 2285 if (vm_page_xbusied(tm)) { 2286 for (tobject = object; locked_depth >= 1; 2287 locked_depth--) { 2288 t1object = tobject->backing_object; 2289 VM_OBJECT_RUNLOCK(tobject); 2290 tobject = t1object; 2291 } 2292 vm_page_busy_sleep(tm, "unwbo", true); 2293 goto again; 2294 } 2295 vm_page_unwire(tm, queue); 2296 vm_page_unlock(tm); 2297 next_page: 2298 pindex++; 2299 } 2300 /* Release the accumulated object locks. */ 2301 for (tobject = object; locked_depth >= 1; locked_depth--) { 2302 t1object = tobject->backing_object; 2303 VM_OBJECT_RUNLOCK(tobject); 2304 tobject = t1object; 2305 } 2306 } 2307 2308 struct vnode * 2309 vm_object_vnode(vm_object_t object) 2310 { 2311 2312 VM_OBJECT_ASSERT_LOCKED(object); 2313 if (object->type == OBJT_VNODE) 2314 return (object->handle); 2315 if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0) 2316 return (object->un_pager.swp.swp_tmpfs); 2317 return (NULL); 2318 } 2319 2320 static int 2321 sysctl_vm_object_list(SYSCTL_HANDLER_ARGS) 2322 { 2323 struct kinfo_vmobject *kvo; 2324 char *fullpath, *freepath; 2325 struct vnode *vp; 2326 struct vattr va; 2327 vm_object_t obj; 2328 vm_page_t m; 2329 int count, error; 2330 2331 if (req->oldptr == NULL) { 2332 /* 2333 * If an old buffer has not been provided, generate an 2334 * estimate of the space needed for a subsequent call. 2335 */ 2336 mtx_lock(&vm_object_list_mtx); 2337 count = 0; 2338 TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2339 if (obj->type == OBJT_DEAD) 2340 continue; 2341 count++; 2342 } 2343 mtx_unlock(&vm_object_list_mtx); 2344 return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) * 2345 count * 11 / 10)); 2346 } 2347 2348 kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK); 2349 error = 0; 2350 2351 /* 2352 * VM objects are type stable and are never removed from the 2353 * list once added. This allows us to safely read obj->object_list 2354 * after reacquiring the VM object lock. 2355 */ 2356 mtx_lock(&vm_object_list_mtx); 2357 TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2358 if (obj->type == OBJT_DEAD) 2359 continue; 2360 VM_OBJECT_RLOCK(obj); 2361 if (obj->type == OBJT_DEAD) { 2362 VM_OBJECT_RUNLOCK(obj); 2363 continue; 2364 } 2365 mtx_unlock(&vm_object_list_mtx); 2366 kvo->kvo_size = ptoa(obj->size); 2367 kvo->kvo_resident = obj->resident_page_count; 2368 kvo->kvo_ref_count = obj->ref_count; 2369 kvo->kvo_shadow_count = obj->shadow_count; 2370 kvo->kvo_memattr = obj->memattr; 2371 kvo->kvo_active = 0; 2372 kvo->kvo_inactive = 0; 2373 TAILQ_FOREACH(m, &obj->memq, listq) { 2374 /* 2375 * A page may belong to the object but be 2376 * dequeued and set to PQ_NONE while the 2377 * object lock is not held. This makes the 2378 * reads of m->queue below racy, and we do not 2379 * count pages set to PQ_NONE. However, this 2380 * sysctl is only meant to give an 2381 * approximation of the system anyway. 
2382 */ 2383 if (m->queue == PQ_ACTIVE) 2384 kvo->kvo_active++; 2385 else if (m->queue == PQ_INACTIVE) 2386 kvo->kvo_inactive++; 2387 } 2388 2389 kvo->kvo_vn_fileid = 0; 2390 kvo->kvo_vn_fsid = 0; 2391 kvo->kvo_vn_fsid_freebsd11 = 0; 2392 freepath = NULL; 2393 fullpath = ""; 2394 vp = NULL; 2395 switch (obj->type) { 2396 case OBJT_DEFAULT: 2397 kvo->kvo_type = KVME_TYPE_DEFAULT; 2398 break; 2399 case OBJT_VNODE: 2400 kvo->kvo_type = KVME_TYPE_VNODE; 2401 vp = obj->handle; 2402 vref(vp); 2403 break; 2404 case OBJT_SWAP: 2405 kvo->kvo_type = KVME_TYPE_SWAP; 2406 break; 2407 case OBJT_DEVICE: 2408 kvo->kvo_type = KVME_TYPE_DEVICE; 2409 break; 2410 case OBJT_PHYS: 2411 kvo->kvo_type = KVME_TYPE_PHYS; 2412 break; 2413 case OBJT_DEAD: 2414 kvo->kvo_type = KVME_TYPE_DEAD; 2415 break; 2416 case OBJT_SG: 2417 kvo->kvo_type = KVME_TYPE_SG; 2418 break; 2419 case OBJT_MGTDEVICE: 2420 kvo->kvo_type = KVME_TYPE_MGTDEVICE; 2421 break; 2422 default: 2423 kvo->kvo_type = KVME_TYPE_UNKNOWN; 2424 break; 2425 } 2426 VM_OBJECT_RUNLOCK(obj); 2427 if (vp != NULL) { 2428 vn_fullpath(curthread, vp, &fullpath, &freepath); 2429 vn_lock(vp, LK_SHARED | LK_RETRY); 2430 if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) { 2431 kvo->kvo_vn_fileid = va.va_fileid; 2432 kvo->kvo_vn_fsid = va.va_fsid; 2433 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid; 2434 /* truncate */ 2435 } 2436 vput(vp); 2437 } 2438 2439 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path)); 2440 if (freepath != NULL) 2441 free(freepath, M_TEMP); 2442 2443 /* Pack record size down */ 2444 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) 2445 + strlen(kvo->kvo_path) + 1; 2446 kvo->kvo_structsize = roundup(kvo->kvo_structsize, 2447 sizeof(uint64_t)); 2448 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize); 2449 mtx_lock(&vm_object_list_mtx); 2450 if (error) 2451 break; 2452 } 2453 mtx_unlock(&vm_object_list_mtx); 2454 free(kvo, M_TEMP); 2455 return (error); 2456 } 2457 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | 2458 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject", 2459 "List of VM objects"); 2460 2461 #include "opt_ddb.h" 2462 #ifdef DDB 2463 #include <sys/kernel.h> 2464 2465 #include <sys/cons.h> 2466 2467 #include <ddb/ddb.h> 2468 2469 static int 2470 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2471 { 2472 vm_map_t tmpm; 2473 vm_map_entry_t tmpe; 2474 vm_object_t obj; 2475 int entcount; 2476 2477 if (map == 0) 2478 return 0; 2479 2480 if (entry == 0) { 2481 tmpe = map->header.next; 2482 entcount = map->nentries; 2483 while (entcount-- && (tmpe != &map->header)) { 2484 if (_vm_object_in_map(map, object, tmpe)) { 2485 return 1; 2486 } 2487 tmpe = tmpe->next; 2488 } 2489 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2490 tmpm = entry->object.sub_map; 2491 tmpe = tmpm->header.next; 2492 entcount = tmpm->nentries; 2493 while (entcount-- && tmpe != &tmpm->header) { 2494 if (_vm_object_in_map(tmpm, object, tmpe)) { 2495 return 1; 2496 } 2497 tmpe = tmpe->next; 2498 } 2499 } else if ((obj = entry->object.vm_object) != NULL) { 2500 for (; obj; obj = obj->backing_object) 2501 if (obj == object) { 2502 return 1; 2503 } 2504 } 2505 return 0; 2506 } 2507 2508 static int 2509 vm_object_in_map(vm_object_t object) 2510 { 2511 struct proc *p; 2512 2513 /* sx_slock(&allproc_lock); */ 2514 FOREACH_PROC_IN_SYSTEM(p) { 2515 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 2516 continue; 2517 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { 2518 /* 
sx_sunlock(&allproc_lock); */ 2519 return 1; 2520 } 2521 } 2522 /* sx_sunlock(&allproc_lock); */ 2523 if (_vm_object_in_map(kernel_map, object, 0)) 2524 return 1; 2525 return 0; 2526 } 2527 2528 DB_SHOW_COMMAND(vmochk, vm_object_check) 2529 { 2530 vm_object_t object; 2531 2532 /* 2533 * make sure that internal objs are in a map somewhere 2534 * and none have zero ref counts. 2535 */ 2536 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2537 if (object->handle == NULL && 2538 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2539 if (object->ref_count == 0) { 2540 db_printf("vmochk: internal obj has zero ref count: %ld\n", 2541 (long)object->size); 2542 } 2543 if (!vm_object_in_map(object)) { 2544 db_printf( 2545 "vmochk: internal obj is not in a map: " 2546 "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", 2547 object->ref_count, (u_long)object->size, 2548 (u_long)object->size, 2549 (void *)object->backing_object); 2550 } 2551 } 2552 } 2553 } 2554 2555 /* 2556 * vm_object_print: [ debug ] 2557 */ 2558 DB_SHOW_COMMAND(object, vm_object_print_static) 2559 { 2560 /* XXX convert args. */ 2561 vm_object_t object = (vm_object_t)addr; 2562 boolean_t full = have_addr; 2563 2564 vm_page_t p; 2565 2566 /* XXX count is an (unused) arg. Avoid shadowing it. */ 2567 #define count was_count 2568 2569 int count; 2570 2571 if (object == NULL) 2572 return; 2573 2574 db_iprintf( 2575 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", 2576 object, (int)object->type, (uintmax_t)object->size, 2577 object->resident_page_count, object->ref_count, object->flags, 2578 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); 2579 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", 2580 object->shadow_count, 2581 object->backing_object ? object->backing_object->ref_count : 0, 2582 object->backing_object, (uintmax_t)object->backing_object_offset); 2583 2584 if (!full) 2585 return; 2586 2587 db_indent += 2; 2588 count = 0; 2589 TAILQ_FOREACH(p, &object->memq, listq) { 2590 if (count == 0) 2591 db_iprintf("memory:="); 2592 else if (count == 6) { 2593 db_printf("\n"); 2594 db_iprintf(" ..."); 2595 count = 0; 2596 } else 2597 db_printf(","); 2598 count++; 2599 2600 db_printf("(off=0x%jx,page=0x%jx)", 2601 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); 2602 } 2603 if (count != 0) 2604 db_printf("\n"); 2605 db_indent -= 2; 2606 } 2607 2608 /* XXX. */ 2609 #undef count 2610 2611 /* XXX need this non-static entry for calling from vm_map_print. 
*/ 2612 void 2613 vm_object_print( 2614 /* db_expr_t */ long addr, 2615 boolean_t have_addr, 2616 /* db_expr_t */ long count, 2617 char *modif) 2618 { 2619 vm_object_print_static(addr, have_addr, count, modif); 2620 } 2621 2622 DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 2623 { 2624 vm_object_t object; 2625 vm_pindex_t fidx; 2626 vm_paddr_t pa; 2627 vm_page_t m, prev_m; 2628 int rcount, nl, c; 2629 2630 nl = 0; 2631 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2632 db_printf("new object: %p\n", (void *)object); 2633 if (nl > 18) { 2634 c = cngetc(); 2635 if (c != ' ') 2636 return; 2637 nl = 0; 2638 } 2639 nl++; 2640 rcount = 0; 2641 fidx = 0; 2642 pa = -1; 2643 TAILQ_FOREACH(m, &object->memq, listq) { 2644 if (m->pindex > 128) 2645 break; 2646 if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL && 2647 prev_m->pindex + 1 != m->pindex) { 2648 if (rcount) { 2649 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2650 (long)fidx, rcount, (long)pa); 2651 if (nl > 18) { 2652 c = cngetc(); 2653 if (c != ' ') 2654 return; 2655 nl = 0; 2656 } 2657 nl++; 2658 rcount = 0; 2659 } 2660 } 2661 if (rcount && 2662 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 2663 ++rcount; 2664 continue; 2665 } 2666 if (rcount) { 2667 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2668 (long)fidx, rcount, (long)pa); 2669 if (nl > 18) { 2670 c = cngetc(); 2671 if (c != ' ') 2672 return; 2673 nl = 0; 2674 } 2675 nl++; 2676 } 2677 fidx = m->pindex; 2678 pa = VM_PAGE_TO_PHYS(m); 2679 rcount = 1; 2680 } 2681 if (rcount) { 2682 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2683 (long)fidx, rcount, (long)pa); 2684 if (nl > 18) { 2685 c = cngetc(); 2686 if (c != ' ') 2687 return; 2688 nl = 0; 2689 } 2690 nl++; 2691 } 2692 } 2693 } 2694 #endif /* DDB */ 2695
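
/*
 * Usage sketch for the DDB commands defined above; they are reached
 * through the debugger's "show" command set, for example:
 *
 *	show vmochk
 *	show object <vm_object address>
 *	show vmopag
 *
 * "show object" with an address prints that object's fields and its
 * resident pages; "show vmochk" checks that internal objects appear in
 * some map; "show vmopag" prints runs of contiguous resident pages for
 * every object on vm_object_list.
 */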