/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/pctrie.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/user.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int pagerflags, int flags, boolean_t *clearobjflags,
		    boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
		    boolean_t *clearobjflags);
static void	vm_object_qcollapse(vm_object_t object);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
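 *
 *	For example, a copy-on-write mapping is implemented by placing a
 *	shadow object (see vm_object_shadow() below) in front of the
 *	original object: modified pages live in the shadow, while
 *	unmodified pages are still found by following the backing_object
 *	link.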
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0,
    "VM object stats");

static counter_u64_t object_collapses = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses,
    "VM object collapses");

static counter_u64_t object_bypasses = EARLY_COUNTER;
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses,
    "VM object bypasses");

static void
counter_startup(void)
{

	object_collapses = counter_u64_alloc(M_WAITOK);
	object_bypasses = counter_u64_alloc(M_WAITOK);
}
SYSINIT(object_counters, SI_SUB_CPU, SI_ORDER_ANY, counter_startup, NULL);

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(object->ref_count == 0,
	    ("object %p ref_count = %d", object, object->ref_count));
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages in its memq", object));
	KASSERT(vm_radix_is_empty(&object->rtree),
	    ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations", object));
#endif
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
	KASSERT(object->type == OBJT_DEAD,
	    ("object %p has non-dead type %d",
	    object, object->type));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);

	/* These are true for any object that has been freed */
	object->type = OBJT_DEAD;
	object->ref_count = 0;
	vm_radix_init(&object->rtree);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->flags = OBJ_DEAD;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
	return (0);
}

static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->type = type;
	if (type == OBJT_SWAP)
		pctrie_init(&object->un_pager.swp.swp_blks);

	/*
	 * Ensure that swap_pager_swapoff() iteration over object_list
	 * sees up to date type and pctrie head if it observed
	 * non-dead object.
	 */
	atomic_thread_fence_rel();

	switch (type) {
	case OBJT_DEAD:
		panic("_vm_object_allocate: can't create OBJT_DEAD");
	case OBJT_DEFAULT:
	case OBJT_SWAP:
		object->flags = OBJ_ONEMAPPING;
		break;
	case OBJT_DEVICE:
	case OBJT_SG:
		object->flags = OBJ_FICTITIOUS | OBJ_UNMANAGED;
		break;
	case OBJT_MGTDEVICE:
		object->flags = OBJ_FICTITIOUS;
		break;
	case OBJT_PHYS:
		object->flags = OBJ_UNMANAGED;
		break;
	case OBJT_VNODE:
		object->flags = 0;
		break;
	default:
		panic("_vm_object_allocate: type %d is undefined", type);
	}
	object->size = size;
	object->domain.dr_policy = NULL;
	object->generation = 1;
	object->ref_count = 1;
	object->memattr = VM_MEMATTR_DEFAULT;
	object->cred = NULL;
	object->charge = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	umtx_shm_object_init(object);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	rw_init(&kernel_object->lock, "kernel vm object");
	_vm_object_allocate(OBJT_PHYS, atop(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);

	vm_radix_zinit();
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->flags &= ~bits;
}

/*
 *	Sets the default memory attribute for the specified object.  Pages
 *	that are allocated to this object are by default assigned this memory
 *	attribute.
 *
 *	Presently, this function must be called before any pages are allocated
 *	to the object.  In the future, this requirement may be relaxed for
 *	"default" and "swap" objects.
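 *
 *	A hypothetical caller might look like the sketch below; note that
 *	VM_MEMATTR_WRITE_COMBINING is machine-dependent and is not defined
 *	on every platform:
 *
 *		VM_OBJECT_WLOCK(obj);
 *		if (vm_object_set_memattr(obj,
 *		    VM_MEMATTR_WRITE_COMBINING) != KERN_SUCCESS)
 *			...handle the error...
 *		VM_OBJECT_WUNLOCK(obj);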
 */
int
vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	switch (object->type) {
	case OBJT_DEFAULT:
	case OBJT_DEVICE:
	case OBJT_MGTDEVICE:
	case OBJT_PHYS:
	case OBJT_SG:
	case OBJT_SWAP:
	case OBJT_VNODE:
		if (!TAILQ_EMPTY(&object->memq))
			return (KERN_FAILURE);
		break;
	case OBJT_DEAD:
		return (KERN_INVALID_ARGUMENT);
	default:
		panic("vm_object_set_memattr: object %p is of undefined type",
		    object);
	}
	object->memattr = memattr;
	return (KERN_SUCCESS);
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
	}
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_WLOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
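 *
 * The object must be write-locked on entry; the lock is released on all
 * return paths, which lets the caller, vm_object_deallocate(), simply
 * return once this routine has run.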
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vn_printf(vp, "vm_object_vndeallocate ");
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
		umtx_shm_object_terminated(object);

	/*
	 * The VV_TEXT test does not need a lower-vnode bypass to reach
	 * the right vnode, since vp is obtained directly from
	 * object->handle.
	 */
	if (object->ref_count > 1 || (vp->v_vflag & VV_TEXT) == 0) {
		object->ref_count--;
		VM_OBJECT_WUNLOCK(object);
		/* vrele may need the vnode lock. */
		vrele(vp);
	} else {
		vhold(vp);
		VM_OBJECT_WUNLOCK(object);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vdrop(vp);
		VM_OBJECT_WLOCK(object);
		object->ref_count--;
		if (object->type == OBJT_DEAD) {
			VM_OBJECT_WUNLOCK(object);
			VOP_UNLOCK(vp, 0);
		} else {
			if (object->ref_count == 0)
				VOP_UNSET_TEXT(vp);
			VM_OBJECT_WUNLOCK(object);
			vput(vp);
		}
	}
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;
	struct vnode *vp;

	while (object != NULL) {
		VM_OBJECT_WLOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d",
		    object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
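		 *
		 * With no shadows left, the object can be marked
		 * OBJ_ONEMAPPING again; with exactly one shadow, the
		 * remaining reference is held by that shadow, so we try
		 * to collapse the object into it below.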
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_WUNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS) != 0) {
				vp = object->un_pager.swp.swp_tmpfs;
				vhold(vp);
				VM_OBJECT_WUNLOCK(object);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VM_OBJECT_WLOCK(object);
				if (object->type == OBJT_DEAD ||
				    object->ref_count != 1) {
					VM_OBJECT_WUNLOCK(object);
					VOP_UNLOCK(vp, 0);
					vdrop(vp);
					return;
				}
				if ((object->flags & OBJ_TMPFS) != 0)
					VOP_UNSET_TEXT(vp);
				VOP_UNLOCK(vp, 0);
				vdrop(vp);
			}
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			    (object->type == OBJT_SWAP &&
			    (object->flags & OBJ_TMPFS_NODE) == 0))) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			    object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				KASSERT((robject->flags & OBJ_TMPFS_NODE) == 0,
				    ("shadowed tmpfs v_object %p", object));
				if (!VM_OBJECT_TRYWLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_WUNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				    robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_WUNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_WUNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						VM_OBJECT_SLEEP(object, object,
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_WLOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_WLOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_WUNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_WUNLOCK(object);
					continue;
				}
				VM_OBJECT_WUNLOCK(robject);
			}
			VM_OBJECT_WUNLOCK(object);
			return;
		}
doterm:
		umtx_shm_object_terminated(object);
		temp = object->backing_object;
		if (temp != NULL) {
			KASSERT((object->flags & OBJ_TMPFS_NODE) == 0,
			    ("shadowed tmpfs v_object 2 %p", object));
			VM_OBJECT_WLOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			VM_OBJECT_WUNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_WUNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_destroy removes the object from the global object list
 *	and frees the space for the object.
 */
void
vm_object_destroy(vm_object_t object)
{

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_terminate_pages removes any remaining pageable pages
 *	from the object and resets the object to an empty state.
 */
static void
vm_object_terminate_pages(vm_object_t object)
{
	vm_page_t p, p_next;
	struct mtx *mtx;

	VM_OBJECT_ASSERT_WLOCKED(object);

	mtx = NULL;

	/*
	 * Free any remaining pageable pages.  This also removes them from the
	 * paging queues.  However, don't free wired pages, just remove them
	 * from the object.  Rather than incrementally removing each page from
	 * the object, the page and object are reset to an empty state.
	 */
	TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
		vm_page_assert_unbusied(p);
		if ((object->flags & OBJ_UNMANAGED) == 0)
			/*
			 * vm_page_free_prep() only needs the page
			 * lock for managed pages.
			 */
			vm_page_change_lock(p, &mtx);
		p->object = NULL;
		if (p->wire_count != 0)
			continue;
		VM_CNT_INC(v_pfree);
		vm_page_free(p);
	}
	if (mtx != NULL)
		mtx_unlock(mtx);

	/*
	 * If the object contained any pages, then reset it to an empty state.
	 * None of the object's fields, including "resident_page_count", were
	 * modified by the preceding loop.
	 */
	if (object->resident_page_count != 0) {
		vm_radix_reclaim_allnodes(&object->rtree);
		TAILQ_INIT(&object->memq);
		object->resident_page_count = 0;
		if (object->type == OBJT_VNODE)
			vdrop(object->handle);
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * Wait for the pageout daemon to be done with the object.
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate.  All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
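		 * The pages are cleaned synchronously first; vinvalbuf()
		 * then flushes any dirty buffers, and the buffer object is
		 * marked BO_DEAD so that no new buffers can be instantiated
		 * against the dying vnode.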
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(object);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	if ((object->flags & OBJ_PG_DTOR) == 0)
		vm_object_terminate_pages(object);

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif

	KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP,
	    ("%s: non-swap obj %p has cred", __func__, object));

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_WUNLOCK(object);

	vm_object_destroy(object);
}

/*
 * Make the page read-only so that we can clear the object flags.  However, if
 * this is a nosync mmap then the object is likely to stay dirty so do not
 * mess with the page and do not clear the object flags.  Returns TRUE if the
 * page should be flushed, and FALSE otherwise.
 */
static boolean_t
vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
{

	/*
	 * If we have been asked to skip nosync pages and this is a
	 * nosync page, skip it.  Note that the object flags were not
	 * cleared in this case so we do not have to set them.
	 */
	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
		*clearobjflags = FALSE;
		return (FALSE);
	} else {
		pmap_remove_write(p);
		return (p->dirty != 0);
	}
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.  If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if "end" is zero, we clean from "start" to the end
 *	of the object.
 *
 *	The object must be locked.
 *
 *	Returns FALSE if some page from the range was not written, as
 *	reported by the pager, and TRUE otherwise.
 */
boolean_t
vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end,
    int flags)
{
	vm_page_t np, p;
	vm_pindex_t pi, tend, tstart;
	int curgeneration, n, pagerflags;
	boolean_t clearobjflags, eio, res;

	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * The OBJ_MIGHTBEDIRTY flag is only set for OBJT_VNODE
	 * objects.  The check below prevents the function from
	 * operating on non-vnode objects.
	 */
	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0 ||
	    object->resident_page_count == 0)
		return (TRUE);

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) != 0 ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) != 0 ? VM_PAGER_PUT_INVAL : 0;

	tstart = OFF_TO_IDX(start);
	tend = (end == 0) ?
	    object->size : OFF_TO_IDX(end + PAGE_MASK);
	clearobjflags = tstart == 0 && tend >= object->size;
	res = TRUE;

rescan:
	curgeneration = object->generation;

	for (p = vm_page_find_least(object, tstart); p != NULL; p = np) {
		pi = p->pindex;
		if (pi >= tend)
			break;
		np = TAILQ_NEXT(p, listq);
		if (p->valid == 0)
			continue;
		if (vm_page_sleep_if_busy(p, "vpcwai")) {
			if (object->generation != curgeneration) {
				if ((flags & OBJPC_SYNC) != 0)
					goto rescan;
				else
					clearobjflags = FALSE;
			}
			np = vm_page_find_least(object, pi);
			continue;
		}
		if (!vm_object_page_remove_write(p, flags, &clearobjflags))
			continue;

		n = vm_object_page_collect_flush(object, p, pagerflags,
		    flags, &clearobjflags, &eio);
		if (eio) {
			res = FALSE;
			clearobjflags = FALSE;
		}
		if (object->generation != curgeneration) {
			if ((flags & OBJPC_SYNC) != 0)
				goto rescan;
			else
				clearobjflags = FALSE;
		}

		/*
		 * If the VOP_PUTPAGES() did a truncated write, so
		 * that even the first page of the run is not fully
		 * written, vm_pageout_flush() returns 0 as the run
		 * length.  Since the condition that caused the truncated
		 * write may be permanent, e.g. exhausted free space,
		 * accepting n == 0 would cause an infinite loop.
		 *
		 * Forwarding the iterator leaves the unwritten page
		 * behind, but there is not much we can do there if
		 * the filesystem refuses to write it.
		 */
		if (n == 0) {
			n = 1;
			clearobjflags = FALSE;
		}
		np = vm_page_find_least(object, pi + n);
	}
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0);
#endif

	if (clearobjflags)
		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
	return (res);
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, boolean_t *clearobjflags, boolean_t *eio)
{
	vm_page_t ma[vm_pageout_page_count], p_first, tp;
	int count, i, mreq, runlen;

	vm_page_lock_assert(p, MA_NOTOWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	count = 1;
	mreq = 0;

	for (tp = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_next(tp);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
	}

	for (p_first = p; count < vm_pageout_page_count; count++) {
		tp = vm_page_prev(p_first);
		if (tp == NULL || vm_page_busied(tp))
			break;
		if (!vm_object_page_remove_write(tp, flags, clearobjflags))
			break;
		p_first = tp;
		mreq++;
	}

	for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)
		ma[i] = tp;

	vm_pageout_flush(ma, count, pagerflags, mreq, &runlen, eio);
	return (runlen);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * If the backing object is a device object with unmanaged pages, then any
 * mappings to the specified range of pages must be removed before this
 * function is called.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
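 *
 * This is the object-level backend of msync(2): vm_map_sync() resolves
 * the user's address range to map entries and calls this routine on each
 * entry's object.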
 */
boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int error, flags, fsync_after;
	boolean_t res;

	if (object == NULL)
		return (TRUE);
	res = TRUE;
	error = 0;
	VM_OBJECT_WLOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_WLOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_WUNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0 &&
	    ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {
		VM_OBJECT_WUNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		if (syncio && !invalidate && offset == 0 &&
		    atop(size) == object->size) {
			/*
			 * If syncing the whole mapping of the file,
			 * it is faster to schedule all the writes in
			 * async mode, which also allows clustering,
			 * and then wait for I/O to complete.
			 */
			flags = 0;
			fsync_after = TRUE;
		} else {
			flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
			flags |= invalidate ? (OBJPC_SYNC | OBJPC_INVAL) : 0;
			fsync_after = FALSE;
		}
		VM_OBJECT_WLOCK(object);
		res = vm_object_page_clean(object, offset, offset + size,
		    flags);
		VM_OBJECT_WUNLOCK(object);
		if (fsync_after)
			error = VOP_FSYNC(vp, MNT_WAIT, curthread);
		VOP_UNLOCK(vp, 0);
		vn_finished_write(mp);
		if (error != 0)
			res = FALSE;
		VM_OBJECT_WLOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	    object->type == OBJT_DEVICE) && invalidate) {
		if (object->type == OBJT_DEVICE)
			/*
			 * The option OBJPR_NOTMAPPED must be passed here
			 * because vm_object_page_remove() cannot remove
			 * unmanaged mappings.
			 */
			flags = OBJPR_NOTMAPPED;
		else if (old_msync)
			flags = 0;
		else
			flags = OBJPR_CLEANONLY;
		vm_object_page_remove(object, OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK), flags);
	}
	VM_OBJECT_WUNLOCK(object);
	return (res);
}

/*
 * Determine whether the given advice can be applied to the object.  Advice is
 * not applied to unmanaged pages since they never belong to page queues, and
 * since MADV_FREE is destructive, it can apply only to anonymous pages that
 * have been mapped at most once.
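 *
 * For example, MADV_FREE on a shared file mapping is refused here (the
 * object is not anonymous), while MADV_DONTNEED on the same mapping is
 * allowed.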
 */
static bool
vm_object_advice_applies(vm_object_t object, int advice)
{

	if ((object->flags & OBJ_UNMANAGED) != 0)
		return (false);
	if (advice != MADV_FREE)
		return (true);
	return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) &&
	    (object->flags & OBJ_ONEMAPPING) != 0);
}

static void
vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
    vm_size_t size)
{

	if (advice == MADV_FREE && object->type == OBJT_SWAP)
		swap_pager_freespace(object, pindex, size);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
    int advice)
{
	vm_pindex_t tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m, tm;

	if (object == NULL)
		return;

relookup:
	VM_OBJECT_WLOCK(object);
	if (!vm_object_advice_applies(object, advice)) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
		tobject = object;

		/*
		 * If the next page isn't resident in the top-level object, we
		 * need to search the shadow chain.  When applying MADV_FREE, we
		 * take care to release any swap space used to store
		 * non-resident pages.
		 */
		if (m == NULL || pindex < m->pindex) {
			/*
			 * Optimize a common case: if the top-level object has
			 * no backing object, we can skip over the non-resident
			 * range in constant time.
			 */
			if (object->backing_object == NULL) {
				tpindex = (m != NULL && m->pindex < end) ?
				    m->pindex : end;
				vm_object_madvise_freespace(object, advice,
				    pindex, tpindex - pindex);
				if ((pindex = tpindex) == end)
					break;
				goto next_page;
			}

			tpindex = pindex;
			do {
				vm_object_madvise_freespace(tobject, advice,
				    tpindex, 1);
				/*
				 * Prepare to search the next object in the
				 * chain.
				 */
				backing_object = tobject->backing_object;
				if (backing_object == NULL)
					goto next_pindex;
				VM_OBJECT_WLOCK(backing_object);
				tpindex +=
				    OFF_TO_IDX(tobject->backing_object_offset);
				if (tobject != object)
					VM_OBJECT_WUNLOCK(tobject);
				tobject = backing_object;
				if (!vm_object_advice_applies(tobject, advice))
					goto next_pindex;
			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
			    NULL);
		} else {
next_page:
			tm = m;
			m = TAILQ_NEXT(m, listq);
		}

		/*
		 * If the page is not in a normal state, skip it.
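		 * "Normal" means fully valid and neither held nor wired;
		 * invalid, held, or wired pages are skipped, while a busy
		 * page makes us drop the locks, sleep, and restart the
		 * lookup.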
		 */
		if (tm->valid != VM_PAGE_BITS_ALL)
			goto next_pindex;
		vm_page_lock(tm);
		if (vm_page_held(tm)) {
			vm_page_unlock(tm);
			goto next_pindex;
		}
		KASSERT((tm->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_madvise: page %p is fictitious", tm));
		KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
		    ("vm_object_madvise: page %p is not managed", tm));
		if (vm_page_busied(tm)) {
			if (object != tobject)
				VM_OBJECT_WUNLOCK(tobject);
			VM_OBJECT_WUNLOCK(object);
			if (advice == MADV_WILLNEED) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_aflag_set(tm, PGA_REFERENCED);
			}
			vm_page_busy_sleep(tm, "madvpo", false);
			goto relookup;
		}
		vm_page_advise(tm, advice);
		vm_page_unlock(tm);
		vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
next_pindex:
		if (tobject != object)
			VM_OBJECT_WUNLOCK(tobject);
	}
	VM_OBJECT_WUNLOCK(object);
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		    source->type == OBJT_SWAP)) {
			VM_OBJECT_WUNLOCK(source);
			return;
		}
		VM_OBJECT_WUNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, atop(length));

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		result->domain = source->domain;
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
#if VM_NRESERVLEVEL > 0
		result->flags |= source->flags & OBJ_COLORED;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
		    ((1 << (VM_NFREEORDER - 1)) - 1);
#endif
		VM_OBJECT_WUNLOCK(source);
	}

	/*
	 * Return the new things.
	 */
	*offset = 0;
	*object = result;
}

/*
 *	vm_object_split:
 *
 *	Split the pages in a map entry into a new object.  This affords
 *	easier removal of unused pages, and keeps object inheritance from
 *	having a negative impact on memory usage.
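 *
 *	It is invoked from the fork path (see vm_map_copy_entry()) when a
 *	map entry covers only part of a large OBJ_ONEMAPPING anonymous
 *	object; moving that range into its own object lets the remainder
 *	be reclaimed independently.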
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m, m_next;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t idx, offidxstart;
	vm_size_t size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_WUNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	size = atop(entry->end - entry->start);

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	/*
	 * At this point, the new object is still private, so the order in
	 * which the original and new objects are locked does not matter.
	 */
	VM_OBJECT_WLOCK(new_object);
	VM_OBJECT_WLOCK(orig_object);
	new_object->domain = orig_object->domain;
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_WLOCK(source);
		if ((source->flags & OBJ_DEAD) != 0) {
			VM_OBJECT_WUNLOCK(source);
			VM_OBJECT_WUNLOCK(orig_object);
			VM_OBJECT_WUNLOCK(new_object);
			vm_object_deallocate(new_object);
			VM_OBJECT_WLOCK(orig_object);
			return;
		}
		LIST_INSERT_HEAD(&source->shadow_head,
		    new_object, shadow_list);
		source->shadow_count++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_WUNLOCK(source);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	if (orig_object->cred != NULL) {
		new_object->cred = orig_object->cred;
		crhold(orig_object->cred);
		new_object->charge = ptoa(size);
		KASSERT(orig_object->charge >= ptoa(size),
		    ("orig_object->charge < 0"));
		orig_object->charge -= ptoa(size);
	}
retry:
	m = vm_page_find_least(orig_object, offidxstart);
	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
	    m = m_next) {
		m_next = TAILQ_NEXT(m, listq);

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if (vm_page_busied(m)) {
			VM_OBJECT_WUNLOCK(new_object);
			vm_page_lock(m);
			VM_OBJECT_WUNLOCK(orig_object);
			vm_page_busy_sleep(m, "spltwt", false);
			VM_OBJECT_WLOCK(orig_object);
			VM_OBJECT_WLOCK(new_object);
			goto retry;
		}

		/* vm_page_rename() will dirty the page. */
		if (vm_page_rename(m, new_object, idx)) {
			VM_OBJECT_WUNLOCK(new_object);
			VM_OBJECT_WUNLOCK(orig_object);
			vm_radix_wait();
			VM_OBJECT_WLOCK(orig_object);
			VM_OBJECT_WLOCK(new_object);
			goto retry;
		}
#if VM_NRESERVLEVEL > 0
		/*
		 * If some of the reservation's allocated pages remain with
		 * the original object, then transferring the reservation to
		 * the new object is neither particularly beneficial nor
		 * particularly harmful as compared to leaving the reservation
		 * with the original object.  If, however, all of the
		 * reservation's allocated pages are transferred to the new
		 * object, then transferring the reservation is typically
		 * beneficial.
		 * Determining which of these two cases applies
		 * would be more costly than unconditionally renaming the
		 * reservation.
		 */
		vm_reserv_rename(m, new_object, orig_object, offidxstart);
#endif
		if (orig_object->type == OBJT_SWAP)
			vm_page_xbusy(m);
	}
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
		TAILQ_FOREACH(m, &new_object->memq, listq)
			vm_page_xunbusy(m);
	}
	VM_OBJECT_WUNLOCK(orig_object);
	VM_OBJECT_WUNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_WLOCK(new_object);
}

#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static vm_page_t
vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p, vm_page_t next,
    int op)
{
	vm_object_t backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);
	backing_object = object->backing_object;
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	KASSERT(p == NULL || vm_page_busied(p), ("unbusy page %p", p));
	KASSERT(p == NULL || p->object == object ||
	    p->object == backing_object,
	    ("invalid ownership %p %p %p", p, object, backing_object));
	if ((op & OBSC_COLLAPSE_NOWAIT) != 0)
		return (next);
	if (p != NULL)
		vm_page_lock(p);
	VM_OBJECT_WUNLOCK(object);
	VM_OBJECT_WUNLOCK(backing_object);
	/* The page is only NULL when rename fails. */
	if (p == NULL)
		vm_radix_wait();
	else
		vm_page_busy_sleep(p, "vmocol", false);
	VM_OBJECT_WLOCK(object);
	VM_OBJECT_WLOCK(backing_object);
	return (TAILQ_FIRST(&backing_object->memq));
}

static bool
vm_object_scan_all_shadowed(vm_object_t object)
{
	vm_object_t backing_object;
	vm_page_t p, pp;
	vm_pindex_t backing_offset_index, new_pindex, pi, ps;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);

	backing_object = object->backing_object;

	if (backing_object->type != OBJT_DEFAULT &&
	    backing_object->type != OBJT_SWAP)
		return (false);

	pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	p = vm_page_find_least(backing_object, pi);
	ps = swap_pager_find_least(backing_object, pi);

	/*
	 * Only check pages inside the parent object's range and
	 * inside the parent object's mapping of the backing object.
	 */
	for (;; pi++) {
		if (p != NULL && p->pindex < pi)
			p = TAILQ_NEXT(p, listq);
		if (ps < pi)
			ps = swap_pager_find_least(backing_object, pi);
		if (p == NULL && ps >= backing_object->size)
			break;
		else if (p == NULL)
			pi = ps;
		else
			pi = MIN(p->pindex, ps);

		new_pindex = pi - backing_offset_index;
		if (new_pindex >= object->size)
			break;

		/*
		 * See if the parent has the page or if the parent's object
		 * pager has the page.  If the parent has the page but the page
		 * is not valid, the parent's object pager must have the page.
		 *
		 * If this fails, the parent does not completely shadow the
		 * object and we might as well give up now.
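		 * (In that case the caller, vm_object_collapse(), leaves
		 * the backing object in place instead of bypassing it.)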
		 */
		pp = vm_page_lookup(object, new_pindex);
		if ((pp == NULL || pp->valid == 0) &&
		    !vm_pager_has_page(object, new_pindex, NULL, NULL))
			return (false);
	}
	return (true);
}

static bool
vm_object_collapse_scan(vm_object_t object, int op)
{
	vm_object_t backing_object;
	vm_page_t next, p, pp;
	vm_pindex_t backing_offset_index, new_pindex;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(object->backing_object);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if ((op & OBSC_COLLAPSE_WAIT) != 0)
		vm_object_set_flag(backing_object, OBJ_DEAD);

	/*
	 * Our scan
	 */
	for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		new_pindex = p->pindex - backing_offset_index;

		/*
		 * Check for busy page
		 */
		if (vm_page_busied(p)) {
			next = vm_object_collapse_scan_wait(object, p, next,
			    op);
			continue;
		}

		KASSERT(p->object == backing_object,
		    ("vm_object_collapse_scan: object mismatch"));

		if (p->pindex < backing_offset_index ||
		    new_pindex >= object->size) {
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object, p->pindex,
				    1);

			/*
			 * Page is out of the parent object's range, we can
			 * simply destroy it.
			 */
			vm_page_lock(p);
			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (p->wire_count == 0)
				vm_page_free(p);
			else
				vm_page_remove(p);
			vm_page_unlock(p);
			continue;
		}

		pp = vm_page_lookup(object, new_pindex);
		if (pp != NULL && vm_page_busied(pp)) {
			/*
			 * The page in the parent is busy and possibly not
			 * (yet) valid.  Until its state is finalized by the
			 * busy bit owner, we can't tell whether it shadows the
			 * original page.  Therefore, we must either skip it
			 * and the original (backing_object) page or wait for
			 * its state to be finalized.
			 *
			 * This is due to a race with vm_fault() where we must
			 * unbusy the original (backing_obj) page before we can
			 * (re)lock the parent.  Hence we can get here.
			 */
			next = vm_object_collapse_scan_wait(object, pp, next,
			    op);
			continue;
		}

		KASSERT(pp == NULL || pp->valid != 0,
		    ("unbusy invalid page %p", pp));

		if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL,
		    NULL)) {
			/*
			 * The page already exists in the parent OR swap exists
			 * for this location in the parent.  Leave the parent's
			 * page alone.  Destroy the original page from the
			 * backing object.
			 */
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object, p->pindex,
				    1);
			vm_page_lock(p);
			KASSERT(!pmap_page_is_mapped(p),
			    ("freeing mapped page %p", p));
			if (p->wire_count == 0)
				vm_page_free(p);
			else
				vm_page_remove(p);
			vm_page_unlock(p);
			continue;
		}

		/*
		 * Page does not exist in parent, rename the page from the
		 * backing object to the main object.
		 *
		 * If the page was mapped to a process, it can remain mapped
		 * through the rename.  vm_page_rename() will dirty the page.
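		 * vm_page_rename() fails only when it cannot allocate a
		 * radix-tree node; vm_object_collapse_scan_wait() then
		 * either skips the page (OBSC_COLLAPSE_NOWAIT) or sleeps
		 * in vm_radix_wait() and restarts the scan.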
		 */
		if (vm_page_rename(p, object, new_pindex)) {
			next = vm_object_collapse_scan_wait(object, NULL, next,
			    op);
			continue;
		}

		/* Use the old pindex to free the right page. */
		if (backing_object->type == OBJT_SWAP)
			swap_pager_freespace(backing_object,
			    new_pindex + backing_offset_index, 1);

#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(p, object, backing_object,
		    backing_offset_index);
#endif
	}
	return (true);
}

/*
 * This version of collapse allows the operation to occur earlier and
 * while paging_in_progress is true for an object.  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);
	VM_OBJECT_ASSERT_WLOCKED(backing_object);

	if (backing_object->ref_count != 1)
		return;

	vm_object_collapse_scan(object, OBSC_COLLAPSE_NOWAIT);
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	vm_object_t backing_object, new_backing_object;

	VM_OBJECT_ASSERT_WLOCKED(object);

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * We check the backing object first, because it is most
		 * likely not collapsible.
		 */
		VM_OBJECT_WLOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		    backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		    object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_WUNLOCK(backing_object);
			break;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			VM_OBJECT_WUNLOCK(backing_object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_collapse_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			vm_object_pip_add(object, 1);
			vm_object_pip_add(backing_object, 1);

			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_collapse_scan(object, OBSC_COLLAPSE_WAIT);

#if VM_NRESERVLEVEL > 0
			/*
			 * Break any reservations from backing_object.
			 */
			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
				vm_reserv_break_all(backing_object);
#endif

			/*
			 * Move the pager from backing_object to object.
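			 * For a swap-backed object this means copying the
			 * swap metadata; an OBJT_DEFAULT backing object has
			 * no pager state to move.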
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 * Since swap_pager_copy() is being asked to
				 * destroy the source, it will change the
				 * backing_object's type to OBJT_DEFAULT.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset),
				    TRUE);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			if (backing_object->backing_object) {
				VM_OBJECT_WLOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				VM_OBJECT_WUNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1,
			    ("backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			vm_object_pip_wakeup(backing_object);
			backing_object->type = OBJT_DEAD;
			backing_object->ref_count = 0;
			VM_OBJECT_WUNLOCK(backing_object);
			vm_object_destroy(backing_object);

			vm_object_pip_wakeup(object);
			counter_u64_add(object_collapses, 1);
		} else {
			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do, so we give up.
			 */
			if (object->resident_page_count != object->size &&
			    !vm_object_scan_all_shadowed(object)) {
				VM_OBJECT_WUNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) !=
			    NULL) {
				VM_OBJECT_WLOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object, shadow_list);
				new_backing_object->shadow_count++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_WUNLOCK(new_backing_object);
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_WUNLOCK(backing_object);
			counter_u64_add(object_bypasses, 1);
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove:
 *
 *	For the given object, either frees or invalidates each of the
 *	specified pages.  In general, a page is freed.
 *	However, if a page is
 *	wired for any reason other than the existence of a managed, wired
 *	mapping, then it may be invalidated but not removed from the object.
 *	Pages are specified by the given range ["start", "end") and the option
 *	OBJPR_CLEANONLY.  As a special case, if "end" is zero, then the range
 *	extends from "start" to the end of the object.  If the option
 *	OBJPR_CLEANONLY is specified, then only the non-dirty pages within the
 *	specified range are affected.  If the option OBJPR_NOTMAPPED is
 *	specified, then the pages within the specified range must have no
 *	mappings.  Otherwise, if this option is not specified, any mappings to
 *	the specified pages are removed before the pages are freed or
 *	invalidated.
 *
 *	In general, this operation should only be performed on objects that
 *	contain managed pages.  There are, however, two exceptions.  First, it
 *	is performed on the kernel and kmem objects by vm_map_entry_delete().
 *	Second, it is used by msync(..., MS_INVALIDATE) to invalidate device-
 *	backed pages.  In both of these cases, the option OBJPR_CLEANONLY must
 *	not be specified and the option OBJPR_NOTMAPPED must be specified.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int options)
{
	vm_page_t p, next;
	struct mtx *mtx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_UNMANAGED) == 0 ||
	    (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
	    ("vm_object_page_remove: illegal options for object %p", object));
	if (object->resident_page_count == 0)
		return;
	vm_object_pip_add(object, 1);
again:
	p = vm_page_find_least(object, start);
	mtx = NULL;

	/*
	 * Here, the variable "p" is either (1) the page with the least pindex
	 * greater than or equal to the parameter "start" or (2) NULL.
	 */
	for (; p != NULL && (p->pindex < end || end == 0); p = next) {
		next = TAILQ_NEXT(p, listq);

		/*
		 * If the page is wired for any reason besides the existence
		 * of managed, wired mappings, then it cannot be freed.  For
		 * example, fictitious pages, which represent device memory,
		 * are inherently wired and cannot be freed.  They can,
		 * however, be invalidated if the option OBJPR_CLEANONLY is
		 * not specified.
1961 */ 1962 vm_page_change_lock(p, &mtx); 1963 if (vm_page_xbusied(p)) { 1964 VM_OBJECT_WUNLOCK(object); 1965 vm_page_busy_sleep(p, "vmopax", true); 1966 VM_OBJECT_WLOCK(object); 1967 goto again; 1968 } 1969 if (p->wire_count != 0) { 1970 if ((options & OBJPR_NOTMAPPED) == 0 && 1971 object->ref_count != 0) 1972 pmap_remove_all(p); 1973 if ((options & OBJPR_CLEANONLY) == 0) { 1974 p->valid = 0; 1975 vm_page_undirty(p); 1976 } 1977 continue; 1978 } 1979 if (vm_page_busied(p)) { 1980 VM_OBJECT_WUNLOCK(object); 1981 vm_page_busy_sleep(p, "vmopar", false); 1982 VM_OBJECT_WLOCK(object); 1983 goto again; 1984 } 1985 KASSERT((p->flags & PG_FICTITIOUS) == 0, 1986 ("vm_object_page_remove: page %p is fictitious", p)); 1987 if ((options & OBJPR_CLEANONLY) != 0 && p->valid != 0) { 1988 if ((options & OBJPR_NOTMAPPED) == 0 && 1989 object->ref_count != 0) 1990 pmap_remove_write(p); 1991 if (p->dirty != 0) 1992 continue; 1993 } 1994 if ((options & OBJPR_NOTMAPPED) == 0 && object->ref_count != 0) 1995 pmap_remove_all(p); 1996 vm_page_free(p); 1997 } 1998 if (mtx != NULL) 1999 mtx_unlock(mtx); 2000 vm_object_pip_wakeup(object); 2001 } 2002 2003 /* 2004 * vm_object_page_noreuse: 2005 * 2006 * For the given object, attempt to move the specified pages to 2007 * the head of the inactive queue. This bypasses regular LRU 2008 * operation and allows the pages to be reused quickly under memory 2009 * pressure. If a page is wired for any reason, then it will not 2010 * be queued. Pages are specified by the range ["start", "end"). 2011 * As a special case, if "end" is zero, then the range extends from 2012 * "start" to the end of the object. 2013 * 2014 * This operation should only be performed on objects that 2015 * contain non-fictitious, managed pages. 2016 * 2017 * The object must be locked. 2018 */ 2019 void 2020 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2021 { 2022 struct mtx *mtx; 2023 vm_page_t p, next; 2024 2025 VM_OBJECT_ASSERT_LOCKED(object); 2026 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, 2027 ("vm_object_page_noreuse: illegal object %p", object)); 2028 if (object->resident_page_count == 0) 2029 return; 2030 p = vm_page_find_least(object, start); 2031 2032 /* 2033 * Here, the variable "p" is either (1) the page with the least pindex 2034 * greater than or equal to the parameter "start" or (2) NULL. 2035 */ 2036 mtx = NULL; 2037 for (; p != NULL && (p->pindex < end || end == 0); p = next) { 2038 next = TAILQ_NEXT(p, listq); 2039 vm_page_change_lock(p, &mtx); 2040 vm_page_deactivate_noreuse(p); 2041 } 2042 if (mtx != NULL) 2043 mtx_unlock(mtx); 2044 } 2045 2046 /* 2047 * Populate the specified range of the object with valid pages. Returns 2048 * TRUE if the range is successfully populated and FALSE otherwise. 2049 * 2050 * Note: This function should be optimized to pass a larger array of 2051 * pages to vm_pager_get_pages() before it is applied to a non- 2052 * OBJT_DEVICE object. 2053 * 2054 * The object must be locked. 
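*
* A minimal usage sketch (a hypothetical caller, not taken from this
* file), populating an entire object "obj":
*
*	VM_OBJECT_WLOCK(obj);
*	if (!vm_object_populate(obj, 0, obj->size))
*		printf("object %p only partially populated\n", obj);
*	VM_OBJECT_WUNLOCK(obj);
*
* On return, every page in the successfully populated prefix of the
* range is resident, valid, and no longer busy.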
2055 */ 2056 boolean_t 2057 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) 2058 { 2059 vm_page_t m; 2060 vm_pindex_t pindex; 2061 int rv; 2062 2063 VM_OBJECT_ASSERT_WLOCKED(object); 2064 for (pindex = start; pindex < end; pindex++) { 2065 m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL); 2066 if (m->valid != VM_PAGE_BITS_ALL) { 2067 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL); 2068 if (rv != VM_PAGER_OK) { 2069 vm_page_lock(m); 2070 vm_page_free(m); 2071 vm_page_unlock(m); 2072 break; 2073 } 2074 } 2075 /* 2076 * Keep "m" busy because a subsequent iteration may unlock 2077 * the object. 2078 */ 2079 } 2080 if (pindex > start) { 2081 m = vm_page_lookup(object, start); 2082 while (m != NULL && m->pindex < pindex) { 2083 vm_page_xunbusy(m); 2084 m = TAILQ_NEXT(m, listq); 2085 } 2086 } 2087 return (pindex == end); 2088 } 2089 2090 /* 2091 * Routine: vm_object_coalesce 2092 * Function: Coalesces two objects backing up adjoining 2093 * regions of memory into a single object. 2094 * 2095 * Returns TRUE if the objects were combined. 2096 * 2097 * NOTE: Only works at the moment if the second object is NULL - 2098 * if it's not, which object do we lock first? 2099 * 2100 * Parameters: 2101 * prev_object First object to coalesce 2102 * prev_offset Offset into prev_object 2103 * prev_size Size of reference to prev_object 2104 * next_size Size of reference to the second object 2105 * reserved Indicator that the extension region has 2106 * swap accounted for 2107 * 2108 * Conditions: 2109 * The object must *not* be locked. 2110 */ 2111 boolean_t 2112 vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset, 2113 vm_size_t prev_size, vm_size_t next_size, boolean_t reserved) 2114 { 2115 vm_pindex_t next_pindex; 2116 2117 if (prev_object == NULL) 2118 return (TRUE); 2119 VM_OBJECT_WLOCK(prev_object); 2120 if ((prev_object->type != OBJT_DEFAULT && 2121 prev_object->type != OBJT_SWAP) || 2122 (prev_object->flags & OBJ_TMPFS_NODE) != 0) { 2123 VM_OBJECT_WUNLOCK(prev_object); 2124 return (FALSE); 2125 } 2126 2127 /* 2128 * Try to collapse the object first. 2129 */ 2130 vm_object_collapse(prev_object); 2131 2132 /* 2133 * Can't coalesce if the object has more than one reference, is 2134 * paged out, shadows another object, or has a copy elsewhere, any 2135 * of which mean that the pages not mapped to prev_entry may be in use anyway. 2136 */ 2137 if (prev_object->backing_object != NULL) { 2138 VM_OBJECT_WUNLOCK(prev_object); 2139 return (FALSE); 2140 } 2141 2142 prev_size >>= PAGE_SHIFT; 2143 next_size >>= PAGE_SHIFT; 2144 next_pindex = OFF_TO_IDX(prev_offset) + prev_size; 2145 2146 if (prev_object->ref_count > 1 && 2147 prev_object->size != next_pindex && 2148 (prev_object->flags & OBJ_ONEMAPPING) == 0) { 2149 VM_OBJECT_WUNLOCK(prev_object); 2150 return (FALSE); 2151 } 2152 2153 /* 2154 * Account for the charge. 2155 */ 2156 if (prev_object->cred != NULL) { 2157 2158 /* 2159 * If prev_object was charged, then this mapping, 2160 * although not charged now, may become writable 2161 * later. A non-NULL cred in the object would prevent 2162 * swap reservation when write access is enabled, 2163 * so reserve the swap now. A failed reservation 2164 * causes allocation of a separate object for the map 2165 * entry, and swap reservation for that entry is 2166 * managed at the appropriate time.
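*
* Concretely, the code below reserves ptoa(next_size) bytes
* against prev_object->cred and, on success, grows
* prev_object->charge by the same amount, so the charge continues
* to cover the object's entire extended range.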
2167 */ 2168 if (!reserved && !swap_reserve_by_cred(ptoa(next_size), 2169 prev_object->cred)) { 2170 VM_OBJECT_WUNLOCK(prev_object); 2171 return (FALSE); 2172 } 2173 prev_object->charge += ptoa(next_size); 2174 } 2175 2176 /* 2177 * Remove any pages that may still be in the object from a previous 2178 * deallocation. 2179 */ 2180 if (next_pindex < prev_object->size) { 2181 vm_object_page_remove(prev_object, next_pindex, next_pindex + 2182 next_size, 0); 2183 if (prev_object->type == OBJT_SWAP) 2184 swap_pager_freespace(prev_object, 2185 next_pindex, next_size); 2186 #if 0 2187 if (prev_object->cred != NULL) { 2188 KASSERT(prev_object->charge >= 2189 ptoa(prev_object->size - next_pindex), 2190 ("object %p overcharged 1 %jx %jx", prev_object, 2191 (uintmax_t)next_pindex, (uintmax_t)next_size)); 2192 prev_object->charge -= ptoa(prev_object->size - 2193 next_pindex); 2194 } 2195 #endif 2196 } 2197 2198 /* 2199 * Extend the object if necessary. 2200 */ 2201 if (next_pindex + next_size > prev_object->size) 2202 prev_object->size = next_pindex + next_size; 2203 2204 VM_OBJECT_WUNLOCK(prev_object); 2205 return (TRUE); 2206 } 2207 2208 void 2209 vm_object_set_writeable_dirty(vm_object_t object) 2210 { 2211 2212 VM_OBJECT_ASSERT_WLOCKED(object); 2213 if (object->type != OBJT_VNODE) { 2214 if ((object->flags & OBJ_TMPFS_NODE) != 0) { 2215 KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs")); 2216 vm_object_set_flag(object, OBJ_TMPFS_DIRTY); 2217 } 2218 return; 2219 } 2220 object->generation++; 2221 if ((object->flags & OBJ_MIGHTBEDIRTY) != 0) 2222 return; 2223 vm_object_set_flag(object, OBJ_MIGHTBEDIRTY); 2224 } 2225 2226 /* 2227 * vm_object_unwire: 2228 * 2229 * For each page offset within the specified range of the given object, 2230 * find the highest-level page in the shadow chain and unwire it. A page 2231 * must exist at every page offset, and the highest-level page must be 2232 * wired. 2233 */ 2234 void 2235 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, 2236 uint8_t queue) 2237 { 2238 vm_object_t tobject, t1object; 2239 vm_page_t m, tm; 2240 vm_pindex_t end_pindex, pindex, tpindex; 2241 int depth, locked_depth; 2242 2243 KASSERT((offset & PAGE_MASK) == 0, 2244 ("vm_object_unwire: offset is not page aligned")); 2245 KASSERT((length & PAGE_MASK) == 0, 2246 ("vm_object_unwire: length is not a multiple of PAGE_SIZE")); 2247 /* The wired count of a fictitious page never changes. */ 2248 if ((object->flags & OBJ_FICTITIOUS) != 0) 2249 return; 2250 pindex = OFF_TO_IDX(offset); 2251 end_pindex = pindex + atop(length); 2252 again: 2253 locked_depth = 1; 2254 VM_OBJECT_RLOCK(object); 2255 m = vm_page_find_least(object, pindex); 2256 while (pindex < end_pindex) { 2257 if (m == NULL || pindex < m->pindex) { 2258 /* 2259 * The first object in the shadow chain doesn't 2260 * contain a page at the current index. Therefore, 2261 * the page must exist in a backing object. 
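* The loop below translates the page index into each successive
* backing object by adding OFF_TO_IDX(backing_object_offset); for
* example, if this object's view of its backing object begins two
* pages into it, index 5 here corresponds to index 7 there.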
2262 */ 2263 tobject = object; 2264 tpindex = pindex; 2265 depth = 0; 2266 do { 2267 tpindex += 2268 OFF_TO_IDX(tobject->backing_object_offset); 2269 tobject = tobject->backing_object; 2270 KASSERT(tobject != NULL, 2271 ("vm_object_unwire: missing page")); 2272 if ((tobject->flags & OBJ_FICTITIOUS) != 0) 2273 goto next_page; 2274 depth++; 2275 if (depth == locked_depth) { 2276 locked_depth++; 2277 VM_OBJECT_RLOCK(tobject); 2278 } 2279 } while ((tm = vm_page_lookup(tobject, tpindex)) == 2280 NULL); 2281 } else { 2282 tm = m; 2283 m = TAILQ_NEXT(m, listq); 2284 } 2285 vm_page_lock(tm); 2286 if (vm_page_xbusied(tm)) { 2287 for (tobject = object; locked_depth >= 1; 2288 locked_depth--) { 2289 t1object = tobject->backing_object; 2290 VM_OBJECT_RUNLOCK(tobject); 2291 tobject = t1object; 2292 } 2293 vm_page_busy_sleep(tm, "unwbo", true); 2294 goto again; 2295 } 2296 vm_page_unwire(tm, queue); 2297 vm_page_unlock(tm); 2298 next_page: 2299 pindex++; 2300 } 2301 /* Release the accumulated object locks. */ 2302 for (tobject = object; locked_depth >= 1; locked_depth--) { 2303 t1object = tobject->backing_object; 2304 VM_OBJECT_RUNLOCK(tobject); 2305 tobject = t1object; 2306 } 2307 } 2308 2309 struct vnode * 2310 vm_object_vnode(vm_object_t object) 2311 { 2312 2313 VM_OBJECT_ASSERT_LOCKED(object); 2314 if (object->type == OBJT_VNODE) 2315 return (object->handle); 2316 if (object->type == OBJT_SWAP && (object->flags & OBJ_TMPFS) != 0) 2317 return (object->un_pager.swp.swp_tmpfs); 2318 return (NULL); 2319 } 2320 2321 static int 2322 sysctl_vm_object_list(SYSCTL_HANDLER_ARGS) 2323 { 2324 struct kinfo_vmobject *kvo; 2325 char *fullpath, *freepath; 2326 struct vnode *vp; 2327 struct vattr va; 2328 vm_object_t obj; 2329 vm_page_t m; 2330 int count, error; 2331 2332 if (req->oldptr == NULL) { 2333 /* 2334 * If an old buffer has not been provided, generate an 2335 * estimate of the space needed for a subsequent call. 2336 */ 2337 mtx_lock(&vm_object_list_mtx); 2338 count = 0; 2339 TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2340 if (obj->type == OBJT_DEAD) 2341 continue; 2342 count++; 2343 } 2344 mtx_unlock(&vm_object_list_mtx); 2345 return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) * 2346 count * 11 / 10)); 2347 } 2348 2349 kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK); 2350 error = 0; 2351 2352 /* 2353 * VM objects are type stable and are never removed from the 2354 * list once added. This allows us to safely read obj->object_list 2355 * after reacquiring the VM object lock. 2356 */ 2357 mtx_lock(&vm_object_list_mtx); 2358 TAILQ_FOREACH(obj, &vm_object_list, object_list) { 2359 if (obj->type == OBJT_DEAD) 2360 continue; 2361 VM_OBJECT_RLOCK(obj); 2362 if (obj->type == OBJT_DEAD) { 2363 VM_OBJECT_RUNLOCK(obj); 2364 continue; 2365 } 2366 mtx_unlock(&vm_object_list_mtx); 2367 kvo->kvo_size = ptoa(obj->size); 2368 kvo->kvo_resident = obj->resident_page_count; 2369 kvo->kvo_ref_count = obj->ref_count; 2370 kvo->kvo_shadow_count = obj->shadow_count; 2371 kvo->kvo_memattr = obj->memattr; 2372 kvo->kvo_active = 0; 2373 kvo->kvo_inactive = 0; 2374 TAILQ_FOREACH(m, &obj->memq, listq) { 2375 /* 2376 * A page may belong to the object but be 2377 * dequeued and set to PQ_NONE while the 2378 * object lock is not held. This makes the 2379 * reads of m->queue below racy, and we do not 2380 * count pages set to PQ_NONE. However, this 2381 * sysctl is only meant to give an 2382 * approximation of the system anyway. 
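* (A hypothetical consumer of this sysctl, e.g. one calling
* sysctlbyname("vm.objects", buf, &len, NULL, 0), should therefore
* treat kvo_active and kvo_inactive as estimates rather than exact
* queue lengths.)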
2383 */ 2384 if (m->queue == PQ_ACTIVE) 2385 kvo->kvo_active++; 2386 else if (m->queue == PQ_INACTIVE) 2387 kvo->kvo_inactive++; 2388 } 2389 2390 kvo->kvo_vn_fileid = 0; 2391 kvo->kvo_vn_fsid = 0; 2392 kvo->kvo_vn_fsid_freebsd11 = 0; 2393 freepath = NULL; 2394 fullpath = ""; 2395 vp = NULL; 2396 switch (obj->type) { 2397 case OBJT_DEFAULT: 2398 kvo->kvo_type = KVME_TYPE_DEFAULT; 2399 break; 2400 case OBJT_VNODE: 2401 kvo->kvo_type = KVME_TYPE_VNODE; 2402 vp = obj->handle; 2403 vref(vp); 2404 break; 2405 case OBJT_SWAP: 2406 kvo->kvo_type = KVME_TYPE_SWAP; 2407 break; 2408 case OBJT_DEVICE: 2409 kvo->kvo_type = KVME_TYPE_DEVICE; 2410 break; 2411 case OBJT_PHYS: 2412 kvo->kvo_type = KVME_TYPE_PHYS; 2413 break; 2414 case OBJT_DEAD: 2415 kvo->kvo_type = KVME_TYPE_DEAD; 2416 break; 2417 case OBJT_SG: 2418 kvo->kvo_type = KVME_TYPE_SG; 2419 break; 2420 case OBJT_MGTDEVICE: 2421 kvo->kvo_type = KVME_TYPE_MGTDEVICE; 2422 break; 2423 default: 2424 kvo->kvo_type = KVME_TYPE_UNKNOWN; 2425 break; 2426 } 2427 VM_OBJECT_RUNLOCK(obj); 2428 if (vp != NULL) { 2429 vn_fullpath(curthread, vp, &fullpath, &freepath); 2430 vn_lock(vp, LK_SHARED | LK_RETRY); 2431 if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) { 2432 kvo->kvo_vn_fileid = va.va_fileid; 2433 kvo->kvo_vn_fsid = va.va_fsid; 2434 kvo->kvo_vn_fsid_freebsd11 = va.va_fsid; 2435 /* truncate */ 2436 } 2437 vput(vp); 2438 } 2439 2440 strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path)); 2441 if (freepath != NULL) 2442 free(freepath, M_TEMP); 2443 2444 /* Pack record size down */ 2445 kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path) 2446 + strlen(kvo->kvo_path) + 1; 2447 kvo->kvo_structsize = roundup(kvo->kvo_structsize, 2448 sizeof(uint64_t)); 2449 error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize); 2450 mtx_lock(&vm_object_list_mtx); 2451 if (error) 2452 break; 2453 } 2454 mtx_unlock(&vm_object_list_mtx); 2455 free(kvo, M_TEMP); 2456 return (error); 2457 } 2458 SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | 2459 CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject", 2460 "List of VM objects"); 2461 2462 #include "opt_ddb.h" 2463 #ifdef DDB 2464 #include <sys/kernel.h> 2465 2466 #include <sys/cons.h> 2467 2468 #include <ddb/ddb.h> 2469 2470 static int 2471 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) 2472 { 2473 vm_map_t tmpm; 2474 vm_map_entry_t tmpe; 2475 vm_object_t obj; 2476 int entcount; 2477 2478 if (map == 0) 2479 return 0; 2480 2481 if (entry == 0) { 2482 tmpe = map->header.next; 2483 entcount = map->nentries; 2484 while (entcount-- && (tmpe != &map->header)) { 2485 if (_vm_object_in_map(map, object, tmpe)) { 2486 return 1; 2487 } 2488 tmpe = tmpe->next; 2489 } 2490 } else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { 2491 tmpm = entry->object.sub_map; 2492 tmpe = tmpm->header.next; 2493 entcount = tmpm->nentries; 2494 while (entcount-- && tmpe != &tmpm->header) { 2495 if (_vm_object_in_map(tmpm, object, tmpe)) { 2496 return 1; 2497 } 2498 tmpe = tmpe->next; 2499 } 2500 } else if ((obj = entry->object.vm_object) != NULL) { 2501 for (; obj; obj = obj->backing_object) 2502 if (obj == object) { 2503 return 1; 2504 } 2505 } 2506 return 0; 2507 } 2508 2509 static int 2510 vm_object_in_map(vm_object_t object) 2511 { 2512 struct proc *p; 2513 2514 /* sx_slock(&allproc_lock); */ 2515 FOREACH_PROC_IN_SYSTEM(p) { 2516 if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 2517 continue; 2518 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { 2519 /* 
sx_sunlock(&allproc_lock); */ 2520 return 1; 2521 } 2522 } 2523 /* sx_sunlock(&allproc_lock); */ 2524 if (_vm_object_in_map(kernel_map, object, 0)) 2525 return 1; 2526 return 0; 2527 } 2528 2529 DB_SHOW_COMMAND(vmochk, vm_object_check) 2530 { 2531 vm_object_t object; 2532 2533 /* 2534 * make sure that internal objs are in a map somewhere 2535 * and none have zero ref counts. 2536 */ 2537 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2538 if (object->handle == NULL && 2539 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 2540 if (object->ref_count == 0) { 2541 db_printf("vmochk: internal obj has zero ref count: %ld\n", 2542 (long)object->size); 2543 } 2544 if (!vm_object_in_map(object)) { 2545 db_printf( 2546 "vmochk: internal obj is not in a map: " 2547 "ref: %d, size: %lu: 0x%lx, backing_object: %p\n", 2548 object->ref_count, (u_long)object->size, 2549 (u_long)object->size, 2550 (void *)object->backing_object); 2551 } 2552 } 2553 } 2554 } 2555 2556 /* 2557 * vm_object_print: [ debug ] 2558 */ 2559 DB_SHOW_COMMAND(object, vm_object_print_static) 2560 { 2561 /* XXX convert args. */ 2562 vm_object_t object = (vm_object_t)addr; 2563 boolean_t full = have_addr; 2564 2565 vm_page_t p; 2566 2567 /* XXX count is an (unused) arg. Avoid shadowing it. */ 2568 #define count was_count 2569 2570 int count; 2571 2572 if (object == NULL) 2573 return; 2574 2575 db_iprintf( 2576 "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n", 2577 object, (int)object->type, (uintmax_t)object->size, 2578 object->resident_page_count, object->ref_count, object->flags, 2579 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); 2580 db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n", 2581 object->shadow_count, 2582 object->backing_object ? object->backing_object->ref_count : 0, 2583 object->backing_object, (uintmax_t)object->backing_object_offset); 2584 2585 if (!full) 2586 return; 2587 2588 db_indent += 2; 2589 count = 0; 2590 TAILQ_FOREACH(p, &object->memq, listq) { 2591 if (count == 0) 2592 db_iprintf("memory:="); 2593 else if (count == 6) { 2594 db_printf("\n"); 2595 db_iprintf(" ..."); 2596 count = 0; 2597 } else 2598 db_printf(","); 2599 count++; 2600 2601 db_printf("(off=0x%jx,page=0x%jx)", 2602 (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p)); 2603 } 2604 if (count != 0) 2605 db_printf("\n"); 2606 db_indent -= 2; 2607 } 2608 2609 /* XXX. */ 2610 #undef count 2611 2612 /* XXX need this non-static entry for calling from vm_map_print. 
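* It simply forwards its arguments to the static DDB command handler
* vm_object_print_static() above.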
*/ 2613 void 2614 vm_object_print( 2615 /* db_expr_t */ long addr, 2616 boolean_t have_addr, 2617 /* db_expr_t */ long count, 2618 char *modif) 2619 { 2620 vm_object_print_static(addr, have_addr, count, modif); 2621 } 2622 2623 DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 2624 { 2625 vm_object_t object; 2626 vm_pindex_t fidx; 2627 vm_paddr_t pa; 2628 vm_page_t m, prev_m; 2629 int rcount, nl, c; 2630 2631 nl = 0; 2632 TAILQ_FOREACH(object, &vm_object_list, object_list) { 2633 db_printf("new object: %p\n", (void *)object); 2634 if (nl > 18) { 2635 c = cngetc(); 2636 if (c != ' ') 2637 return; 2638 nl = 0; 2639 } 2640 nl++; 2641 rcount = 0; 2642 fidx = 0; 2643 pa = -1; 2644 TAILQ_FOREACH(m, &object->memq, listq) { 2645 if (m->pindex > 128) 2646 break; 2647 if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL && 2648 prev_m->pindex + 1 != m->pindex) { 2649 if (rcount) { 2650 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2651 (long)fidx, rcount, (long)pa); 2652 if (nl > 18) { 2653 c = cngetc(); 2654 if (c != ' ') 2655 return; 2656 nl = 0; 2657 } 2658 nl++; 2659 rcount = 0; 2660 } 2661 } 2662 if (rcount && 2663 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 2664 ++rcount; 2665 continue; 2666 } 2667 if (rcount) { 2668 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2669 (long)fidx, rcount, (long)pa); 2670 if (nl > 18) { 2671 c = cngetc(); 2672 if (c != ' ') 2673 return; 2674 nl = 0; 2675 } 2676 nl++; 2677 } 2678 fidx = m->pindex; 2679 pa = VM_PAGE_TO_PHYS(m); 2680 rcount = 1; 2681 } 2682 if (rcount) { 2683 db_printf(" index(%ld)run(%d)pa(0x%lx)\n", 2684 (long)fidx, rcount, (long)pa); 2685 if (nl > 18) { 2686 c = cngetc(); 2687 if (c != ' ') 2688 return; 2689 nl = 0; 2690 } 2691 nl++; 2692 } 2693 } 2694 } 2695 #endif /* DDB */ 2696