/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
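 *
 *	This file implements the vm_object layer: object allocation and
 *	reference counting, shadow-chain maintenance and collapse, page
 *	cleaning (msync), page removal, and madvise support.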
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#define	EASY_SCAN_FACTOR	8

#define	MSYNC_FLUSH_HARDSEQ	0x01
#define	MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
    CTLFLAG_RW, &msync_flush_flags, 0, "");

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int curgeneration, int pagerflags);
static void	vm_object_vndeallocate(vm_object_t object);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
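 *
 *	Such shadow chains arise from virtual copy (copy-on-write):
 *	vm_object_shadow() places a new, initially empty object in
 *	front of an existing one, so that modified pages accumulate
 *	in the front object while unmodified pages are still found
 *	in the backing object.  fork(2) and MAP_PRIVATE mappings are
 *	typical sources of such chains.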
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
struct mtx vm_object_list_mtx;	/* lock for object list and count */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD, 0, "VM object stats");

static long object_collapses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses, 0, "VM object collapses");

static long object_bypasses;
SYSCTL_LONG(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses, 0, "VM object bypasses");

static uma_zone_t obj_zone;

static int vm_object_zinit(void *mem, int size, int flags);

#ifdef INVARIANTS
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	KASSERT(TAILQ_EMPTY(&object->memq),
	    ("object %p has resident pages",
	    object));
#if VM_NRESERVLEVEL > 0
	KASSERT(LIST_EMPTY(&object->rvq),
	    ("object %p has reservations",
	    object));
#endif
	KASSERT(object->cache == NULL,
	    ("object %p has cached pages",
	    object));
	KASSERT(object->paging_in_progress == 0,
	    ("object %p paging_in_progress = %d",
	    object, object->paging_in_progress));
	KASSERT(object->resident_page_count == 0,
	    ("object %p resident_page_count = %d",
	    object, object->resident_page_count));
	KASSERT(object->shadow_count == 0,
	    ("object %p shadow_count = %d",
	    object, object->shadow_count));
}
#endif

static int
vm_object_zinit(void *mem, int size, int flags)
{
	vm_object_t object;

	object = (vm_object_t)mem;
	bzero(&object->mtx, sizeof(object->mtx));
	VM_OBJECT_LOCK_INIT(object, "standard object");

	/* These are true for any object that has been freed */
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	return (0);
}

void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{

	TAILQ_INIT(&object->memq);
	LIST_INIT(&object->shadow_head);

	object->root = NULL;
	object->type = type;
	object->size = size;
	object->generation = 1;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		object->flags = OBJ_ONEMAPPING;
	object->pg_color = 0;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
	LIST_INIT(&object->rvq);
#endif
	object->cache = NULL;

	mtx_lock(&vm_object_list_mtx);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
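 *
 *	Called once, early in boot, from vm_mem_init().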
 */
void
vm_object_init(void)
{
	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);

	VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kernel_object);
#if VM_NRESERVLEVEL > 0
	kernel_object->flags |= OBJ_COLORED;
	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS), kmem_object);
#if VM_NRESERVLEVEL > 0
	kmem_object->flags |= OBJ_COLORED;
	kmem_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

	/*
	 * The lock portion of struct vm_object must be type stable due
	 * to vm_pageout_fallback_object_lock locking a vm object
	 * without holding any references to it.
	 */
	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	vm_object_t object;

	object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK);
	_vm_object_allocate(type, size, object);
	return (object);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	vm_object_reference_locked(object);
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
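 *
 *	A minimal usage sketch (hypothetical caller) that takes an
 *	extra reference while already holding the object lock:
 *
 *		VM_OBJECT_LOCK(obj);
 *		vm_object_reference_locked(obj);
 *		VM_OBJECT_UNLOCK(obj);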
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
static void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	VFS_ASSERT_GIANT(vp->v_mount);
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
	}
	VM_OBJECT_UNLOCK(object);
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {
		int vfslocked;

		vfslocked = 0;
	restart:
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			struct vnode *vp = (struct vnode *) object->handle;

			/*
			 * Conditionally acquire Giant for a vnode-backed
			 * object.  We have to be careful since the type of
			 * a vnode object can change while the object is
			 * unlocked.
			 */
			if (VFS_NEEDSGIANT(vp->v_mount) && !vfslocked) {
				vfslocked = 1;
				if (!mtx_trylock(&Giant)) {
					VM_OBJECT_UNLOCK(object);
					mtx_lock(&Giant);
					goto restart;
				}
			}
			vm_object_vndeallocate(object);
			VFS_UNLOCK_GIANT(vfslocked);
			return;
		} else
			/*
			 * This is to handle the case that the object
			 * changed type while we dropped its lock to
			 * obtain Giant.
			 */
			VFS_UNLOCK_GIANT(vfslocked);

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0 &&
			    object->handle == NULL &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
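					 * There is no fixed lock order
					 * between an object and the object
					 * shadowing it, so blocking on
					 * robject's lock while holding
					 * object's could deadlock against
					 * a thread taking the locks in the
					 * opposite order.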
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
					 */
					pause("vmo_de", 1);
					continue;
				}
				/*
				 * Collapse object into its shadow unless its
				 * shadow is dead.  In that case, object will
				 * be deallocated by the thread that is
				 * deallocating its shadow.
				 */
				if ((robject->flags & OBJ_DEAD) == 0 &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						temp = robject->backing_object;
						if (object == temp) {
							VM_OBJECT_LOCK(object);
							goto retry;
						}
					} else
						VM_OBJECT_UNLOCK(object);

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			return;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate, we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, NULL, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
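	 * A wired page may still be in use (e.g. wired for I/O), so
	 * freeing it here could let it be reallocated while referenced.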
	 */
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->oflags & VPO_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();

#if VM_NRESERVLEVEL > 0
	if (__predict_false(!LIST_EMPTY(&object->rvq)))
		vm_reserv_break_all(object);
#endif
	if (__predict_false(object->cache != NULL))
		vm_page_cache_free(object, 0, 0);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	vm_page_lock_queues();
	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if ((p->dirty & p->valid) == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) &&
			    (p->oflags & VPO_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p,
			    curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_page_unlock_queues();
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	TAILQ_FOREACH(p, &object->memq, listq) {
		p->oflags |= VPO_CLEANCHK;
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
			clearobjflags = 0;
		else
			pmap_remove_write(p);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if ((p->oflags & VPO_CLEANCHK) == 0 ||
		    (pi < tstart) || (pi >= tend) ||
		    p->valid == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
			p->oflags &= ~VPO_CLEANCHK;
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
		    curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	vm_page_unlock_queues();
#if 0
	VOP_FSYNC(vp, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0,
	    curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
    int curgeneration, int pagerflags)
{
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		vm_page_lock_queues();
		if (object->generation != curgeneration) {
			return (0);
		}
	}
	maxf = 0;
	for (i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->oflags & VPO_BUSY) ||
			    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
			     (tp->oflags & VPO_CLEANCHK) == 0) ||
			    (tp->busy != 0))
				break;
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				tp->oflags &= ~VPO_CLEANCHK;
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for (i = 1; i < chkb; i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->oflags & VPO_BUSY) ||
				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				     (tp->oflags & VPO_CLEANCHK) == 0) ||
				    (tp->busy != 0))
					break;
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					tp->oflags &= ~VPO_CLEANCHK;
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for (i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;

		ma[index] = mab[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	p->oflags &= ~VPO_CLEANCHK;
	ma[maxb] = p;
	for (i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;

		ma[index] = maf[i];
		ma[index]->oflags &= ~VPO_CLEANCHK;
	}
	runlen = maxb + maxf + 1;

	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			pmap_remove_write(ma[i]);
			ma[i]->oflags |= VPO_CLEANCHK;

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return (maxf + 1);
}

/*
 * Note that there is absolutely no sense in writing out
 * anonymous objects, so we track down the vnode object
 * to write out.
 * We invalidate (remove) all pages from the address space
 * for semantic correctness.
 *
 * Note: certain anonymous maps, such as MAP_NOSYNC maps,
 * may start out with a NULL object.
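 *
 * This is typically reached via msync(2): vm_map_sync() walks the map
 * entries in the requested range and calls vm_object_sync() on each
 * entry's object, with syncio and invalidate reflecting MS_SYNC and
 * MS_INVALIDATE respectively.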
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	struct mount *mp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		int vfslocked;

		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		(void) vn_start_write(vp, &mp, V_WAIT);
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0);
		VFS_UNLOCK_GIANT(vfslocked);
		vn_finished_write(mp);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	     object->type == OBJT_DEVICE) && invalidate) {
		boolean_t purge;

		purge = old_msync || (object->type == OBJT_DEVICE);
		vm_object_page_remove(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    purge ? FALSE : TRUE);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	end = pindex + count;
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
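		 * (OBJ_ONEMAPPING guarantees that no other mapping can see
		 * the pages, so discarding their contents is safe.)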
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		}
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL && advise == MADV_WILLNEED) {
			/*
			 * If the page is cached, reactivate it.
			 */
			m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
			    VM_ALLOC_NOBUSY);
		}
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * next object
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_LOCK(backing_object);
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			if (tobject != object)
				VM_OBJECT_UNLOCK(tobject);
			tobject = backing_object;
			goto shadowlookup;
		}
		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		vm_page_lock_queues();
		if (m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL) {
			vm_page_unlock_queues();
			goto unlock_tobject;
		}
		if ((m->oflags & VPO_BUSY) || m->busy) {
			vm_page_flag_set(m, PG_REFERENCED);
			vm_page_unlock_queues();
			if (object != tobject)
				VM_OBJECT_UNLOCK(object);
			m->oflags |= VPO_WANTED;
			msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM,
			    "madvpo", 0);
			VM_OBJECT_LOCK(object);
			goto relookup;
		}
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
		}
		vm_page_unlock_queues();
		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
		if (tobject != object)
			VM_OBJECT_UNLOCK(tobject);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
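	 * An unshared anonymous object can be written in place, so a
	 * copy-on-write shadow would add a chain level for no benefit.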
	 */
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			VM_OBJECT_UNLOCK(source);
			return;
		}
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
#if VM_NRESERVLEVEL > 0
		result->flags |= source->flags & (OBJ_NEEDGIANT | OBJ_COLORED);
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) &
		    ((1 << (VM_NFREEORDER - 1)) - 1);
#else
		result->flags |= source->flags & OBJ_NEEDGIANT;
#endif
		VM_OBJECT_UNLOCK(source);
	}


	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}

/*
 *	vm_object_split:
 *
 *	Split the pages in a map entry into a new object.  This affords
 *	easier removal of unused pages, and keeps object inheritance from
 *	being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m, m_next;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t idx, offidxstart;
	vm_size_t size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT &&
	    orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_UNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	size = atop(entry->end - entry->start);

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	/*
	 * At this point, the new object is still private, so the order in
	 * which the original and new objects are locked does not matter.
	 */
	VM_OBJECT_LOCK(new_object);
	VM_OBJECT_LOCK(orig_object);
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if ((source->flags & OBJ_DEAD) != 0) {
			VM_OBJECT_UNLOCK(source);
			VM_OBJECT_UNLOCK(orig_object);
			VM_OBJECT_UNLOCK(new_object);
			vm_object_deallocate(new_object);
			VM_OBJECT_LOCK(orig_object);
			return;
		}
		LIST_INSERT_HEAD(&source->shadow_head,
		    new_object, shadow_list);
		source->shadow_count++;
		source->generation++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(source);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	new_object->flags |= orig_object->flags & OBJ_NEEDGIANT;
retry:
	if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
		if (m->pindex < offidxstart) {
			m = vm_page_splay(offidxstart, orig_object->root);
			if ((orig_object->root = m)->pindex < offidxstart)
				m = TAILQ_NEXT(m, listq);
		}
	}
	vm_page_lock_queues();
	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
	    m = m_next) {
		m_next = TAILQ_NEXT(m, listq);

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		if ((m->oflags & VPO_BUSY) || m->busy) {
			vm_page_flag_set(m, PG_REFERENCED);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(new_object);
			m->oflags |= VPO_WANTED;
			msleep(m, VM_OBJECT_MTX(orig_object), PVM, "spltwt", 0);
			VM_OBJECT_LOCK(new_object);
			goto retry;
		}
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
	}
	vm_page_unlock_queues();
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);

		/*
		 * Transfer any cached pages from orig_object to new_object.
		 */
		if (__predict_false(orig_object->cache != NULL))
			vm_page_cache_transfer(orig_object, offidxstart,
			    new_object);
	}
	VM_OBJECT_UNLOCK(orig_object);
	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_wakeup(m);
	VM_OBJECT_UNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_LOCK(new_object);
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int
vm_object_backing_scan(vm_object_t object, int op)
{
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of cache
		 * or swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			if (op & OBSC_COLLAPSE_NOWAIT) {
				if ((p->oflags & VPO_BUSY) ||
				    !p->valid ||
				    p->busy) {
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if ((p->oflags & VPO_BUSY) || p->busy) {
					vm_page_lock_queues();
					vm_page_flag_set(p, PG_REFERENCED);
					vm_page_unlock_queues();
					VM_OBJECT_UNLOCK(object);
					p->oflags |= VPO_WANTED;
					msleep(p, VM_OBJECT_MTX(backing_object),
					    PDROP | PVM, "vmocol", 0);
					VM_OBJECT_LOCK(object);
					VM_OBJECT_LOCK(backing_object);
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_backing_scan: object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_lock_queues();
				KASSERT(!pmap_page_is_mapped(p),
				    ("freeing mapped page %p", p));
				if (p->wire_count == 0)
					vm_page_free(p);
				else
					vm_page_remove(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_lock_queues();
				KASSERT(!pmap_page_is_mapped(p),
				    ("freeing mapped page %p", p));
				if (p->wire_count == 0)
					vm_page_free(p);
				else
					vm_page_remove(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

#if VM_NRESERVLEVEL > 0
			/*
			 * Rename the reservation.
			 */
			vm_reserv_rename(p, object, backing_object,
			    backing_offset_index);
#endif

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			vm_page_lock_queues();
			vm_page_rename(p, object, new_pindex);
			vm_page_unlock_queues();
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	return (r);
}


/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);

	if (backing_object->ref_count != 1)
		return;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		VM_OBJECT_LOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}
		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
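			 *
			 * Schematically, a chain
			 *
			 *	object -> backing_object -> X
			 *
			 * becomes
			 *
			 *	object -> X
			 *
			 * after backing_object's pages and swap are moved
			 * into object and backing_object is freed.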
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

#if VM_NRESERVLEVEL > 0
			/*
			 * Break any reservations from backing_object.
			 */
			if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
				vm_reserv_break_all(backing_object);
#endif

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);

				/*
				 * Free any cached pages from backing_object.
				 */
				if (__predict_false(backing_object->cache != NULL))
					vm_page_cache_free(backing_object, 0, 0);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;
			if (backing_object->backing_object) {
				VM_OBJECT_LOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				backing_object->backing_object->generation++;
				VM_OBJECT_UNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1,
			    ("backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			VM_OBJECT_UNLOCK(backing_object);

			mtx_lock(&vm_object_list_mtx);
			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			mtx_unlock(&vm_object_list_mtx);

			uma_zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (object->resident_page_count != object->size &&
			    vm_object_backing_scan(object,
			    OBSC_TEST_ALL_SHADOWED) == 0) {
				VM_OBJECT_UNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
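			 *
			 * This is the "bypass" case: with
			 *
			 *	object -> backing_object -> X
			 *
			 * and every page of backing_object shadowed by
			 * object, object can point directly at X while
			 * backing_object lives on for its other referents.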
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_LOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_UNLOCK(new_backing_object);
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_UNLOCK(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove:
 *
 *	For the given object, either frees or invalidates each of the
 *	specified pages.  In general, a page is freed.  However, if a
 *	page is wired for any reason other than the existence of a
 *	managed, wired mapping, then it may be invalidated but not
 *	removed from the object.  Pages are specified by the given
 *	range ["start", "end") and Boolean "clean_only".  As a
 *	special case, if "end" is zero, then the range extends from
 *	"start" to the end of the object.  If "clean_only" is TRUE,
 *	then only the non-dirty pages within the specified range are
 *	affected.
 *
 *	In general, this operation should only be performed on objects
 *	that contain managed pages.  There are two exceptions.  First,
 *	it may be performed on the kernel and kmem objects.  Second,
 *	it may be used by msync(..., MS_INVALIDATE) to invalidate
 *	device-backed pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    boolean_t clean_only)
{
	vm_page_t p, next;
	int wirings;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->resident_page_count == 0)
		goto skipmemq;

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS || object == kernel_object ||
	    object == kmem_object,
	    ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	vm_page_lock_queues();
	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->pindex < start) {
			p = vm_page_splay(start, object->root);
			if ((object->root = p)->pindex < start)
				p = TAILQ_NEXT(p, listq);
		}
	}
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	    p != NULL && (p->pindex < end || end == 0);
	    p = next) {
		next = TAILQ_NEXT(p, listq);

		/*
		 * If the page is wired for any reason besides the
		 * existence of managed, wired mappings, then it cannot
		 * be freed.  For example, fictitious pages, which
		 * represent device memory, are inherently wired and
		 * cannot be freed.  They can, however, be invalidated
		 * if "clean_only" is FALSE.
		 */
		if ((wirings = p->wire_count) != 0 &&
		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
			/* Fictitious pages do not have managed mappings. */
			if ((p->flags & PG_FICTITIOUS) == 0)
				pmap_remove_all(p);
			/* Account for removal of managed, wired mappings. */
			p->wire_count -= wirings;
			if (!clean_only)
				p->valid = 0;
			continue;
		}
		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
			goto again;
		KASSERT((p->flags & PG_FICTITIOUS) == 0,
		    ("vm_object_page_remove: page %p is fictitious", p));
		if (clean_only && p->valid) {
			pmap_remove_write(p);
			if (p->valid & p->dirty)
				continue;
		}
		pmap_remove_all(p);
		/* Account for removal of managed, wired mappings. */
		if (wirings != 0)
			p->wire_count -= wirings;
		vm_page_free(p);
	}
	vm_page_unlock_queues();
	vm_object_pip_wakeup(object);
skipmemq:
	if (__predict_false(object->cache != NULL))
		vm_page_cache_free(object, start, end);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to the second object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
    vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);
	VM_OBJECT_LOCK(prev_object);
	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * - more than one reference
	 * - paged out
	 * - shadows another object
	 * - has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry
	 * may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = OFF_TO_IDX(prev_offset) + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
		    next_pindex,
		    next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
/*
 * Note that the object may contain dirty pages: set OBJ_MIGHTBEDIRTY on
 * the object and, for a vnode-backed object, VI_OBJDIRTY on its vnode.
 */
void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
		return;
	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		VI_LOCK(vp);
		vp->v_iflag |= VI_OBJDIRTY;
		VI_UNLOCK(vp);
	}
}

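/*
 * Example (illustrative sketch, excluded from compilation): a fault
 * handler that is about to hand out a writeable mapping of a
 * vnode-backed page would flag the object first, with the object lock
 * held, so that a later msync()/fsync() knows the vnode may have dirty
 * pages.  The helper name "example_note_write_fault" is hypothetical.
 */
#if 0
static void
example_note_write_fault(vm_object_t object)
{

	VM_OBJECT_LOCK(object);
	if (object->type == OBJT_VNODE)
		vm_object_set_writeable_dirty(object);
	VM_OBJECT_UNLOCK(object);
}
#endif
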
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

/*
 * Returns non-zero if "object" backs (possibly through a chain of backing
 * objects) some entry of "map".  When "entry" is NULL, every entry of the
 * map is checked; sub-maps are searched recursively.
 */
static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == NULL)
		return (0);

	if (entry == NULL) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe))
				return (1);
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe))
				return (1);
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object)
				return (1);
	}
	return (0);
}

/*
 * Returns non-zero if "object" is mapped somewhere in the system, either
 * in a process's vmspace or in one of the kernel's maps.
 */
static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	FOREACH_PROC_IN_SYSTEM(p) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, NULL)) {
			/* sx_sunlock(&allproc_lock); */
			return (1);
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, NULL))
		return (1);
	if (_vm_object_in_map(kmem_map, object, NULL))
		return (1);
	if (_vm_object_in_map(pager_map, object, NULL))
		return (1);
	if (_vm_object_in_map(buffer_map, object, NULL))
		return (1);
	return (0);
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * Make sure that internal objects are in a map somewhere and that
	 * none have a zero ref count.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf(
			"vmochk: internal obj has zero ref count: size %ld\n",
				    (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu (0x%lx), backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

/*
 * For every object in the system, print runs of physically contiguous
 * resident pages among the object's first 128 pages, pausing after each
 * screenful of output.
 */
DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_paddr_t pa = -1;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					    (long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}
			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				    (long)fidx, rcount, (long)pa);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
			    (long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */
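
/*
 * Usage note (illustrative): the DDB commands defined above are run from
 * the kernel debugger prompt, for example:
 *
 *	db> show vmochk
 *	db> show object 0xc1234567
 *	db> show vmopag
 *
 * "show object" prints the resident page list only when an explicit
 * address argument is supplied (so that have_addr is true), and
 * "show vmopag" pauses after each screenful, continuing while the space
 * bar is pressed.  The address above is a made-up example.
 */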