/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
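 *
 *	This file provides the routines that allocate, reference, copy,
 *	shadow, collapse and destroy VM objects, maintains the hash table
 *	mapping pagers to objects, and manages the cache of persistent
 *	(unreferenced but reusable) objects.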
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

static void _vm_object_allocate(vm_size_t, vm_object_t);
void vm_object_deactivate_pages(vm_object_t);
void vm_object_cache_trim(void);
void vm_object_remove(vm_pager_t);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct vm_object kernel_object_store;
struct vm_object kmem_object_store;

extern int vm_cache_max;
#define	VM_OBJECT_HASH_COUNT	157

struct vm_object_hash_head vm_object_hashtable[VM_OBJECT_HASH_COUNT];

long object_collapses = 0;
long object_bypasses = 0;

static void
_vm_object_allocate(size, object)
	vm_size_t size;
	register vm_object_t object;
{
	bzero(object, sizeof *object);
	TAILQ_INIT(&object->memq);
	vm_object_lock_init(object);
	object->ref_count = 1;
	object->resident_page_count = 0;
	object->size = size;
	object->flags = OBJ_INTERNAL;	/* vm_allocate_with_pager will reset */
	object->paging_in_progress = 0;
	object->copy = NULL;

	/*
	 * Object starts out read-write, with no pager.
	 */

	object->pager = NULL;
	object->paging_offset = 0;
	object->shadow = NULL;
	object->shadow_offset = (vm_offset_t) 0;

	simple_lock(&vm_object_list_lock);
	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	cnt.v_nzfod += atop(size);
	simple_unlock(&vm_object_list_lock);
}

/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(vm_offset_t nothing)
{
	register int i;

	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	vm_object_count = 0;
	simple_lock_init(&vm_cache_lock);
	simple_lock_init(&vm_object_list_lock);

	for (i = 0; i < VM_OBJECT_HASH_COUNT; i++)
		TAILQ_INIT(&vm_object_hashtable[i]);

	kernel_object = &kernel_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    kmem_object);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
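 *	The object carries a single reference and has already been
 *	linked onto the global object list by _vm_object_allocate().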
 */

vm_object_t
vm_object_allocate(size)
	vm_size_t size;
{
	register vm_object_t result;
	int s;

	result = (vm_object_t)
	    malloc((u_long)sizeof *result, M_VMOBJ, M_WAITOK);

	_vm_object_allocate(size, result);

	return(result);
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
inline void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	vm_object_lock(object);
	object->ref_count++;
	vm_object_unlock(object);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	while (object != NULL) {

		/*
		 * The cache holds a reference (uncounted) to
		 * the object; we must lock it before removing
		 * the object.
		 */

		vm_object_cache_lock();

		/*
		 * Lose the reference
		 */
		vm_object_lock(object);
		if (--(object->ref_count) != 0) {

			vm_object_unlock(object);
			/*
			 * If there are still references, then
			 * we are done.
			 */
			vm_object_cache_unlock();
			return;
		}

		/*
		 * See if this object can persist.  If so, enter
		 * it in the cache, then deactivate all of its
		 * pages.
		 */

		if (object->flags & OBJ_CANPERSIST) {

			TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
			    cached_list);
			vm_object_cached++;
			vm_object_cache_unlock();

			/*
			 * this code segment was removed because it kills performance with
			 * large, repetitively used binaries.  The functionality now resides
			 * in the pageout daemon
			 * vm_object_deactivate_pages(object);
			 */
			vm_object_unlock(object);

			vm_object_cache_trim();
			return;
		}

		/*
		 * Make sure no one can look us up now.
		 */
		vm_object_remove(object->pager);
		vm_object_cache_unlock();

		temp = object->shadow;
		vm_object_terminate(object);
			/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p;
	vm_object_t shadow_object;
	int s;

	/*
	 * Detach the object from its shadow if we are the shadow's
	 * copy.
	 */
	if ((shadow_object = object->shadow) != NULL) {
		vm_object_lock(shadow_object);
		if (shadow_object->copy == object)
			shadow_object->copy = NULL;
/*
		else if (shadow_object->copy != NULL)
			panic("vm_object_terminate: copy/shadow inconsistency");
*/
		vm_object_unlock(shadow_object);
	}

	/*
	 * Wait until the pageout daemon is through
	 * with the object.
	 */

	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}

	/*
	 * While the paging system is locked,
	 * pull the object's pages off the active
	 * and inactive queues.  This keeps the
	 * pageout daemon from playing with them
	 * during vm_pager_deallocate.
	 *
	 * We can't free the pages yet, because the
	 * object's pager may have to write them out
	 * before deallocating the paging space.
	 */

	for (p = object->memq.tqh_first; p; p = p->listq.tqe_next) {
		VM_PAGE_CHECK(p);

		vm_page_lock_queues();
		s = splimp();
		if (p->flags & PG_ACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_active, p, pageq);
			p->flags &= ~PG_ACTIVE;
			cnt.v_active_count--;
		}

		if (p->flags & PG_INACTIVE) {
			TAILQ_REMOVE(&vm_page_queue_inactive, p, pageq);
			p->flags &= ~PG_INACTIVE;
			cnt.v_inactive_count--;
		}
		splx(s);
		vm_page_unlock_queues();
	}

	vm_object_unlock(object);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 * Clean and free the pages, as appropriate.
	 * All references to the object are gone,
	 * so we don't need to lock it.
	 */

	if ((object->flags & OBJ_INTERNAL) == 0) {
		vm_object_lock(object);
		(void) vm_object_page_clean(object, 0, 0, TRUE, TRUE);
		vm_object_unlock(object);
	}

	/*
	 * Now free the pages.
	 * For internal objects, this also removes them from paging queues.
	 */
	while ((p = object->memq.tqh_first) != NULL) {
		VM_PAGE_CHECK(p);
		vm_page_lock_queues();
		vm_page_free(p);
		cnt.v_pfree++;
		vm_page_unlock_queues();
	}

	/*
	 * Let the pager know object is dead.
	 */

	if (object->pager != NULL)
		vm_pager_deallocate(object->pager);


	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	/*
	 * Free the space for the object.
	 */

	free((caddr_t)object, M_VMOBJ);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
#if 1
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
	boolean_t syncio;
	boolean_t de_queue;
{
	register vm_page_t p, nextp;
	int s;
	int size;

	if (object->pager == NULL)
		return 1;

	if (start != end) {
		start = trunc_page(start);
		end = round_page(end);
	}
	size = end - start;

again:
	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
	}

	nextp = object->memq.tqh_first;
	while ((p = nextp) && ((start == end) || (size != 0))) {
		nextp = p->listq.tqe_next;
		if (start == end || (p->offset >= start && p->offset < end)) {
			if (p->flags & PG_BUSY)
				continue;

			size -= PAGE_SIZE;

			if ((p->flags & PG_CLEAN)
			    && pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;

			if ((p->flags & PG_CLEAN) == 0) {
				vm_pageout_clean(p, VM_PAGEOUT_FORCE);
				goto again;
			}
		}
	}
	wakeup((caddr_t)object);
	return 1;
}
#endif
/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	If syncio is TRUE, page cleaning is done synchronously.
 *	If de_queue is TRUE, pages are removed from any paging queue
 *	they were on, otherwise they are left on whatever queue they
 *	were on before the cleaning operation began.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 *
 *	Returns TRUE if all was well, FALSE if there was a pager error
 *	somewhere.  We attempt to clean (and dequeue) all pages regardless
 *	of where an error occurs.
 */
#if 0
boolean_t
vm_object_page_clean(object, start, end, syncio, de_queue)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
	boolean_t syncio;
	boolean_t de_queue;
{
	register vm_page_t p;
	int onqueue;
	boolean_t noerror = TRUE;

	if (object == NULL)
		return (TRUE);

	/*
	 * If it is an internal object and there is no pager, attempt to
	 * allocate one.  Note that vm_object_collapse may relocate one
	 * from a collapsed object so we must recheck afterward.
	 */
	if ((object->flags & OBJ_INTERNAL) && object->pager == NULL) {
		vm_object_collapse(object);
		if (object->pager == NULL) {
			vm_pager_t pager;

			vm_object_unlock(object);
			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
			    object->size, VM_PROT_ALL,
			    (vm_offset_t)0);
			if (pager)
				vm_object_setpager(object, pager, 0, FALSE);
			vm_object_lock(object);
		}
	}
	if (object->pager == NULL)
		return (FALSE);

again:
	/*
	 * Wait until the pageout daemon is through with the object.
	 */
	while (object->paging_in_progress) {
		vm_object_sleep((int)object, object, FALSE);
		vm_object_lock(object);
	}
	/*
	 * Loop through the object page list cleaning as necessary.
	 */
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		onqueue = 0;
		if ((start == end || (p->offset >= start && p->offset < end)) &&
		    !(p->flags & PG_FICTITIOUS)) {
			if ((p->flags & PG_CLEAN) &&
			    pmap_is_modified(VM_PAGE_TO_PHYS(p)))
				p->flags &= ~PG_CLEAN;
			/*
			 * Remove the page from any paging queue.
			 * This needs to be done if either we have been
			 * explicitly asked to do so or it is about to
			 * be cleaned (see comment below).
			 */
			if (de_queue || !(p->flags & PG_CLEAN)) {
				vm_page_lock_queues();
				if (p->flags & PG_ACTIVE) {
					TAILQ_REMOVE(&vm_page_queue_active,
					    p, pageq);
					p->flags &= ~PG_ACTIVE;
					cnt.v_active_count--;
					onqueue = 1;
				} else if (p->flags & PG_INACTIVE) {
					TAILQ_REMOVE(&vm_page_queue_inactive,
					    p, pageq);
					p->flags &= ~PG_INACTIVE;
					cnt.v_inactive_count--;
					onqueue = -1;
				} else
					onqueue = 0;
				vm_page_unlock_queues();
			}
			/*
			 * To ensure the state of the page doesn't change
			 * during the clean operation we do two things.
			 * First we set the busy bit and write-protect all
			 * mappings to ensure that write accesses to the
			 * page block (in vm_fault).  Second, we remove
			 * the page from any paging queue to foil the
			 * pageout daemon (vm_pageout_scan).
			 */
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			if (!(p->flags & PG_CLEAN)) {
				p->flags |= PG_BUSY;
				object->paging_in_progress++;
				vm_object_unlock(object);
				/*
				 * XXX if put fails we mark the page as
				 * clean to avoid an infinite loop.
				 * Will lose changes to the page.
				 */
				if (vm_pager_put(object->pager, p, syncio)) {
					printf("%s: pager_put error\n",
					    "vm_object_page_clean");
					p->flags |= PG_CLEAN;
					noerror = FALSE;
				}
				vm_object_lock(object);
				object->paging_in_progress--;
				if (!de_queue && onqueue) {
					vm_page_lock_queues();
					if (onqueue > 0)
						vm_page_activate(p);
					else
						vm_page_deactivate(p);
					vm_page_unlock_queues();
				}
				p->flags &= ~PG_BUSY;
				PAGE_WAKEUP(p);
				goto again;
			}
		}
	}
	return (noerror);
}
#endif

/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = object->memq.tqh_first; p != NULL; p = next) {
		next = p->listq.tqe_next;
		vm_page_lock_queues();
		vm_page_deactivate(p);
		vm_page_unlock_queues();
	}
}

/*
 *	Trim the object cache to size.
 */
void
vm_object_cache_trim()
{
	register vm_object_t object;

	vm_object_cache_lock();
	while (vm_object_cached > vm_cache_max) {
		object = vm_object_cached_list.tqh_first;
		vm_object_cache_unlock();

		if (object != vm_object_lookup(object->pager))
			panic("vm_object_cache_trim: I'm sooo confused.");

		pager_cache(object, FALSE);

		vm_object_cache_lock();
	}
	vm_object_cache_unlock();
}


/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void vm_object_pmap_copy(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;

	vm_object_lock(object);
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start <= p->offset) && (p->offset < end)) {
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_READ);
			p->flags |= PG_COPYONWRITE;
		}
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;

	vm_object_lock(object);
again:
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if ((start <= p->offset) && (p->offset < end)) {
			if (p->flags & PG_BUSY) {
				p->flags |= PG_WANTED;
				tsleep((caddr_t) p, PVM, "vmopmr", 0);
				goto again;
			}
			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
			if ((p->flags & PG_CLEAN) == 0)
				p->flags |= PG_LAUNDRY;
		}
	}
	vm_object_unlock(object);
}

/*
 *	vm_object_copy:
 *
 *	Create a new object which is a copy of an existing
 *	object, and mark all of the pages in the existing
 *	object 'copy-on-write'.  The new object has one reference.
 *	Returns the new object.
 *
 *	May defer the copy until later if the object is not backed
 *	up by a non-default pager.
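 *	In the deferred case the source object itself is handed back
 *	(with its pages marked copy-on-write) and *src_needs_copy is
 *	set; the caller is expected to create a shadow object before
 *	allowing any writes.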
 */
void vm_object_copy(src_object, src_offset, size,
		dst_object, dst_offset, src_needs_copy)
	register vm_object_t src_object;
	vm_offset_t src_offset;
	vm_size_t size;
	vm_object_t *dst_object;	/* OUT */
	vm_offset_t *dst_offset;	/* OUT */
	boolean_t *src_needs_copy;	/* OUT */
{
	register vm_object_t new_copy;
	register vm_object_t old_copy;
	vm_offset_t new_start, new_end;

	register vm_page_t p;

	if (src_object == NULL) {
		/*
		 * Nothing to copy
		 */
		*dst_object = NULL;
		*dst_offset = 0;
		*src_needs_copy = FALSE;
		return;
	}


	/*
	 * If the object's pager is null_pager or the
	 * default pager, we don't have to make a copy
	 * of it.  Instead, we set the needs copy flag and
	 * make a shadow later.
	 */

	vm_object_lock(src_object);

	/*
	 * Try to collapse the object before copying it.
	 */

	vm_object_collapse(src_object);

	if (src_object->pager == NULL ||
	    src_object->pager->pg_type == PG_SWAP ||
	    (src_object->flags & OBJ_INTERNAL)) {

		/*
		 * Make another reference to the object
		 */
		src_object->ref_count++;

		/*
		 * Mark all of the pages copy-on-write.
		 */
		for (p = src_object->memq.tqh_first; p; p = p->listq.tqe_next)
			if (src_offset <= p->offset &&
			    p->offset < src_offset + size)
				p->flags |= PG_COPYONWRITE;
		vm_object_unlock(src_object);

		*dst_object = src_object;
		*dst_offset = src_offset;

		/*
		 * Must make a shadow when write is desired
		 */
		*src_needs_copy = TRUE;
		return;
	}


	/*
	 * If the object has a pager, the pager wants to
	 * see all of the changes.  We need a copy-object
	 * for the changed pages.
	 *
	 * If there is a copy-object, and it is empty,
	 * no changes have been made to the object since the
	 * copy-object was made.  We can use the same copy-
	 * object.
	 */

    Retry1:
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 * Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);

			/* should spin a bit here... */
			vm_object_lock(src_object);
			goto Retry1;
		}

		if (old_copy->resident_page_count == 0 &&
		    old_copy->pager == NULL) {
			/*
			 * Return another reference to
			 * the existing copy-object.
			 */
			old_copy->ref_count++;
			vm_object_unlock(old_copy);
			vm_object_unlock(src_object);
			*dst_object = old_copy;
			*dst_offset = src_offset;
			*src_needs_copy = FALSE;
			return;
		}
		vm_object_unlock(old_copy);
	}
	vm_object_unlock(src_object);

	/*
	 * If the object has a pager, the pager wants
	 * to see all of the changes.  We must make
	 * a copy-object and put the changed pages there.
	 *
	 * The copy-object is always made large enough to
	 * completely shadow the original object, since
	 * it may have several users who want to shadow
	 * the original object at different points.
	 */

	new_copy = vm_object_allocate(src_object->size);

    Retry2:
	vm_object_lock(src_object);
	/*
	 * Copy object may have changed while we were unlocked
	 */
	old_copy = src_object->copy;
	if (old_copy != NULL) {
		/*
		 * Try to get the locks (out of order)
		 */
		if (!vm_object_lock_try(old_copy)) {
			vm_object_unlock(src_object);
			goto Retry2;
		}

		/*
		 * Consistency check
		 */
		if (old_copy->shadow != src_object ||
		    old_copy->shadow_offset != (vm_offset_t) 0)
			panic("vm_object_copy: copy/shadow inconsistency");

		/*
		 * Make the old copy-object shadow the new one.
		 * It will receive no more pages from the original
		 * object.
		 */

		src_object->ref_count--;	/* remove ref. from old_copy */
		old_copy->shadow = new_copy;
		new_copy->ref_count++;		/* locking not needed - we
						   have the only pointer */
		vm_object_unlock(old_copy);	/* done with old_copy */
	}

	new_start = (vm_offset_t) 0;	/* always shadow original at 0 */
	new_end = (vm_offset_t) new_copy->size;	/* for the whole object */

	/*
	 * Point the new copy at the existing object.
	 */

	new_copy->shadow = src_object;
	new_copy->shadow_offset = new_start;
	src_object->ref_count++;
	src_object->copy = new_copy;

	/*
	 * Mark all the affected pages of the existing object
	 * copy-on-write.
	 */
	for (p = src_object->memq.tqh_first; p != NULL; p = p->listq.tqe_next)
		if ((new_start <= p->offset) && (p->offset < new_end))
			p->flags |= PG_COPYONWRITE;

	vm_object_unlock(src_object);

	*dst_object = new_copy;
	*dst_offset = src_offset - new_start;
	*src_needs_copy = FALSE;
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_offset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	register vm_object_t source;
	register vm_object_t result;

	source = *object;

	/*
	 * Allocate a new object with the given length
	 */

	if ((result = vm_object_allocate(length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding
	 * a reference to it.  Our caller changes his reference
	 * to point to the new object, removing a reference to
	 * the source object.  Net result: no change of reference
	 * count.
	 */
	result->shadow = source;

	/*
	 * Store the offset into the source object,
	 * and fix up the offset into the new object.
	 */

	result->shadow_offset = *offset;

	/*
	 * Return the new things
	 */

	*offset = 0;
	*object = result;
}

/*
 *	Set the specified object's pager to the specified pager.
 */

void
vm_object_setpager(object, pager, paging_offset,
		read_only)
	vm_object_t object;
	vm_pager_t pager;
	vm_offset_t paging_offset;
	boolean_t read_only;
{
#ifdef	lint
	read_only++;	/* No longer used */
#endif	/* lint */

	vm_object_lock(object);			/* XXX ? */
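	/*
	 * An object should be handed a pager at most once; a second,
	 * different pager indicates a bug in the caller.
	 */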
	if (object->pager && object->pager != pager) {
		panic("!!!pager already allocated!!!\n");
	}
	object->pager = pager;
	object->paging_offset = paging_offset;
	vm_object_unlock(object);		/* XXX ? */
}

/*
 *	vm_object_hash hashes the pager pointer.
 */

#define vm_object_hash(pager) \
	(((unsigned)pager >> 5) % VM_OBJECT_HASH_COUNT)

/*
 *	vm_object_lookup looks in the object cache for an object with the
 *	specified pager and paging id.
 */

vm_object_t vm_object_lookup(pager)
	vm_pager_t pager;
{
	register vm_object_hash_entry_t entry;
	vm_object_t object;

	vm_object_cache_lock();

	for (entry = vm_object_hashtable[vm_object_hash(pager)].tqh_first;
	    entry != NULL;
	    entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			vm_object_lock(object);
			if (object->ref_count == 0) {
				TAILQ_REMOVE(&vm_object_cached_list, object,
				    cached_list);
				vm_object_cached--;
			}
			object->ref_count++;
			vm_object_unlock(object);
			vm_object_cache_unlock();
			return(object);
		}
	}

	vm_object_cache_unlock();
	return(NULL);
}

/*
 *	vm_object_enter enters the specified object/pager/id into
 *	the hash table.
 */

void vm_object_enter(object, pager)
	vm_object_t object;
	vm_pager_t pager;
{
	struct vm_object_hash_head *bucket;
	register vm_object_hash_entry_t entry;

	/*
	 * We don't cache null objects, and we can't cache
	 * objects with the null pager.
	 */

	if (object == NULL)
		return;
	if (pager == NULL)
		return;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];
	entry = (vm_object_hash_entry_t)
	    malloc((u_long)sizeof *entry, M_VMOBJHASH, M_WAITOK);
	entry->object = object;
	object->flags |= OBJ_CANPERSIST;

	vm_object_cache_lock();
	TAILQ_INSERT_TAIL(bucket, entry, hash_links);
	vm_object_cache_unlock();
}

/*
 *	vm_object_remove:
 *
 *	Remove the pager from the hash table.
 *	Note:  This assumes that the object cache
 *	is locked.  XXX this should be fixed
 *	by reorganizing vm_object_deallocate.
 */
void
vm_object_remove(pager)
	register vm_pager_t pager;
{
	struct vm_object_hash_head *bucket;
	register vm_object_hash_entry_t entry;
	register vm_object_t object;

	bucket = &vm_object_hashtable[vm_object_hash(pager)];

	for (entry = bucket->tqh_first;
	    entry != NULL;
	    entry = entry->hash_links.tqe_next) {
		object = entry->object;
		if (object->pager == pager) {
			TAILQ_REMOVE(bucket, entry, hash_links);
			free((caddr_t)entry, M_VMOBJHASH);
			break;
		}
	}
}

boolean_t vm_object_collapse_allowed = TRUE;
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.
 *
 *	This routine has significant changes by John S. Dyson
 *	to fix some swap memory leaks.  18 Dec 93
 *
 */
void
vm_object_collapse(object)
	register vm_object_t object;

{
	register vm_object_t backing_object;
	register vm_offset_t backing_offset;
	register vm_size_t size;
	register vm_offset_t new_offset;
	register vm_page_t p, pp;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently
		 * being paged out.
		 */
		if (object == NULL ||
		    object->paging_in_progress != 0)
			return;

		/*
		 * There is a backing object, and
		 */

		if ((backing_object = object->shadow) == NULL)
			return;

		vm_object_lock(backing_object);
		/*
		 * ...
		 * The backing object is not read_only,
		 * and no pages in the backing object are
		 * currently being paged out.
		 * The backing object is internal.
		 */

		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * The backing object can't be a copy-object:
		 * the shadow_offset for the copy-object must stay
		 * as 0.  Furthermore (for the 'we have all the
		 * pages' case), if we bypass backing_object and
		 * just shadow the next object in the chain, old
		 * pages from that object would then have to be copied
		 * BOTH into the (former) backing_object and into the
		 * parent object.
		 */
		if (backing_object->shadow != NULL &&
		    backing_object->shadow->copy == backing_object) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 * we can deal only with the swap pager
		 */
		if ((object->pager &&
		    object->pager->pg_type != PG_SWAP) ||
		    (backing_object->pager &&
		    backing_object->pager->pg_type != PG_SWAP)) {
			vm_object_unlock(backing_object);
			return;
		}


		/*
		 * We know that we can either collapse the backing
		 * object (if the parent is the only reference to
		 * it) or (perhaps) remove the parent's reference
		 * to it.
		 */

		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 * If there is exactly one reference to the backing
		 * object, we can collapse it into the parent.
		 */

		if (backing_object->ref_count == 1) {

			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object
			 * to the parent.  Pages that have been paged out
			 * will be overwritten by any of the parent's
			 * pages that shadow them.
			 */

			while ((p = backing_object->memq.tqh_first) != NULL) {

				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if
				 * this page falls outside the parent,
				 * dispose of it.
				 *
				 * Otherwise, move it as planned.
				 */

				if (p->offset < backing_offset ||
				    new_offset >= size) {
					vm_page_lock_queues();
					vm_page_free(p);
					vm_page_unlock_queues();
				} else {
					pp = vm_page_lookup(object, new_offset);
					if (pp != NULL ||
					    (object->pager &&
					    vm_pager_has_page(object->pager,
					    object->paging_offset + new_offset))) {
						vm_page_lock_queues();
						vm_page_free(p);
						vm_page_unlock_queues();
					} else {
						vm_page_rename(p, object, new_offset);
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
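			 *
			 * If both objects already have swap pagers, the
			 * backing pages are copied into the parent's pager
			 * and the backing pager is destroyed; otherwise
			 * the parent simply inherits the backing object's
			 * pager.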
			 */

			if (backing_object->pager) {
				backing_object->paging_in_progress++;
				if (object->pager) {
					vm_pager_t bopager;
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in shadow object.
					 */
					bopager = backing_object->pager;
					vm_object_remove(backing_object->pager);
					backing_object->pager = NULL;
					swap_pager_copy(
					    bopager, backing_object->paging_offset,
					    object->pager, object->paging_offset,
					    object->shadow_offset);
					object->paging_in_progress--;
					if (object->paging_in_progress == 0)
						wakeup((caddr_t)object);
				} else {
					object->paging_in_progress++;
					/*
					 * grab the shadow object's pager
					 */
					object->pager = backing_object->pager;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					vm_object_remove(backing_object->pager);
					backing_object->pager = NULL;
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object->pager, 0, object->paging_offset);
					object->paging_in_progress--;
					if (object->paging_in_progress == 0)
						wakeup((caddr_t)object);
				}
				backing_object->paging_in_progress--;
				if (backing_object->paging_in_progress == 0)
					wakeup((caddr_t)backing_object);
			}


			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->shadow
			 * moves from within backing_object to within object.
			 */

			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != NULL &&
			    object->shadow->copy != NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no
			 * pager left, and no object references within it,
			 * all that is necessary is to dispose of it.
			 */

			vm_object_unlock(backing_object);

			simple_lock(&vm_object_list_lock);
			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;
			simple_unlock(&vm_object_list_lock);

			free((caddr_t)backing_object, M_VMOBJ);

			object_collapses++;
		}
		else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent
			 * object no longer has to shadow the backing
			 * object; it can shadow the next one in the
			 * chain.
			 *
			 * The backing object must not be paged out - we'd
			 * have to check all of the paged-out pages, as
			 * well.
			 */

			if (backing_object->pager != NULL) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 * Should have a check for a 'small' number
			 * of pages here.
			 */

			for (p = backing_object->memq.tqh_first; p; p = p->listq.tqe_next) {
				new_offset = (p->offset - backing_offset);

				/*
				 * If the parent has a page here, or if
				 * this page falls outside the parent,
				 * keep going.
				 *
				 * Otherwise, the backing_object must be
				 * left in the chain.
				 */

				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    ((pp = vm_page_lookup(object, new_offset)) == NULL ||
				    (pp->flags & PG_FAKE)) &&
				    (!object->pager ||
				    !vm_pager_has_page(object->pager, object->paging_offset + new_offset))) {
					/*
					 * Page still needed.
					 * Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
			}

			/*
			 * Make the parent shadow the next object
			 * in the chain.  Deallocating backing_object
			 * will not remove it, since its reference
			 * count is at least 2.
			 */

			vm_object_reference(object->shadow = backing_object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

			/*
			 * Backing object might have had a copy pointer
			 * to us.  If it did, clear it.
			 */
			if (backing_object->copy == object) {
				backing_object->copy = NULL;
			}

			/*
			 * Drop the reference count on backing_object.
			 * Since its ref_count was at least 2, it
			 * will not vanish; so we don't need to call
			 * vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;
			vm_object_unlock(backing_object);

			object_bypasses++;

		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end)
	register vm_object_t object;
	register vm_offset_t start;
	register vm_offset_t end;
{
	register vm_page_t p, next;
	vm_offset_t size;
	int cnt;
	int s;

	if (object == NULL)
		return;

	start = trunc_page(start);
	end = round_page(end);
again:
	size = end - start;
	if (size > 4 * PAGE_SIZE || size >= object->size / 4) {
		for (p = object->memq.tqh_first; (p != NULL && size > 0); p = next) {
			next = p->listq.tqe_next;
			if ((start <= p->offset) && (p->offset < end)) {
				if (p->flags & PG_BUSY) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					goto again;
				}
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
				size -= PAGE_SIZE;
			}
		}
	} else {
		while (size > 0) {
			while ((p = vm_page_lookup(object, start)) != NULL) {
				if (p->flags & PG_BUSY) {
					p->flags |= PG_WANTED;
					tsleep((caddr_t) p, PVM, "vmopar", 0);
					goto again;
				}
				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
			}
			start += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
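 *
 *	On success the caller may simply extend its existing mapping of
 *	prev_object to cover the new range; prev_object->size is grown
 *	here and no new object is created.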
 */
boolean_t vm_object_coalesce(prev_object, next_object,
		prev_offset, next_offset,
		prev_size, next_size)

	register vm_object_t prev_object;
	vm_object_t next_object;
	vm_offset_t prev_offset, next_offset;
	vm_size_t prev_size, next_size;
{
	vm_size_t newsize;

#ifdef	lint
	next_offset++;
#endif

	if (next_object != NULL) {
		return(FALSE);
	}

	if (prev_object == NULL) {
		return(TRUE);
	}

	vm_object_lock(prev_object);

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 * . more than one reference
	 * . paged out
	 * . shadows another object
	 * . has a copy elsewhere
	 * (any of which mean that the pages not mapped to
	 * prev_entry may be in use anyway)
	 */

	if (prev_object->ref_count > 1 ||
	    prev_object->pager != NULL ||
	    prev_object->shadow != NULL ||
	    prev_object->copy != NULL) {
		vm_object_unlock(prev_object);
		return(FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from
	 * a previous deallocation.
	 */

	vm_object_page_remove(prev_object,
	    prev_offset + prev_size,
	    prev_offset + prev_size + next_size);

	/*
	 * Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return(TRUE);
}

/*
 * returns page after looking up in shadow chain
 */

vm_page_t
vm_object_page_lookup(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	vm_page_t m;

	if (!(m = vm_page_lookup(object, offset))) {
		if (!object->shadow)
			return 0;
		else
			return vm_object_page_lookup(object->shadow,
			    offset + object->shadow_offset);
	}
	return m;
}

#define DEBUG
#if defined(DEBUG) || (NDDB > 0)
/*
 *	vm_object_print:	[ debug ]
 */
void vm_object_print(object, full)
	vm_object_t object;
	boolean_t full;
{
	register vm_page_t p;
	extern indent;

	register int count;

	if (object == NULL)
		return;

	iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
	    (int) object, (int) object->size,
	    object->resident_page_count, object->ref_count);
	printf("pager=0x%x+0x%x, shadow=(0x%x)+0x%x\n",
	    (int) object->pager, (int) object->paging_offset,
	    (int) object->shadow, (int) object->shadow_offset);
	printf("cache: next=0x%x, prev=0x%x\n",
	    object->cached_list.tqe_next, object->cached_list.tqe_prev);

	if (!full)
		return;

	indent += 2;
	count = 0;
	for (p = object->memq.tqh_first; p != NULL; p = p->listq.tqe_next) {
		if (count == 0)
			iprintf("memory:=");
		else if (count == 6) {
			printf("\n");
			iprintf(" ...");
			count = 0;
		} else
			printf(",");
		count++;

		printf("(off=0x%x,page=0x%x)", p->offset, VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		printf("\n");
	indent -= 2;
}
#endif /* defined(DEBUG) || (NDDB > 0) */