/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.c,v 1.81 1996/09/14 11:54:57 bde Exp $
 */

/*
 * Virtual memory object module.
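 *
 * Usage sketch (illustrative note, not from the original comment): callers
 * obtain anonymous memory objects with vm_object_allocate(OBJT_DEFAULT,
 * npages), share them with vm_object_reference(), and release them with
 * vm_object_deallocate(); dropping the last reference either caches the
 * object (if it may persist) or terminates it.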
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void _vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t));
static void vm_object_qcollapse __P((vm_object_t object));
#ifdef not_used
static void vm_object_deactivate_pages __P((vm_object_t));
#endif
static void vm_object_terminate __P((vm_object_t));
static void vm_object_cache_trim __P((void));

/*
 * Virtual memory objects maintain the actual data
 * associated with allocated virtual memory.  A given
 * page of memory exists within exactly one object.
 *
 * An object is only deallocated when all "references"
 * are given up.  Only one "reference" to a given
 * region of an object should be writeable.
 *
 * Associated with each object is a list of all resident
 * memory pages belonging to that object; this list is
 * maintained by the "vm_page" module, and locked by the object's
 * lock.
 *
 * Each object also records a "pager" routine which is
 * used to retrieve (and store) pages to the proper backing
 * storage.  In addition, objects may be backed by other
 * objects from which they were virtual-copied.
 *
 * The only items within the object structure which are
 * modified after time of creation are:
 *	reference count		locked by object's lock
 *	pager routine		locked by object's lock
 *
 */

int vm_object_cache_max;
struct object_q vm_object_cached_list;
static int vm_object_cached;
struct object_q vm_object_list;
static long vm_object_count;
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;

static void
_vm_object_allocate(type, size, object)
    objtype_t type;
    vm_size_t size;
    register vm_object_t object;
{
    TAILQ_INIT(&object->memq);
    TAILQ_INIT(&object->shadow_head);

    object->type = type;
    object->size = size;
    object->ref_count = 1;
    object->flags = 0;
    object->behavior = OBJ_NORMAL;
    object->paging_in_progress = 0;
    object->resident_page_count = 0;
    object->shadow_count = 0;
    object->pg_color = next_index;
    next_index = (next_index + PQ_PRIME1) & PQ_L2_MASK;
    object->handle = NULL;
    object->paging_offset = (vm_ooffset_t) 0;
    object->backing_object = NULL;
    object->backing_object_offset = (vm_ooffset_t) 0;
    object->page_hint = NULL;

    object->last_read = 0;

    TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
    vm_object_count++;
}

/*
 * vm_object_init:
 *
 *	Initialize the VM objects module.
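 *
 *	Sets up the object lists, sizes the persistent-object cache from
 *	the physical page count, and creates the kernel_object and
 *	kmem_object covering the kernel's virtual address range.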
 */
void
vm_object_init()
{
    TAILQ_INIT(&vm_object_cached_list);
    TAILQ_INIT(&vm_object_list);
    vm_object_count = 0;

    vm_object_cache_max = 84;
    if (cnt.v_page_count > 1000)
        vm_object_cache_max += (cnt.v_page_count - 1000) / 4;

    kernel_object = &kernel_object_store;
    _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
        kernel_object);

    kmem_object = &kmem_object_store;
    _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
        kmem_object);
}

/*
 * vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */

vm_object_t
vm_object_allocate(type, size)
    objtype_t type;
    vm_size_t size;
{
    register vm_object_t result;

    result = (vm_object_t)
        malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK);

    _vm_object_allocate(type, size, result);

    return (result);
}

/*
 * vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
    register vm_object_t object;
{
    if (object == NULL)
        return;

    if (object->ref_count == 0) {
        if ((object->flags & OBJ_CANPERSIST) == 0)
            panic("vm_object_reference: non-persistent object with 0 ref_count");
        TAILQ_REMOVE(&vm_object_cached_list, object, cached_list);
        vm_object_cached--;
    }
    object->ref_count++;
}

/*
 * vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
    vm_object_t object;
{
    vm_object_t temp;

    while (object != NULL) {

        if (object->ref_count == 0)
            panic("vm_object_deallocate: object deallocated too many times");

        /*
         * Lose the reference
         */
        object->ref_count--;
        if (object->ref_count != 0) {
            if ((object->ref_count == 1) &&
                (object->handle == NULL) &&
                (object->type == OBJT_DEFAULT ||
                 object->type == OBJT_SWAP)) {
                vm_object_t robject;

                robject = TAILQ_FIRST(&object->shadow_head);
                if ((robject != NULL) &&
                    (robject->handle == NULL) &&
                    (robject->type == OBJT_DEFAULT ||
                     robject->type == OBJT_SWAP)) {
                    int s;

                    robject->ref_count += 2;
                    object->ref_count += 2;

                    do {
                        s = splvm();
                        while (robject->paging_in_progress) {
                            robject->flags |= OBJ_PIPWNT;
                            tsleep(robject, PVM, "objde1", 0);
                        }

                        while (object->paging_in_progress) {
                            object->flags |= OBJ_PIPWNT;
                            tsleep(object, PVM, "objde2", 0);
                        }
                        splx(s);

                    } while (object->paging_in_progress || robject->paging_in_progress);

                    object->ref_count -= 2;
                    robject->ref_count -= 2;
                    if (robject->ref_count == 0) {
                        robject->ref_count += 1;
                        object = robject;
                        continue;
                    }
                    vm_object_collapse(robject);
                    return;
                }
            }
            /*
             * If there are still references, then we are done.
             */
            return;
        }

        if (object->type == OBJT_VNODE) {
            struct vnode *vp = object->handle;

            vp->v_flag &= ~VTEXT;
        }

        /*
         * See if this object can persist and has some resident
         * pages.  If so, enter it in the cache.
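         *
         * A cached object keeps its pages resident, so a later
         * vm_object_reference() can revive it cheaply; the cache is
         * bounded by vm_object_cache_trim().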
         */
        if (object->flags & OBJ_CANPERSIST) {
            if (object->resident_page_count != 0) {
#if 0
                vm_object_page_clean(object, 0, 0, TRUE, TRUE);
#endif
                TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
                    cached_list);
                vm_object_cached++;

                vm_object_cache_trim();
                return;
            } else {
                object->flags &= ~OBJ_CANPERSIST;
            }
        }

        /*
         * Make sure no one uses us.
         */
        object->flags |= OBJ_DEAD;

        temp = object->backing_object;
        if (temp) {
            TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
            --temp->shadow_count;
        }
        vm_object_terminate(object);
        /* unlocks and deallocates object */
        object = temp;
    }
}

/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 */
static void
vm_object_terminate(object)
    register vm_object_t object;
{
    register vm_page_t p;
    int s;

    /*
     * wait for the pageout daemon to be done with the object
     */
    s = splvm();
    while (object->paging_in_progress) {
        object->flags |= OBJ_PIPWNT;
        tsleep(object, PVM, "objtrm", 0);
    }
    splx(s);

    if (object->paging_in_progress != 0)
        panic("vm_object_terminate: pageout in progress");

    /*
     * Clean and free the pages, as appropriate.  All references to the
     * object are gone, so we don't need to lock it.
     */
    if (object->type == OBJT_VNODE) {
        struct vnode *vp = object->handle;
        int waslocked;

        waslocked = VOP_ISLOCKED(vp);
        if (!waslocked)
            VOP_LOCK(vp);
        vm_object_page_clean(object, 0, 0, TRUE, FALSE);
        vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
        if (!waslocked)
            VOP_UNLOCK(vp);
    }
    /*
     * Now free the pages.  For internal objects, this also removes them
     * from paging queues.
     */
    while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
        if (p->busy || (p->flags & PG_BUSY))
            printf("vm_object_terminate: freeing busy page\n");
        PAGE_WAKEUP(p);
        vm_page_free(p);
        cnt.v_pfree++;
    }

    /*
     * Let the pager know object is dead.
     */
    vm_pager_deallocate(object);

    TAILQ_REMOVE(&vm_object_list, object, object_list);
    vm_object_count--;

    wakeup(object);

    /*
     * Free the space for the object.
     */
    free((caddr_t) object, M_VMOBJ);
}

/*
 * vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
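 *
 *	syncio is handed to VOP_FSYNC() as its waitfor argument; if
 *	lockflag is set, the vnode is locked for the duration of the
 *	operation.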
 */

void
vm_object_page_clean(object, start, end, syncio, lockflag)
    vm_object_t object;
    vm_pindex_t start;
    vm_pindex_t end;
    boolean_t syncio;
    boolean_t lockflag;
{
    register vm_page_t p, np, tp;
    register vm_offset_t tstart, tend;
    vm_pindex_t pi;
    int s;
    struct vnode *vp;
    int runlen;
    int maxf;
    int chkb;
    int maxb;
    int i;
    vm_page_t maf[vm_pageout_page_count];
    vm_page_t mab[vm_pageout_page_count];
    vm_page_t ma[vm_pageout_page_count];

    if (object->type != OBJT_VNODE ||
        (object->flags & OBJ_MIGHTBEDIRTY) == 0)
        return;

    vp = object->handle;

    if (lockflag)
        VOP_LOCK(vp);
    object->flags |= OBJ_CLEANING;

    tstart = start;
    if (end == 0) {
        tend = object->size;
    } else {
        tend = end;
    }
    if ((tstart == 0) && (tend == object->size)) {
        object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
    }
    for (p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq))
        p->flags |= PG_CLEANCHK;

rescan:
    for (p = TAILQ_FIRST(&object->memq); p; p = np) {
        np = TAILQ_NEXT(p, listq);

        pi = p->pindex;
        if (((p->flags & PG_CLEANCHK) == 0) ||
            (pi < tstart) || (pi >= tend) ||
            (p->valid == 0) ||
            ((p->queue - p->pc) == PQ_CACHE)) {
            p->flags &= ~PG_CLEANCHK;
            continue;
        }

        vm_page_test_dirty(p);
        if ((p->dirty & p->valid) == 0) {
            p->flags &= ~PG_CLEANCHK;
            continue;
        }

        s = splvm();
        if ((p->flags & PG_BUSY) || p->busy) {
            p->flags |= PG_WANTED|PG_REFERENCED;
            tsleep(p, PVM, "vpcwai", 0);
            splx(s);
            goto rescan;
        }
        splx(s);

        s = splvm();
        maxf = 0;
        for (i = 1; i < vm_pageout_page_count; i++) {
            if (tp = vm_page_lookup(object, pi + i)) {
                if ((tp->flags & PG_BUSY) ||
                    (tp->flags & PG_CLEANCHK) == 0)
                    break;
                if ((tp->queue - tp->pc) == PQ_CACHE) {
                    tp->flags &= ~PG_CLEANCHK;
                    break;
                }
                vm_page_test_dirty(tp);
                if ((tp->dirty & tp->valid) == 0) {
                    tp->flags &= ~PG_CLEANCHK;
                    break;
                }
                maf[i - 1] = tp;
                maxf++;
                continue;
            }
            break;
        }

        maxb = 0;
        chkb = vm_pageout_page_count - maxf;
        if (chkb) {
            for (i = 1; i < chkb; i++) {
                if (tp = vm_page_lookup(object, pi - i)) {
                    if ((tp->flags & PG_BUSY) ||
                        (tp->flags & PG_CLEANCHK) == 0)
                        break;
                    if ((tp->queue - tp->pc) == PQ_CACHE) {
                        tp->flags &= ~PG_CLEANCHK;
                        break;
                    }
                    vm_page_test_dirty(tp);
                    if ((tp->dirty & tp->valid) == 0) {
                        tp->flags &= ~PG_CLEANCHK;
                        break;
                    }
                    mab[i - 1] = tp;
                    maxb++;
                    continue;
                }
                break;
            }
        }

        for (i = 0; i < maxb; i++) {
            int index = (maxb - i) - 1;

            ma[index] = mab[i];
            ma[index]->flags |= PG_BUSY;
            ma[index]->flags &= ~PG_CLEANCHK;
            vm_page_protect(ma[index], VM_PROT_READ);
        }
        vm_page_protect(p, VM_PROT_READ);
        p->flags |= PG_BUSY;
        p->flags &= ~PG_CLEANCHK;
        ma[maxb] = p;
        for (i = 0; i < maxf; i++) {
            int index = (maxb + i) + 1;

            ma[index] = maf[i];
            ma[index]->flags |= PG_BUSY;
            ma[index]->flags &= ~PG_CLEANCHK;
            vm_page_protect(ma[index], VM_PROT_READ);
        }
        runlen = maxb + maxf + 1;
        splx(s);
        vm_pageout_flush(ma, runlen, 0);
        goto rescan;
    }

    VOP_FSYNC(vp, NULL, syncio, curproc);

    if (lockflag)
        VOP_UNLOCK(vp);
    object->flags &= ~OBJ_CLEANING;
    return;
}

#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_pages(object)
    register vm_object_t object;
{
    register vm_page_t p, next;

    for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
        next = TAILQ_NEXT(p, listq);
        vm_page_deactivate(p);
    }
}
#endif

/*
 * Trim the object cache to size.
 */
static void
vm_object_cache_trim()
{
    register vm_object_t object;

    while (vm_object_cached > vm_object_cache_max) {
        object = TAILQ_FIRST(&vm_object_cached_list);

        vm_object_reference(object);
        pager_cache(object, FALSE);
    }
}

/*
 * vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
    register vm_object_t object;
    register vm_pindex_t start;
    register vm_pindex_t end;
{
    register vm_page_t p;

    if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
        return;

    for (p = TAILQ_FIRST(&object->memq);
        p != NULL;
        p = TAILQ_NEXT(p, listq)) {
        vm_page_protect(p, VM_PROT_READ);
    }

    object->flags &= ~OBJ_WRITEABLE;
}

/*
 * vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
    register vm_object_t object;
    register vm_pindex_t start;
    register vm_pindex_t end;
{
    register vm_page_t p;

    if (object == NULL)
        return;
    for (p = TAILQ_FIRST(&object->memq);
        p != NULL;
        p = TAILQ_NEXT(p, listq)) {
        if (p->pindex >= start && p->pindex < end)
            vm_page_protect(p, VM_PROT_NONE);
    }
}

/*
 * vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 */
void
vm_object_madvise(object, pindex, count, advise)
    vm_object_t object;
    vm_pindex_t pindex;
    int count;
    int advise;
{
    vm_pindex_t end;
    vm_page_t m;

    if (object == NULL)
        return;

    end = pindex + count;

    for (; pindex < end; pindex += 1) {
        m = vm_page_lookup(object, pindex);

        /*
         * If the page is busy or not in a normal active state,
         * we skip it.  Things can break if we mess with pages
         * in any of the below states.
         */
        if (m == NULL || m->busy || (m->flags & PG_BUSY) ||
            m->hold_count || m->wire_count ||
            m->valid != VM_PAGE_BITS_ALL)
            continue;

        if (advise == MADV_WILLNEED) {
            if (m->queue != PQ_ACTIVE)
                vm_page_activate(m);
        } else if ((advise == MADV_DONTNEED) ||
            ((advise == MADV_FREE) &&
             ((object->type != OBJT_DEFAULT) &&
              (object->type != OBJT_SWAP)))) {
            vm_page_deactivate(m);
        } else if (advise == MADV_FREE) {
            /*
             * Force a demand-zero on next ref
             */
            if (object->type == OBJT_SWAP)
                swap_pager_dmzspace(object, m->pindex, 1);
            vm_page_protect(m, VM_PROT_NONE);
            vm_page_free(m);
        }
    }
}

/*
 * vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
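 *
 *	(Illustrative note: this is the primitive used to give a
 *	copy-on-write mapping its own front object, with the old object
 *	left behind as its backing_object.)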
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */

void
vm_object_shadow(object, offset, length)
    vm_object_t *object;	/* IN/OUT */
    vm_ooffset_t *offset;	/* IN/OUT */
    vm_size_t length;
{
    register vm_object_t source;
    register vm_object_t result;

    source = *object;

    /*
     * Allocate a new object with the given length
     */

    if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
        panic("vm_object_shadow: no object for shadowing");

    /*
     * The new object shadows the source object, adding a reference to it.
     * Our caller changes his reference to point to the new object,
     * removing a reference to the source object.  Net result: no change
     * of reference count.
     */
    result->backing_object = source;
    if (source) {
        TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
        ++source->shadow_count;
    }

    /*
     * Store the offset into the source object, and fix up the offset into
     * the new object.
     */

    result->backing_object_offset = *offset;

    /*
     * Return the new things
     */

    *offset = 0;
    *object = result;
}

/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
    register vm_object_t object;
{
    register vm_object_t backing_object;
    register vm_pindex_t backing_offset_index, paging_offset_index;
    vm_pindex_t backing_object_paging_offset_index;
    vm_pindex_t new_pindex;
    register vm_page_t p, pp;
    register vm_size_t size;

    backing_object = object->backing_object;
    if (backing_object->ref_count != 1)
        return;

    backing_object->ref_count += 2;

    backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
    backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
    paging_offset_index = OFF_TO_IDX(object->paging_offset);
    size = object->size;
    p = TAILQ_FIRST(&backing_object->memq);
    while (p) {
        vm_page_t next;

        next = TAILQ_NEXT(p, listq);
        if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
            ((p->queue - p->pc) == PQ_CACHE) ||
            !p->valid || p->hold_count || p->wire_count || p->busy) {
            p = next;
            continue;
        }
        new_pindex = p->pindex - backing_offset_index;
        if (p->pindex < backing_offset_index ||
            new_pindex >= size) {
            if (backing_object->type == OBJT_SWAP)
                swap_pager_freespace(backing_object,
                    backing_object_paging_offset_index + p->pindex,
                    1);
            vm_page_protect(p, VM_PROT_NONE);
            vm_page_free(p);
        } else {
            pp = vm_page_lookup(object, new_pindex);
            if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
                paging_offset_index + new_pindex, NULL, NULL))) {
                if (backing_object->type == OBJT_SWAP)
                    swap_pager_freespace(backing_object,
                        backing_object_paging_offset_index + p->pindex, 1);
                vm_page_protect(p, VM_PROT_NONE);
                vm_page_free(p);
            } else {
                if (backing_object->type == OBJT_SWAP)
                    swap_pager_freespace(backing_object,
                        backing_object_paging_offset_index + p->pindex, 1);
                vm_page_rename(p, object, new_pindex);
                p->dirty = VM_PAGE_BITS_ALL;
            }
        }
        p = next;
    }
    backing_object->ref_count -= 2;
}

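/*
 * Illustrative sketch (not part of the original comments): starting from
 * the shadow chain
 *
 *	object -> backing_object -> grandparent
 *
 * a full collapse moves the backing object's pages and pager into
 * "object" and frees the backing object, leaving
 *
 *	object -> grandparent
 *
 * The "bypass" case handled further below instead merely drops the
 * parent's reference to the backing object, once every page the parent
 * could need is already shadowed.
 */
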
/*
 * vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(object)
    vm_object_t object;

{
    vm_object_t backing_object;
    vm_ooffset_t backing_offset;
    vm_size_t size;
    vm_pindex_t new_pindex, backing_offset_index;
    vm_page_t p, pp;

    while (TRUE) {
        /*
         * Verify that the conditions are right for collapse:
         *
         * The object exists and no pages in it are currently being paged
         * out.
         */
        if (object == NULL)
            return;

        /*
         * Make sure there is a backing object.
         */
        if ((backing_object = object->backing_object) == NULL)
            return;

        /*
         * we check the backing object first, because it is most likely
         * not collapsable.
         */
        if (backing_object->handle != NULL ||
            (backing_object->type != OBJT_DEFAULT &&
             backing_object->type != OBJT_SWAP) ||
            (backing_object->flags & OBJ_DEAD) ||
            object->handle != NULL ||
            (object->type != OBJT_DEFAULT &&
             object->type != OBJT_SWAP) ||
            (object->flags & OBJ_DEAD)) {
            return;
        }

        if (object->paging_in_progress != 0 ||
            backing_object->paging_in_progress != 0) {
            vm_object_qcollapse(object);
            return;
        }

        /*
         * We know that we can either collapse the backing object (if
         * the parent is the only reference to it) or (perhaps) remove
         * the parent's reference to it.
         */

        backing_offset = object->backing_object_offset;
        backing_offset_index = OFF_TO_IDX(backing_offset);
        size = object->size;

        /*
         * If there is exactly one reference to the backing object, we
         * can collapse it into the parent.
         */

        if (backing_object->ref_count == 1) {

            backing_object->flags |= OBJ_DEAD;
            /*
             * We can collapse the backing object.
             *
             * Move all in-memory pages from backing_object to the
             * parent.  Pages that have been paged out will be
             * overwritten by any of the parent's pages that
             * shadow them.
             */

            while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) {

                new_pindex = p->pindex - backing_offset_index;

                /*
                 * If the parent has a page here, or if this
                 * page falls outside the parent, dispose of
                 * it.
                 *
                 * Otherwise, move it as planned.
                 */

                if (p->pindex < backing_offset_index ||
                    new_pindex >= size) {
                    vm_page_protect(p, VM_PROT_NONE);
                    PAGE_WAKEUP(p);
                    vm_page_free(p);
                } else {
                    pp = vm_page_lookup(object, new_pindex);
                    if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
                        OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
                        vm_page_protect(p, VM_PROT_NONE);
                        PAGE_WAKEUP(p);
                        vm_page_free(p);
                    } else {
                        vm_page_rename(p, object, new_pindex);
                    }
                }
            }

            /*
             * Move the pager from backing_object to object.
             */

            if (backing_object->type == OBJT_SWAP) {
                backing_object->paging_in_progress++;
                if (object->type == OBJT_SWAP) {
                    object->paging_in_progress++;
                    /*
                     * copy shadow object pages into ours
                     * and destroy unneeded pages in
                     * shadow object.
                     */
                    swap_pager_copy(
                        backing_object,
                        OFF_TO_IDX(backing_object->paging_offset),
                        object,
                        OFF_TO_IDX(object->paging_offset),
                        OFF_TO_IDX(object->backing_object_offset));
                    vm_object_pip_wakeup(object);
                } else {
                    object->paging_in_progress++;
                    /*
                     * move the shadow backing_object's pager data to
                     * "object" and convert "object" type to OBJT_SWAP.
                     */
                    object->type = OBJT_SWAP;
                    object->un_pager.swp.swp_nblocks =
                        backing_object->un_pager.swp.swp_nblocks;
                    object->un_pager.swp.swp_allocsize =
                        backing_object->un_pager.swp.swp_allocsize;
                    object->un_pager.swp.swp_blocks =
                        backing_object->un_pager.swp.swp_blocks;
                    object->un_pager.swp.swp_poip =		/* XXX */
                        backing_object->un_pager.swp.swp_poip;
                    object->paging_offset = backing_object->paging_offset + backing_offset;
                    TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);

                    /*
                     * Convert backing object from OBJT_SWAP to
                     * OBJT_DEFAULT.  XXX - only the TAILQ_REMOVE is
                     * actually necessary.
                     */
                    backing_object->type = OBJT_DEFAULT;
                    TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
                    /*
                     * free unnecessary blocks
                     */
                    swap_pager_freespace(object, 0,
                        OFF_TO_IDX(object->paging_offset));
                    vm_object_pip_wakeup(object);
                }

                vm_object_pip_wakeup(backing_object);
            }
            /*
             * Object now shadows whatever backing_object did.
             * Note that the reference to backing_object->backing_object
             * moves from within backing_object to within object.
             */

            TAILQ_REMOVE(&object->backing_object->shadow_head, object,
                shadow_list);
            --object->backing_object->shadow_count;
            if (backing_object->backing_object) {
                TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
                    backing_object, shadow_list);
                --backing_object->backing_object->shadow_count;
            }
            object->backing_object = backing_object->backing_object;
            if (object->backing_object) {
                TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
                    object, shadow_list);
                ++object->backing_object->shadow_count;
            }

            object->backing_object_offset += backing_object->backing_object_offset;
            /*
             * Discard backing_object.
             *
             * Since the backing object has no pages, no pager left,
             * and no object references within it, all that is
             * necessary is to dispose of it.
             */

            TAILQ_REMOVE(&vm_object_list, backing_object,
                object_list);
            vm_object_count--;

            free((caddr_t) backing_object, M_VMOBJ);

            object_collapses++;
        } else {
            /*
             * If all of the pages in the backing object are
             * shadowed by the parent object, the parent object no
             * longer has to shadow the backing object; it can
             * shadow the next one in the chain.
             *
             * The backing object must not be paged out - we'd have
             * to check all of the paged-out pages, as well.
             */

            if (backing_object->type != OBJT_DEFAULT) {
                return;
            }
            /*
             * Should have a check for a 'small' number of pages
             * here.
             */

            for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
                new_pindex = p->pindex - backing_offset_index;

                /*
                 * If the parent has a page here, or if this
                 * page falls outside the parent, keep going.
                 *
                 * Otherwise, the backing_object must be left in
                 * the chain.
                 */

                if (p->pindex >= backing_offset_index &&
                    new_pindex <= size) {

                    pp = vm_page_lookup(object, new_pindex);

                    if ((pp == NULL || pp->valid == 0) &&
                        !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
                        /*
                         * Page still needed.  Can't go any
                         * further.
                         */
                        return;
                    }
                }
            }

            /*
             * Make the parent shadow the next object in the
             * chain.  Deallocating backing_object will not remove
             * it, since its reference count is at least 2.
             */

            TAILQ_REMOVE(&object->backing_object->shadow_head,
                object, shadow_list);
            --object->backing_object->shadow_count;
            vm_object_reference(object->backing_object = backing_object->backing_object);
            if (object->backing_object) {
                TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
                    object, shadow_list);
                ++object->backing_object->shadow_count;
            }
            object->backing_object_offset += backing_object->backing_object_offset;

            /*
             * Drop the reference count on backing_object.  Since
             * its ref_count was at least 2, it will not vanish;
             * so we don't need to call vm_object_deallocate.
             */
            if (backing_object->ref_count == 1)
                printf("should have called obj deallocate\n");
            backing_object->ref_count--;

            object_bypasses++;

        }

        /*
         * Try again with this object's new backing object.
         */
    }
}

/*
 * vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end, clean_only)
    register vm_object_t object;
    register vm_pindex_t start;
    register vm_pindex_t end;
    boolean_t clean_only;
{
    register vm_page_t p, next;
    unsigned int size;
    int s;

    if (object == NULL)
        return;

    object->paging_in_progress++;
again:
    size = end - start;
    if (size > 4 || size >= object->size / 4) {
        for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
            next = TAILQ_NEXT(p, listq);
            if ((start <= p->pindex) && (p->pindex < end)) {
                if (p->wire_count != 0) {
                    vm_page_protect(p, VM_PROT_NONE);
                    p->valid = 0;
                    continue;
                }

                /*
                 * The busy flags are only cleared at
                 * interrupt -- minimize the spl transitions
                 */
                if ((p->flags & PG_BUSY) || p->busy) {
                    s = splvm();
                    if ((p->flags & PG_BUSY) || p->busy) {
                        p->flags |= PG_WANTED;
                        tsleep(p, PVM, "vmopar", 0);
                        splx(s);
                        goto again;
                    }
                    splx(s);
                }

                if (clean_only) {
                    vm_page_test_dirty(p);
                    if (p->valid & p->dirty)
                        continue;
                }
                vm_page_protect(p, VM_PROT_NONE);
                PAGE_WAKEUP(p);
                vm_page_free(p);
            }
        }
    } else {
        while (size > 0) {
            if ((p = vm_page_lookup(object, start)) != 0) {
                if (p->wire_count != 0) {
                    p->valid = 0;
                    vm_page_protect(p, VM_PROT_NONE);
                    start += 1;
                    size -= 1;
                    continue;
                }
                /*
                 * The busy flags are only cleared at
                 * interrupt -- minimize the spl transitions
                 */
                if ((p->flags & PG_BUSY) || p->busy) {
                    s = splvm();
                    if ((p->flags & PG_BUSY) || p->busy) {
                        p->flags |= PG_WANTED;
                        tsleep(p, PVM, "vmopar", 0);
                        splx(s);
                        goto again;
                    }
                    splx(s);
                }
                if (clean_only) {
                    vm_page_test_dirty(p);
                    if (p->valid & p->dirty) {
                        start += 1;
                        size -= 1;
                        continue;
                    }
                }
                vm_page_protect(p, VM_PROT_NONE);
                PAGE_WAKEUP(p);
                vm_page_free(p);
            }
            start += 1;
            size -= 1;
        }
    }
    vm_object_pip_wakeup(object);
}

/*
 * Routine:	vm_object_coalesce
 * Function:	Coalesces two objects backing up adjoining
 *		regions of memory into a single object.
 *
 * returns TRUE if objects were combined.
 *
 * NOTE: Only works at the moment if the second object is NULL -
 *	 if it's not, which object do we lock first?
 *
 * Parameters:
 *	prev_object	First object to coalesce
 *	prev_pindex	Index into prev_object
 *	prev_size	Size of reference to prev_object
 *	next_size	Size of reference to the second object
 *
 * Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
    register vm_object_t prev_object;
    vm_pindex_t prev_pindex;
    vm_size_t prev_size, next_size;
{
    vm_size_t newsize;

    if (prev_object == NULL) {
        return (TRUE);
    }

    if (prev_object->type != OBJT_DEFAULT) {
        return (FALSE);
    }

    /*
     * Try to collapse the object first
     */
    vm_object_collapse(prev_object);

    /*
     * Can't coalesce if: . more than one reference . paged out . shadows
     * another object . has a copy elsewhere (any of which mean that the
     * pages not mapped to prev_entry may be in use anyway)
     */

    if (prev_object->ref_count > 1 ||
        prev_object->backing_object != NULL) {
        return (FALSE);
    }

    prev_size >>= PAGE_SHIFT;
    next_size >>= PAGE_SHIFT;
    /*
     * Remove any pages that may still be in the object from a previous
     * deallocation.
     */

    vm_object_page_remove(prev_object,
        prev_pindex + prev_size,
        prev_pindex + prev_size + next_size, FALSE);

    /*
     * Extend the object if necessary.
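     * For example (illustrative numbers): with prev_pindex 10, a 4-page
     * prev_size and a 2-page next_size, the object must cover page
     * indices up to 10 + 4 + 2 = 16.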
     */
    newsize = prev_pindex + prev_size + next_size;
    if (newsize > prev_object->size)
        prev_object->size = newsize;

    return (TRUE);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <machine/cons.h>

#include <ddb/ddb.h>

static int _vm_object_in_map __P((vm_map_t map, vm_object_t object,
    vm_map_entry_t entry));
static int vm_object_in_map __P((vm_object_t object));

static int
_vm_object_in_map(map, object, entry)
    vm_map_t map;
    vm_object_t object;
    vm_map_entry_t entry;
{
    vm_map_t tmpm;
    vm_map_entry_t tmpe;
    vm_object_t obj;
    int entcount;

    if (map == 0)
        return 0;

    if (entry == 0) {
        tmpe = map->header.next;
        entcount = map->nentries;
        while (entcount-- && (tmpe != &map->header)) {
            if (_vm_object_in_map(map, object, tmpe)) {
                return 1;
            }
            tmpe = tmpe->next;
        }
    } else if (entry->is_sub_map || entry->is_a_map) {
        tmpm = entry->object.share_map;
        tmpe = tmpm->header.next;
        entcount = tmpm->nentries;
        while (entcount-- && tmpe != &tmpm->header) {
            if (_vm_object_in_map(tmpm, object, tmpe)) {
                return 1;
            }
            tmpe = tmpe->next;
        }
    } else if (obj = entry->object.vm_object) {
        for (; obj; obj = obj->backing_object)
            if (obj == object) {
                return 1;
            }
    }
    return 0;
}

static int
vm_object_in_map(object)
    vm_object_t object;
{
    struct proc *p;

    for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
        if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
            continue;
        if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
            return 1;
    }
    if (_vm_object_in_map(kernel_map, object, 0))
        return 1;
    if (_vm_object_in_map(kmem_map, object, 0))
        return 1;
    if (_vm_object_in_map(pager_map, object, 0))
        return 1;
    if (_vm_object_in_map(buffer_map, object, 0))
        return 1;
    if (_vm_object_in_map(io_map, object, 0))
        return 1;
    if (_vm_object_in_map(phys_map, object, 0))
        return 1;
    if (_vm_object_in_map(mb_map, object, 0))
        return 1;
    if (_vm_object_in_map(u_map, object, 0))
        return 1;
    return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
    vm_object_t object;

    /*
     * make sure that internal objs are in a map somewhere
     * and none have zero ref counts.
     */
    for (object = TAILQ_FIRST(&vm_object_list);
        object != NULL;
        object = TAILQ_NEXT(object, object_list)) {
        if (object->handle == NULL &&
            (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
            if (object->ref_count == 0) {
                db_printf("vmochk: internal obj has zero ref count: %d\n",
                    object->size);
            }
            if (!vm_object_in_map(object)) {
                db_printf("vmochk: internal obj is not in a map: "
                    "ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
                    object->ref_count, object->size,
                    object->size, object->backing_object);
            }
        }
    }
}

/*
 * vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
    /* XXX convert args. */
    vm_object_t object = (vm_object_t)addr;
    boolean_t full = have_addr;

    register vm_page_t p;

    /* XXX count is an (unused) arg.  Avoid shadowing it. */
#define count was_count

    register int count;

    if (object == NULL)
        return;

    db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
        (int) object, (int) object->size,
        object->resident_page_count, object->ref_count);
    db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n",
        (int) object->paging_offset,
        (int) object->backing_object, (int) object->backing_object_offset);
    db_printf("cache: next=%p, prev=%p\n",
        TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list));

    if (!full)
        return;

    db_indent += 2;
    count = 0;
    for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
        if (count == 0)
            db_iprintf("memory:=");
        else if (count == 6) {
            db_printf("\n");
            db_iprintf(" ...");
            count = 0;
        } else
            db_printf(",");
        count++;

        db_printf("(off=0x%lx,page=0x%lx)",
            (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
    }
    if (count != 0)
        db_printf("\n");
    db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(addr, have_addr, count, modif)
    db_expr_t addr;
    boolean_t have_addr;
    db_expr_t count;
    char *modif;
{
    vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
    vm_object_t object;
    int nl = 0;
    int c;

    for (object = TAILQ_FIRST(&vm_object_list);
        object != NULL;
        object = TAILQ_NEXT(object, object_list)) {
        vm_pindex_t idx, fidx;
        vm_pindex_t osize;
        vm_offset_t pa = -1, padiff;
        int rcount;
        vm_page_t m;

        db_printf("new object: 0x%x\n", object);
        if (nl > 18) {
            c = cngetc();
            if (c != ' ')
                return;
            nl = 0;
        }
        nl++;
        rcount = 0;
        fidx = 0;
        osize = object->size;
        if (osize > 128)
            osize = 128;
        for (idx = 0; idx < osize; idx++) {
            m = vm_page_lookup(object, idx);
            if (m == NULL) {
                if (rcount) {
                    db_printf(" index(%d)run(%d)pa(0x%x)\n",
                        fidx, rcount, pa);
                    if (nl > 18) {
                        c = cngetc();
                        if (c != ' ')
                            return;
                        nl = 0;
                    }
                    nl++;
                    rcount = 0;
                }
                continue;
            }

            if (rcount &&
                (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
                ++rcount;
                continue;
            }
            if (rcount) {
                padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
                padiff >>= PAGE_SHIFT;
                padiff &= PQ_L2_MASK;
                if (padiff == 0) {
                    pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
                    ++rcount;
                    continue;
                }
                db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa);
                db_printf("pd(%d)\n", padiff);
                if (nl > 18) {
                    c = cngetc();
                    if (c != ' ')
                        return;
                    nl = 0;
                }
                nl++;
            }
            fidx = idx;
            pa = VM_PAGE_TO_PHYS(m);
            rcount = 1;
        }
        if (rcount) {
            db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa);
            if (nl > 18) {
                c = cngetc();
                if (c != ' ')
                    return;
                nl = 0;
            }
            nl++;
        }
    }
}
#endif /* DDB */