1 /* 2 * Copyright (c) 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * The Mach Operating System project at Carnegie-Mellon University. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * from: @(#)vm_object.c 8.5 (Berkeley) 3/22/94 37 * 38 * 39 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 40 * All rights reserved. 41 * 42 * Authors: Avadis Tevanian, Jr., Michael Wayne Young 43 * 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53 * 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. 63 * 64 * $Id: vm_object.c,v 1.82 1996/09/28 03:33:26 dyson Exp $ 65 */ 66 67 /* 68 * Virtual memory object module. 69 */ 70 71 #include <sys/param.h> 72 #include <sys/systm.h> 73 #include <sys/kernel.h> 74 #include <sys/proc.h> /* for curproc, pageproc */ 75 #include <sys/malloc.h> 76 #include <sys/vnode.h> 77 #include <sys/mount.h> 78 #include <sys/vmmeter.h> 79 #include <sys/mman.h> 80 81 #include <vm/vm.h> 82 #include <vm/vm_param.h> 83 #include <vm/vm_prot.h> 84 #include <vm/lock.h> 85 #include <vm/pmap.h> 86 #include <vm/vm_map.h> 87 #include <vm/vm_object.h> 88 #include <vm/vm_page.h> 89 #include <vm/vm_pageout.h> 90 #include <vm/vm_pager.h> 91 #include <vm/swap_pager.h> 92 #include <vm/vm_kern.h> 93 #include <vm/vm_extern.h> 94 95 static void _vm_object_allocate __P((objtype_t, vm_size_t, vm_object_t)); 96 static void vm_object_qcollapse __P((vm_object_t object)); 97 #ifdef not_used 98 static void vm_object_deactivate_pages __P((vm_object_t)); 99 #endif 100 static void vm_object_terminate __P((vm_object_t)); 101 static void vm_object_cache_trim __P((void)); 102 103 /* 104 * Virtual memory objects maintain the actual data 105 * associated with allocated virtual memory. A given 106 * page of memory exists within exactly one object. 107 * 108 * An object is only deallocated when all "references" 109 * are given up. Only one "reference" to a given 110 * region of an object should be writeable. 111 * 112 * Associated with each object is a list of all resident 113 * memory pages belonging to that object; this list is 114 * maintained by the "vm_page" module, and locked by the object's 115 * lock. 116 * 117 * Each object also records a "pager" routine which is 118 * used to retrieve (and store) pages to the proper backing 119 * storage. In addition, objects may be backed by other 120 * objects from which they were virtual-copied. 121 * 122 * The only items within the object structure which are 123 * modified after time of creation are: 124 * reference count locked by object's lock 125 * pager routine locked by object's lock 126 * 127 */ 128 129 int vm_object_cache_max; 130 struct object_q vm_object_cached_list; 131 static int vm_object_cached; 132 struct object_q vm_object_list; 133 static long vm_object_count; 134 vm_object_t kernel_object; 135 vm_object_t kmem_object; 136 static struct vm_object kernel_object_store; 137 static struct vm_object kmem_object_store; 138 extern int vm_pageout_page_count; 139 140 static long object_collapses; 141 static long object_bypasses; 142 static int next_index; 143 144 static void 145 _vm_object_allocate(type, size, object) 146 objtype_t type; 147 vm_size_t size; 148 register vm_object_t object; 149 { 150 TAILQ_INIT(&object->memq); 151 TAILQ_INIT(&object->shadow_head); 152 153 object->type = type; 154 object->size = size; 155 object->ref_count = 1; 156 object->flags = 0; 157 object->behavior = OBJ_NORMAL; 158 object->paging_in_progress = 0; 159 object->resident_page_count = 0; 160 object->shadow_count = 0; 161 object->pg_color = next_index; 162 next_index = (next_index + PQ_PRIME1) & PQ_L2_MASK; 163 object->handle = NULL; 164 object->paging_offset = (vm_ooffset_t) 0; 165 object->backing_object = NULL; 166 object->backing_object_offset = (vm_ooffset_t) 0; 167 object->page_hint = NULL; 168 169 object->last_read = 0; 170 171 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); 172 vm_object_count++; 173 } 174 175 /* 176 * vm_object_init: 177 * 178 * Initialize the VM objects module. 179 */ 180 void 181 vm_object_init() 182 { 183 TAILQ_INIT(&vm_object_cached_list); 184 TAILQ_INIT(&vm_object_list); 185 vm_object_count = 0; 186 187 vm_object_cache_max = 84; 188 if (cnt.v_page_count > 1000) 189 vm_object_cache_max += (cnt.v_page_count - 1000) / 4; 190 191 kernel_object = &kernel_object_store; 192 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), 193 kernel_object); 194 195 kmem_object = &kmem_object_store; 196 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), 197 kmem_object); 198 } 199 200 /* 201 * vm_object_allocate: 202 * 203 * Returns a new object with the given size. 204 */ 205 206 vm_object_t 207 vm_object_allocate(type, size) 208 objtype_t type; 209 vm_size_t size; 210 { 211 register vm_object_t result; 212 213 result = (vm_object_t) 214 malloc((u_long) sizeof *result, M_VMOBJ, M_WAITOK); 215 216 217 _vm_object_allocate(type, size, result); 218 219 return (result); 220 } 221 222 223 /* 224 * vm_object_reference: 225 * 226 * Gets another reference to the given object. 227 */ 228 void 229 vm_object_reference(object) 230 register vm_object_t object; 231 { 232 if (object == NULL) 233 return; 234 235 if (object->ref_count == 0) { 236 if ((object->flags & OBJ_CANPERSIST) == 0) 237 panic("vm_object_reference: non-persistent object with 0 ref_count"); 238 TAILQ_REMOVE(&vm_object_cached_list, object, cached_list); 239 vm_object_cached--; 240 } 241 object->ref_count++; 242 } 243 244 /* 245 * vm_object_deallocate: 246 * 247 * Release a reference to the specified object, 248 * gained either through a vm_object_allocate 249 * or a vm_object_reference call. When all references 250 * are gone, storage associated with this object 251 * may be relinquished. 252 * 253 * No object may be locked. 254 */ 255 void 256 vm_object_deallocate(object) 257 vm_object_t object; 258 { 259 vm_object_t temp; 260 261 while (object != NULL) { 262 263 if (object->ref_count == 0) 264 panic("vm_object_deallocate: object deallocated too many times"); 265 266 /* 267 * Lose the reference 268 */ 269 object->ref_count--; 270 if (object->ref_count != 0) { 271 if ((object->ref_count == 1) && 272 (object->handle == NULL) && 273 (object->type == OBJT_DEFAULT || 274 object->type == OBJT_SWAP)) { 275 vm_object_t robject; 276 robject = TAILQ_FIRST(&object->shadow_head); 277 if ((robject != NULL) && 278 (robject->handle == NULL) && 279 (robject->type == OBJT_DEFAULT || 280 robject->type == OBJT_SWAP)) { 281 int s; 282 robject->ref_count += 2; 283 object->ref_count += 2; 284 285 do { 286 s = splvm(); 287 while (robject->paging_in_progress) { 288 robject->flags |= OBJ_PIPWNT; 289 tsleep(robject, PVM, "objde1", 0); 290 } 291 292 while (object->paging_in_progress) { 293 object->flags |= OBJ_PIPWNT; 294 tsleep(object, PVM, "objde2", 0); 295 } 296 splx(s); 297 298 } while( object->paging_in_progress || robject->paging_in_progress); 299 300 object->ref_count -= 2; 301 robject->ref_count -= 2; 302 if( robject->ref_count == 0) { 303 robject->ref_count += 1; 304 object = robject; 305 continue; 306 } 307 vm_object_collapse(robject); 308 return; 309 } 310 } 311 /* 312 * If there are still references, then we are done. 313 */ 314 return; 315 } 316 317 if (object->type == OBJT_VNODE) { 318 struct vnode *vp = object->handle; 319 320 vp->v_flag &= ~VTEXT; 321 } 322 323 /* 324 * See if this object can persist and has some resident 325 * pages. If so, enter it in the cache. 326 */ 327 if (object->flags & OBJ_CANPERSIST) { 328 if (object->resident_page_count != 0) { 329 #if 0 330 vm_object_page_clean(object, 0, 0 ,TRUE, TRUE); 331 #endif 332 TAILQ_INSERT_TAIL(&vm_object_cached_list, object, 333 cached_list); 334 vm_object_cached++; 335 336 vm_object_cache_trim(); 337 return; 338 } else { 339 object->flags &= ~OBJ_CANPERSIST; 340 } 341 } 342 343 /* 344 * Make sure no one uses us. 345 */ 346 object->flags |= OBJ_DEAD; 347 348 temp = object->backing_object; 349 if (temp) { 350 TAILQ_REMOVE(&temp->shadow_head, object, shadow_list); 351 --temp->shadow_count; 352 } 353 vm_object_terminate(object); 354 /* unlocks and deallocates object */ 355 object = temp; 356 } 357 } 358 359 /* 360 * vm_object_terminate actually destroys the specified object, freeing 361 * up all previously used resources. 362 * 363 * The object must be locked. 364 */ 365 static void 366 vm_object_terminate(object) 367 register vm_object_t object; 368 { 369 register vm_page_t p; 370 int s; 371 372 /* 373 * wait for the pageout daemon to be done with the object 374 */ 375 s = splvm(); 376 while (object->paging_in_progress) { 377 object->flags |= OBJ_PIPWNT; 378 tsleep(object, PVM, "objtrm", 0); 379 } 380 splx(s); 381 382 if (object->paging_in_progress != 0) 383 panic("vm_object_deallocate: pageout in progress"); 384 385 /* 386 * Clean and free the pages, as appropriate. All references to the 387 * object are gone, so we don't need to lock it. 388 */ 389 if (object->type == OBJT_VNODE) { 390 struct vnode *vp = object->handle; 391 int waslocked; 392 393 waslocked = VOP_ISLOCKED(vp); 394 if (!waslocked) 395 VOP_LOCK(vp); 396 vm_object_page_clean(object, 0, 0, TRUE, FALSE); 397 vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0); 398 if (!waslocked) 399 VOP_UNLOCK(vp); 400 } 401 /* 402 * Now free the pages. For internal objects, this also removes them 403 * from paging queues. 404 */ 405 while ((p = TAILQ_FIRST(&object->memq)) != NULL) { 406 if (p->busy || (p->flags & PG_BUSY)) 407 printf("vm_object_terminate: freeing busy page\n"); 408 PAGE_WAKEUP(p); 409 vm_page_free(p); 410 cnt.v_pfree++; 411 } 412 413 /* 414 * Let the pager know object is dead. 415 */ 416 vm_pager_deallocate(object); 417 418 TAILQ_REMOVE(&vm_object_list, object, object_list); 419 vm_object_count--; 420 421 wakeup(object); 422 423 /* 424 * Free the space for the object. 425 */ 426 free((caddr_t) object, M_VMOBJ); 427 } 428 429 /* 430 * vm_object_page_clean 431 * 432 * Clean all dirty pages in the specified range of object. 433 * Leaves page on whatever queue it is currently on. 434 * 435 * Odd semantics: if start == end, we clean everything. 436 * 437 * The object must be locked. 438 */ 439 440 void 441 vm_object_page_clean(object, start, end, syncio, lockflag) 442 vm_object_t object; 443 vm_pindex_t start; 444 vm_pindex_t end; 445 boolean_t syncio; 446 boolean_t lockflag; 447 { 448 register vm_page_t p, np, tp; 449 register vm_offset_t tstart, tend; 450 vm_pindex_t pi; 451 int s; 452 struct vnode *vp; 453 int runlen; 454 int maxf; 455 int chkb; 456 int maxb; 457 int i; 458 vm_page_t maf[vm_pageout_page_count]; 459 vm_page_t mab[vm_pageout_page_count]; 460 vm_page_t ma[vm_pageout_page_count]; 461 462 if (object->type != OBJT_VNODE || 463 (object->flags & OBJ_MIGHTBEDIRTY) == 0) 464 return; 465 466 vp = object->handle; 467 468 if (lockflag) 469 VOP_LOCK(vp); 470 object->flags |= OBJ_CLEANING; 471 472 tstart = start; 473 if (end == 0) { 474 tend = object->size; 475 } else { 476 tend = end; 477 } 478 if ((tstart == 0) && (tend == object->size)) { 479 object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY); 480 } 481 for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) 482 p->flags |= PG_CLEANCHK; 483 484 rescan: 485 for(p = TAILQ_FIRST(&object->memq); p; p = np) { 486 np = TAILQ_NEXT(p, listq); 487 488 pi = p->pindex; 489 if (((p->flags & PG_CLEANCHK) == 0) || 490 (pi < tstart) || (pi >= tend) || 491 (p->valid == 0) || 492 ((p->queue - p->pc) == PQ_CACHE)) { 493 p->flags &= ~PG_CLEANCHK; 494 continue; 495 } 496 497 vm_page_test_dirty(p); 498 if ((p->dirty & p->valid) == 0) { 499 p->flags &= ~PG_CLEANCHK; 500 continue; 501 } 502 503 s = splvm(); 504 if ((p->flags & PG_BUSY) || p->busy) { 505 p->flags |= PG_WANTED|PG_REFERENCED; 506 tsleep(p, PVM, "vpcwai", 0); 507 splx(s); 508 goto rescan; 509 } 510 splx(s); 511 512 s = splvm(); 513 maxf = 0; 514 for(i=1;i<vm_pageout_page_count;i++) { 515 if (tp = vm_page_lookup(object, pi + i)) { 516 if ((tp->flags & PG_BUSY) || 517 (tp->flags & PG_CLEANCHK) == 0) 518 break; 519 if((tp->queue - tp->pc) == PQ_CACHE) { 520 tp->flags &= ~PG_CLEANCHK; 521 break; 522 } 523 vm_page_test_dirty(tp); 524 if ((tp->dirty & tp->valid) == 0) { 525 tp->flags &= ~PG_CLEANCHK; 526 break; 527 } 528 maf[ i - 1 ] = tp; 529 maxf++; 530 continue; 531 } 532 break; 533 } 534 535 maxb = 0; 536 chkb = vm_pageout_page_count - maxf; 537 if (chkb) { 538 for(i = 1; i < chkb;i++) { 539 if (tp = vm_page_lookup(object, pi - i)) { 540 if ((tp->flags & PG_BUSY) || 541 (tp->flags & PG_CLEANCHK) == 0) 542 break; 543 if((tp->queue - tp->pc) == PQ_CACHE) { 544 tp->flags &= ~PG_CLEANCHK; 545 break; 546 } 547 vm_page_test_dirty(tp); 548 if ((tp->dirty & tp->valid) == 0) { 549 tp->flags &= ~PG_CLEANCHK; 550 break; 551 } 552 mab[ i - 1 ] = tp; 553 maxb++; 554 continue; 555 } 556 break; 557 } 558 } 559 560 for(i=0;i<maxb;i++) { 561 int index = (maxb - i) - 1; 562 ma[index] = mab[i]; 563 ma[index]->flags |= PG_BUSY; 564 ma[index]->flags &= ~PG_CLEANCHK; 565 vm_page_protect(ma[index], VM_PROT_READ); 566 } 567 vm_page_protect(p, VM_PROT_READ); 568 p->flags |= PG_BUSY; 569 p->flags &= ~PG_CLEANCHK; 570 ma[maxb] = p; 571 for(i=0;i<maxf;i++) { 572 int index = (maxb + i) + 1; 573 ma[index] = maf[i]; 574 ma[index]->flags |= PG_BUSY; 575 ma[index]->flags &= ~PG_CLEANCHK; 576 vm_page_protect(ma[index], VM_PROT_READ); 577 } 578 runlen = maxb + maxf + 1; 579 splx(s); 580 vm_pageout_flush(ma, runlen, 0); 581 goto rescan; 582 } 583 584 VOP_FSYNC(vp, NULL, syncio, curproc); 585 586 if (lockflag) 587 VOP_UNLOCK(vp); 588 object->flags &= ~OBJ_CLEANING; 589 return; 590 } 591 592 #ifdef not_used 593 /* XXX I cannot tell if this should be an exported symbol */ 594 /* 595 * vm_object_deactivate_pages 596 * 597 * Deactivate all pages in the specified object. (Keep its pages 598 * in memory even though it is no longer referenced.) 599 * 600 * The object must be locked. 601 */ 602 static void 603 vm_object_deactivate_pages(object) 604 register vm_object_t object; 605 { 606 register vm_page_t p, next; 607 608 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) { 609 next = TAILQ_NEXT(p, listq); 610 vm_page_deactivate(p); 611 } 612 } 613 #endif 614 615 /* 616 * Trim the object cache to size. 617 */ 618 static void 619 vm_object_cache_trim() 620 { 621 register vm_object_t object; 622 623 while (vm_object_cached > vm_object_cache_max) { 624 object = TAILQ_FIRST(&vm_object_cached_list); 625 626 vm_object_reference(object); 627 pager_cache(object, FALSE); 628 } 629 } 630 631 632 /* 633 * vm_object_pmap_copy: 634 * 635 * Makes all physical pages in the specified 636 * object range copy-on-write. No writeable 637 * references to these pages should remain. 638 * 639 * The object must *not* be locked. 640 */ 641 void 642 vm_object_pmap_copy(object, start, end) 643 register vm_object_t object; 644 register vm_pindex_t start; 645 register vm_pindex_t end; 646 { 647 register vm_page_t p; 648 649 if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0) 650 return; 651 652 for (p = TAILQ_FIRST(&object->memq); 653 p != NULL; 654 p = TAILQ_NEXT(p, listq)) { 655 vm_page_protect(p, VM_PROT_READ); 656 } 657 658 object->flags &= ~OBJ_WRITEABLE; 659 } 660 661 /* 662 * vm_object_pmap_remove: 663 * 664 * Removes all physical pages in the specified 665 * object range from all physical maps. 666 * 667 * The object must *not* be locked. 668 */ 669 void 670 vm_object_pmap_remove(object, start, end) 671 register vm_object_t object; 672 register vm_pindex_t start; 673 register vm_pindex_t end; 674 { 675 register vm_page_t p; 676 if (object == NULL) 677 return; 678 for (p = TAILQ_FIRST(&object->memq); 679 p != NULL; 680 p = TAILQ_NEXT(p, listq)) { 681 if (p->pindex >= start && p->pindex < end) 682 vm_page_protect(p, VM_PROT_NONE); 683 } 684 } 685 686 /* 687 * vm_object_madvise: 688 * 689 * Implements the madvise function at the object/page level. 690 */ 691 void 692 vm_object_madvise(object, pindex, count, advise) 693 vm_object_t object; 694 vm_pindex_t pindex; 695 int count; 696 int advise; 697 { 698 vm_pindex_t end; 699 vm_page_t m; 700 701 if (object == NULL) 702 return; 703 704 end = pindex + count; 705 706 for (; pindex < end; pindex += 1) { 707 m = vm_page_lookup(object, pindex); 708 709 /* 710 * If the page is busy or not in a normal active state, 711 * we skip it. Things can break if we mess with pages 712 * in any of the below states. 713 */ 714 if (m == NULL || m->busy || (m->flags & PG_BUSY) || 715 m->hold_count || m->wire_count || 716 m->valid != VM_PAGE_BITS_ALL) 717 continue; 718 719 if (advise == MADV_WILLNEED) { 720 if (m->queue != PQ_ACTIVE) 721 vm_page_activate(m); 722 } else if ((advise == MADV_DONTNEED) || 723 ((advise == MADV_FREE) && 724 ((object->type != OBJT_DEFAULT) && 725 (object->type != OBJT_SWAP)))) { 726 vm_page_deactivate(m); 727 } else if (advise == MADV_FREE) { 728 /* 729 * Force a demand-zero on next ref 730 */ 731 if (object->type == OBJT_SWAP) 732 swap_pager_dmzspace(object, m->pindex, 1); 733 vm_page_protect(m, VM_PROT_NONE); 734 vm_page_free(m); 735 } 736 } 737 } 738 739 /* 740 * vm_object_shadow: 741 * 742 * Create a new object which is backed by the 743 * specified existing object range. The source 744 * object reference is deallocated. 745 * 746 * The new object and offset into that object 747 * are returned in the source parameters. 748 */ 749 750 void 751 vm_object_shadow(object, offset, length) 752 vm_object_t *object; /* IN/OUT */ 753 vm_ooffset_t *offset; /* IN/OUT */ 754 vm_size_t length; 755 { 756 register vm_object_t source; 757 register vm_object_t result; 758 759 source = *object; 760 761 /* 762 * Allocate a new object with the given length 763 */ 764 765 if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL) 766 panic("vm_object_shadow: no object for shadowing"); 767 768 /* 769 * The new object shadows the source object, adding a reference to it. 770 * Our caller changes his reference to point to the new object, 771 * removing a reference to the source object. Net result: no change 772 * of reference count. 773 */ 774 result->backing_object = source; 775 if (source) { 776 TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list); 777 ++source->shadow_count; 778 } 779 780 /* 781 * Store the offset into the source object, and fix up the offset into 782 * the new object. 783 */ 784 785 result->backing_object_offset = *offset; 786 787 /* 788 * Return the new things 789 */ 790 791 *offset = 0; 792 *object = result; 793 } 794 795 796 #if defined(OLD_COLLAPSE_CODE) 797 /* 798 * this version of collapse allows the operation to occur earlier and 799 * when paging_in_progress is true for an object... This is not a complete 800 * operation, but should plug 99.9% of the rest of the leaks. 801 */ 802 static void 803 vm_object_qcollapse(object) 804 register vm_object_t object; 805 { 806 register vm_object_t backing_object; 807 register vm_pindex_t backing_offset_index, paging_offset_index; 808 vm_pindex_t backing_object_paging_offset_index; 809 vm_pindex_t new_pindex; 810 register vm_page_t p, pp; 811 register vm_size_t size; 812 813 backing_object = object->backing_object; 814 if (backing_object->ref_count != 1) 815 return; 816 817 backing_object->ref_count += 2; 818 819 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); 820 backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset); 821 paging_offset_index = OFF_TO_IDX(object->paging_offset); 822 size = object->size; 823 p = TAILQ_FIRST(&backing_object->memq); 824 while (p) { 825 vm_page_t next; 826 827 next = TAILQ_NEXT(p, listq); 828 if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) || 829 ((p->queue - p->pc) == PQ_CACHE) || 830 !p->valid || p->hold_count || p->wire_count || p->busy) { 831 p = next; 832 continue; 833 } 834 new_pindex = p->pindex - backing_offset_index; 835 if (p->pindex < backing_offset_index || 836 new_pindex >= size) { 837 if (backing_object->type == OBJT_SWAP) 838 swap_pager_freespace(backing_object, 839 backing_object_paging_offset_index+p->pindex, 840 1); 841 vm_page_protect(p, VM_PROT_NONE); 842 vm_page_free(p); 843 } else { 844 pp = vm_page_lookup(object, new_pindex); 845 if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object, 846 paging_offset_index + new_pindex, NULL, NULL))) { 847 if (backing_object->type == OBJT_SWAP) 848 swap_pager_freespace(backing_object, 849 backing_object_paging_offset_index + p->pindex, 1); 850 vm_page_protect(p, VM_PROT_NONE); 851 vm_page_free(p); 852 } else { 853 if (backing_object->type == OBJT_SWAP) 854 swap_pager_freespace(backing_object, 855 backing_object_paging_offset_index + p->pindex, 1); 856 vm_page_rename(p, object, new_pindex); 857 p->dirty = VM_PAGE_BITS_ALL; 858 } 859 } 860 p = next; 861 } 862 backing_object->ref_count -= 2; 863 } 864 #endif 865 866 /* 867 * vm_object_collapse: 868 * 869 * Collapse an object with the object backing it. 870 * Pages in the backing object are moved into the 871 * parent, and the backing object is deallocated. 872 */ 873 void 874 vm_object_collapse(object) 875 vm_object_t object; 876 877 { 878 vm_object_t backing_object; 879 vm_ooffset_t backing_offset; 880 vm_size_t size; 881 vm_pindex_t new_pindex, backing_offset_index; 882 vm_page_t p, pp; 883 884 while (TRUE) { 885 /* 886 * Verify that the conditions are right for collapse: 887 * 888 * The object exists and no pages in it are currently being paged 889 * out. 890 */ 891 if (object == NULL) 892 return; 893 894 /* 895 * Make sure there is a backing object. 896 */ 897 if ((backing_object = object->backing_object) == NULL) 898 return; 899 900 /* 901 * we check the backing object first, because it is most likely 902 * not collapsable. 903 */ 904 if (backing_object->handle != NULL || 905 (backing_object->type != OBJT_DEFAULT && 906 backing_object->type != OBJT_SWAP) || 907 (backing_object->flags & OBJ_DEAD) || 908 object->handle != NULL || 909 (object->type != OBJT_DEFAULT && 910 object->type != OBJT_SWAP) || 911 (object->flags & OBJ_DEAD)) { 912 return; 913 } 914 915 if (object->paging_in_progress != 0 || 916 backing_object->paging_in_progress != 0) { 917 #if defined(OLD_COLLAPSE_CODE) 918 vm_object_qcollapse(object); 919 #endif 920 return; 921 } 922 923 /* 924 * We know that we can either collapse the backing object (if 925 * the parent is the only reference to it) or (perhaps) remove 926 * the parent's reference to it. 927 */ 928 929 backing_offset = object->backing_object_offset; 930 backing_offset_index = OFF_TO_IDX(backing_offset); 931 size = object->size; 932 933 /* 934 * If there is exactly one reference to the backing object, we 935 * can collapse it into the parent. 936 */ 937 938 if (backing_object->ref_count == 1) { 939 940 backing_object->flags |= OBJ_DEAD; 941 /* 942 * We can collapse the backing object. 943 * 944 * Move all in-memory pages from backing_object to the 945 * parent. Pages that have been paged out will be 946 * overwritten by any of the parent's pages that 947 * shadow them. 948 */ 949 950 while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) { 951 952 new_pindex = p->pindex - backing_offset_index; 953 954 /* 955 * If the parent has a page here, or if this 956 * page falls outside the parent, dispose of 957 * it. 958 * 959 * Otherwise, move it as planned. 960 */ 961 962 if (p->pindex < backing_offset_index || 963 new_pindex >= size) { 964 vm_page_protect(p, VM_PROT_NONE); 965 PAGE_WAKEUP(p); 966 vm_page_free(p); 967 } else { 968 pp = vm_page_lookup(object, new_pindex); 969 if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object, 970 OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) { 971 vm_page_protect(p, VM_PROT_NONE); 972 PAGE_WAKEUP(p); 973 vm_page_free(p); 974 } else { 975 vm_page_rename(p, object, new_pindex); 976 } 977 } 978 } 979 980 /* 981 * Move the pager from backing_object to object. 982 */ 983 984 if (backing_object->type == OBJT_SWAP) { 985 backing_object->paging_in_progress++; 986 if (object->type == OBJT_SWAP) { 987 object->paging_in_progress++; 988 /* 989 * copy shadow object pages into ours 990 * and destroy unneeded pages in 991 * shadow object. 992 */ 993 swap_pager_copy( 994 backing_object, 995 OFF_TO_IDX(backing_object->paging_offset), 996 object, 997 OFF_TO_IDX(object->paging_offset), 998 OFF_TO_IDX(object->backing_object_offset)); 999 vm_object_pip_wakeup(object); 1000 } else { 1001 object->paging_in_progress++; 1002 /* 1003 * move the shadow backing_object's pager data to 1004 * "object" and convert "object" type to OBJT_SWAP. 1005 */ 1006 object->type = OBJT_SWAP; 1007 object->un_pager.swp.swp_nblocks = 1008 backing_object->un_pager.swp.swp_nblocks; 1009 object->un_pager.swp.swp_allocsize = 1010 backing_object->un_pager.swp.swp_allocsize; 1011 object->un_pager.swp.swp_blocks = 1012 backing_object->un_pager.swp.swp_blocks; 1013 object->un_pager.swp.swp_poip = /* XXX */ 1014 backing_object->un_pager.swp.swp_poip; 1015 object->paging_offset = backing_object->paging_offset + backing_offset; 1016 TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list); 1017 1018 /* 1019 * Convert backing object from OBJT_SWAP to 1020 * OBJT_DEFAULT. XXX - only the TAILQ_REMOVE is 1021 * actually necessary. 1022 */ 1023 backing_object->type = OBJT_DEFAULT; 1024 TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list); 1025 /* 1026 * free unnecessary blocks 1027 */ 1028 swap_pager_freespace(object, 0, 1029 OFF_TO_IDX(object->paging_offset)); 1030 vm_object_pip_wakeup(object); 1031 } 1032 1033 vm_object_pip_wakeup(backing_object); 1034 } 1035 /* 1036 * Object now shadows whatever backing_object did. 1037 * Note that the reference to backing_object->backing_object 1038 * moves from within backing_object to within object. 1039 */ 1040 1041 TAILQ_REMOVE(&object->backing_object->shadow_head, object, 1042 shadow_list); 1043 --object->backing_object->shadow_count; 1044 if (backing_object->backing_object) { 1045 TAILQ_REMOVE(&backing_object->backing_object->shadow_head, 1046 backing_object, shadow_list); 1047 --backing_object->backing_object->shadow_count; 1048 } 1049 object->backing_object = backing_object->backing_object; 1050 if (object->backing_object) { 1051 TAILQ_INSERT_TAIL(&object->backing_object->shadow_head, 1052 object, shadow_list); 1053 ++object->backing_object->shadow_count; 1054 } 1055 1056 object->backing_object_offset += backing_object->backing_object_offset; 1057 /* 1058 * Discard backing_object. 1059 * 1060 * Since the backing object has no pages, no pager left, 1061 * and no object references within it, all that is 1062 * necessary is to dispose of it. 1063 */ 1064 1065 TAILQ_REMOVE(&vm_object_list, backing_object, 1066 object_list); 1067 vm_object_count--; 1068 1069 free((caddr_t) backing_object, M_VMOBJ); 1070 1071 object_collapses++; 1072 } else { 1073 /* 1074 * If all of the pages in the backing object are 1075 * shadowed by the parent object, the parent object no 1076 * longer has to shadow the backing object; it can 1077 * shadow the next one in the chain. 1078 * 1079 * The backing object must not be paged out - we'd have 1080 * to check all of the paged-out pages, as well. 1081 */ 1082 1083 if (backing_object->type != OBJT_DEFAULT) { 1084 return; 1085 } 1086 /* 1087 * Should have a check for a 'small' number of pages 1088 * here. 1089 */ 1090 1091 for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) { 1092 new_pindex = p->pindex - backing_offset_index; 1093 1094 /* 1095 * If the parent has a page here, or if this 1096 * page falls outside the parent, keep going. 1097 * 1098 * Otherwise, the backing_object must be left in 1099 * the chain. 1100 */ 1101 1102 if (p->pindex >= backing_offset_index && 1103 new_pindex <= size) { 1104 1105 pp = vm_page_lookup(object, new_pindex); 1106 1107 if ((pp == NULL || pp->valid == 0) && 1108 !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) { 1109 /* 1110 * Page still needed. Can't go any 1111 * further. 1112 */ 1113 return; 1114 } 1115 } 1116 } 1117 1118 /* 1119 * Make the parent shadow the next object in the 1120 * chain. Deallocating backing_object will not remove 1121 * it, since its reference count is at least 2. 1122 */ 1123 1124 TAILQ_REMOVE(&object->backing_object->shadow_head, 1125 object, shadow_list); 1126 --object->backing_object->shadow_count; 1127 vm_object_reference(object->backing_object = backing_object->backing_object); 1128 if (object->backing_object) { 1129 TAILQ_INSERT_TAIL(&object->backing_object->shadow_head, 1130 object, shadow_list); 1131 ++object->backing_object->shadow_count; 1132 } 1133 object->backing_object_offset += backing_object->backing_object_offset; 1134 1135 /* 1136 * Drop the reference count on backing_object. Since 1137 * its ref_count was at least 2, it will not vanish; 1138 * so we don't need to call vm_object_deallocate. 1139 */ 1140 if (backing_object->ref_count == 1) 1141 printf("should have called obj deallocate\n"); 1142 backing_object->ref_count--; 1143 1144 object_bypasses++; 1145 1146 } 1147 1148 /* 1149 * Try again with this object's new backing object. 1150 */ 1151 } 1152 } 1153 1154 /* 1155 * vm_object_page_remove: [internal] 1156 * 1157 * Removes all physical pages in the specified 1158 * object range from the object's list of pages. 1159 * 1160 * The object must be locked. 1161 */ 1162 void 1163 vm_object_page_remove(object, start, end, clean_only) 1164 register vm_object_t object; 1165 register vm_pindex_t start; 1166 register vm_pindex_t end; 1167 boolean_t clean_only; 1168 { 1169 register vm_page_t p, next; 1170 unsigned int size; 1171 int s; 1172 1173 if (object == NULL) 1174 return; 1175 1176 object->paging_in_progress++; 1177 again: 1178 size = end - start; 1179 if (size > 4 || size >= object->size / 4) { 1180 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) { 1181 next = TAILQ_NEXT(p, listq); 1182 if ((start <= p->pindex) && (p->pindex < end)) { 1183 if (p->wire_count != 0) { 1184 vm_page_protect(p, VM_PROT_NONE); 1185 p->valid = 0; 1186 continue; 1187 } 1188 1189 /* 1190 * The busy flags are only cleared at 1191 * interrupt -- minimize the spl transitions 1192 */ 1193 if ((p->flags & PG_BUSY) || p->busy) { 1194 s = splvm(); 1195 if ((p->flags & PG_BUSY) || p->busy) { 1196 p->flags |= PG_WANTED; 1197 tsleep(p, PVM, "vmopar", 0); 1198 splx(s); 1199 goto again; 1200 } 1201 splx(s); 1202 } 1203 1204 if (clean_only) { 1205 vm_page_test_dirty(p); 1206 if (p->valid & p->dirty) 1207 continue; 1208 } 1209 vm_page_protect(p, VM_PROT_NONE); 1210 PAGE_WAKEUP(p); 1211 vm_page_free(p); 1212 } 1213 } 1214 } else { 1215 while (size > 0) { 1216 if ((p = vm_page_lookup(object, start)) != 0) { 1217 if (p->wire_count != 0) { 1218 p->valid = 0; 1219 vm_page_protect(p, VM_PROT_NONE); 1220 start += 1; 1221 size -= 1; 1222 continue; 1223 } 1224 /* 1225 * The busy flags are only cleared at 1226 * interrupt -- minimize the spl transitions 1227 */ 1228 if ((p->flags & PG_BUSY) || p->busy) { 1229 s = splvm(); 1230 if ((p->flags & PG_BUSY) || p->busy) { 1231 p->flags |= PG_WANTED; 1232 tsleep(p, PVM, "vmopar", 0); 1233 splx(s); 1234 goto again; 1235 } 1236 splx(s); 1237 } 1238 if (clean_only) { 1239 vm_page_test_dirty(p); 1240 if (p->valid & p->dirty) { 1241 start += 1; 1242 size -= 1; 1243 continue; 1244 } 1245 } 1246 vm_page_protect(p, VM_PROT_NONE); 1247 PAGE_WAKEUP(p); 1248 vm_page_free(p); 1249 } 1250 start += 1; 1251 size -= 1; 1252 } 1253 } 1254 vm_object_pip_wakeup(object); 1255 } 1256 1257 /* 1258 * Routine: vm_object_coalesce 1259 * Function: Coalesces two objects backing up adjoining 1260 * regions of memory into a single object. 1261 * 1262 * returns TRUE if objects were combined. 1263 * 1264 * NOTE: Only works at the moment if the second object is NULL - 1265 * if it's not, which object do we lock first? 1266 * 1267 * Parameters: 1268 * prev_object First object to coalesce 1269 * prev_offset Offset into prev_object 1270 * next_object Second object into coalesce 1271 * next_offset Offset into next_object 1272 * 1273 * prev_size Size of reference to prev_object 1274 * next_size Size of reference to next_object 1275 * 1276 * Conditions: 1277 * The object must *not* be locked. 1278 */ 1279 boolean_t 1280 vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size) 1281 register vm_object_t prev_object; 1282 vm_pindex_t prev_pindex; 1283 vm_size_t prev_size, next_size; 1284 { 1285 vm_size_t newsize; 1286 1287 if (prev_object == NULL) { 1288 return (TRUE); 1289 } 1290 1291 if (prev_object->type != OBJT_DEFAULT) { 1292 return (FALSE); 1293 } 1294 1295 #if defined(OLD_COLLAPSE_CODE) 1296 /* 1297 * Try to collapse the object first 1298 */ 1299 vm_object_collapse(prev_object); 1300 #endif 1301 1302 /* 1303 * Can't coalesce if: . more than one reference . paged out . shadows 1304 * another object . has a copy elsewhere (any of which mean that the 1305 * pages not mapped to prev_entry may be in use anyway) 1306 */ 1307 1308 if (prev_object->backing_object != NULL) { 1309 return (FALSE); 1310 } 1311 1312 prev_size >>= PAGE_SHIFT; 1313 next_size >>= PAGE_SHIFT; 1314 1315 if ((prev_object->ref_count > 1) && 1316 (prev_object->size != prev_pindex + prev_size)) { 1317 return (FALSE); 1318 } 1319 1320 /* 1321 * Remove any pages that may still be in the object from a previous 1322 * deallocation. 1323 */ 1324 1325 vm_object_page_remove(prev_object, 1326 prev_pindex + prev_size, 1327 prev_pindex + prev_size + next_size, FALSE); 1328 1329 /* 1330 * Extend the object if necessary. 1331 */ 1332 newsize = prev_pindex + prev_size + next_size; 1333 if (newsize > prev_object->size) 1334 prev_object->size = newsize; 1335 1336 return (TRUE); 1337 } 1338 1339 #include "opt_ddb.h" 1340 #ifdef DDB 1341 #include <sys/kernel.h> 1342 1343 #include <machine/cons.h> 1344 1345 #include <ddb/ddb.h> 1346 1347 static int _vm_object_in_map __P((vm_map_t map, vm_object_t object, 1348 vm_map_entry_t entry)); 1349 static int vm_object_in_map __P((vm_object_t object)); 1350 1351 static int 1352 _vm_object_in_map(map, object, entry) 1353 vm_map_t map; 1354 vm_object_t object; 1355 vm_map_entry_t entry; 1356 { 1357 vm_map_t tmpm; 1358 vm_map_entry_t tmpe; 1359 vm_object_t obj; 1360 int entcount; 1361 1362 if (map == 0) 1363 return 0; 1364 1365 if (entry == 0) { 1366 tmpe = map->header.next; 1367 entcount = map->nentries; 1368 while (entcount-- && (tmpe != &map->header)) { 1369 if( _vm_object_in_map(map, object, tmpe)) { 1370 return 1; 1371 } 1372 tmpe = tmpe->next; 1373 } 1374 } else if (entry->is_sub_map || entry->is_a_map) { 1375 tmpm = entry->object.share_map; 1376 tmpe = tmpm->header.next; 1377 entcount = tmpm->nentries; 1378 while (entcount-- && tmpe != &tmpm->header) { 1379 if( _vm_object_in_map(tmpm, object, tmpe)) { 1380 return 1; 1381 } 1382 tmpe = tmpe->next; 1383 } 1384 } else if (obj = entry->object.vm_object) { 1385 for(; obj; obj=obj->backing_object) 1386 if( obj == object) { 1387 return 1; 1388 } 1389 } 1390 return 0; 1391 } 1392 1393 static int 1394 vm_object_in_map( object) 1395 vm_object_t object; 1396 { 1397 struct proc *p; 1398 for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) { 1399 if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */) 1400 continue; 1401 if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) 1402 return 1; 1403 } 1404 if( _vm_object_in_map( kernel_map, object, 0)) 1405 return 1; 1406 if( _vm_object_in_map( kmem_map, object, 0)) 1407 return 1; 1408 if( _vm_object_in_map( pager_map, object, 0)) 1409 return 1; 1410 if( _vm_object_in_map( buffer_map, object, 0)) 1411 return 1; 1412 if( _vm_object_in_map( io_map, object, 0)) 1413 return 1; 1414 if( _vm_object_in_map( phys_map, object, 0)) 1415 return 1; 1416 if( _vm_object_in_map( mb_map, object, 0)) 1417 return 1; 1418 if( _vm_object_in_map( u_map, object, 0)) 1419 return 1; 1420 return 0; 1421 } 1422 1423 DB_SHOW_COMMAND(vmochk, vm_object_check) 1424 { 1425 vm_object_t object; 1426 1427 /* 1428 * make sure that internal objs are in a map somewhere 1429 * and none have zero ref counts. 1430 */ 1431 for (object = TAILQ_FIRST(&vm_object_list); 1432 object != NULL; 1433 object = TAILQ_NEXT(object, object_list)) { 1434 if (object->handle == NULL && 1435 (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) { 1436 if (object->ref_count == 0) { 1437 db_printf("vmochk: internal obj has zero ref count: %d\n", 1438 object->size); 1439 } 1440 if (!vm_object_in_map(object)) { 1441 db_printf("vmochk: internal obj is not in a map: " 1442 "ref: %d, size: %d: 0x%x, backing_object: 0x%x\n", 1443 object->ref_count, object->size, 1444 object->size, object->backing_object); 1445 } 1446 } 1447 } 1448 } 1449 1450 /* 1451 * vm_object_print: [ debug ] 1452 */ 1453 DB_SHOW_COMMAND(object, vm_object_print_static) 1454 { 1455 /* XXX convert args. */ 1456 vm_object_t object = (vm_object_t)addr; 1457 boolean_t full = have_addr; 1458 1459 register vm_page_t p; 1460 1461 /* XXX count is an (unused) arg. Avoid shadowing it. */ 1462 #define count was_count 1463 1464 register int count; 1465 1466 if (object == NULL) 1467 return; 1468 1469 db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ", 1470 (int) object, (int) object->size, 1471 object->resident_page_count, object->ref_count); 1472 db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n", 1473 (int) object->paging_offset, 1474 (int) object->backing_object, (int) object->backing_object_offset); 1475 db_printf("cache: next=%p, prev=%p\n", 1476 TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list)); 1477 1478 if (!full) 1479 return; 1480 1481 db_indent += 2; 1482 count = 0; 1483 for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) { 1484 if (count == 0) 1485 db_iprintf("memory:="); 1486 else if (count == 6) { 1487 db_printf("\n"); 1488 db_iprintf(" ..."); 1489 count = 0; 1490 } else 1491 db_printf(","); 1492 count++; 1493 1494 db_printf("(off=0x%lx,page=0x%lx)", 1495 (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p)); 1496 } 1497 if (count != 0) 1498 db_printf("\n"); 1499 db_indent -= 2; 1500 } 1501 1502 /* XXX. */ 1503 #undef count 1504 1505 /* XXX need this non-static entry for calling from vm_map_print. */ 1506 void 1507 vm_object_print(addr, have_addr, count, modif) 1508 db_expr_t addr; 1509 boolean_t have_addr; 1510 db_expr_t count; 1511 char *modif; 1512 { 1513 vm_object_print_static(addr, have_addr, count, modif); 1514 } 1515 1516 DB_SHOW_COMMAND(vmopag, vm_object_print_pages) 1517 { 1518 vm_object_t object; 1519 int nl = 0; 1520 int c; 1521 for (object = TAILQ_FIRST(&vm_object_list); 1522 object != NULL; 1523 object = TAILQ_NEXT(object, object_list)) { 1524 vm_pindex_t idx, fidx; 1525 vm_pindex_t osize; 1526 vm_offset_t pa = -1, padiff; 1527 int rcount; 1528 vm_page_t m; 1529 1530 db_printf("new object: 0x%x\n", object); 1531 if ( nl > 18) { 1532 c = cngetc(); 1533 if (c != ' ') 1534 return; 1535 nl = 0; 1536 } 1537 nl++; 1538 rcount = 0; 1539 fidx = 0; 1540 osize = object->size; 1541 if (osize > 128) 1542 osize = 128; 1543 for(idx=0;idx<osize;idx++) { 1544 m = vm_page_lookup(object, idx); 1545 if (m == NULL) { 1546 if (rcount) { 1547 db_printf(" index(%d)run(%d)pa(0x%x)\n", 1548 fidx, rcount, pa); 1549 if ( nl > 18) { 1550 c = cngetc(); 1551 if (c != ' ') 1552 return; 1553 nl = 0; 1554 } 1555 nl++; 1556 rcount = 0; 1557 } 1558 continue; 1559 } 1560 1561 1562 if (rcount && 1563 (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) { 1564 ++rcount; 1565 continue; 1566 } 1567 if (rcount) { 1568 padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m); 1569 padiff >>= PAGE_SHIFT; 1570 padiff &= PQ_L2_MASK; 1571 if (padiff == 0) { 1572 pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE; 1573 ++rcount; 1574 continue; 1575 } 1576 db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa); 1577 db_printf("pd(%d)\n", padiff); 1578 if ( nl > 18) { 1579 c = cngetc(); 1580 if (c != ' ') 1581 return; 1582 nl = 0; 1583 } 1584 nl++; 1585 } 1586 fidx = idx; 1587 pa = VM_PAGE_TO_PHYS(m); 1588 rcount = 1; 1589 } 1590 if (rcount) { 1591 db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa); 1592 if ( nl > 18) { 1593 c = cngetc(); 1594 if (c != ' ') 1595 return; 1596 nl = 0; 1597 } 1598 nl++; 1599 } 1600 } 1601 } 1602 #endif /* DDB */ 1603