/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_object.c,v 1.97 1997/09/01 03:17:22 bde Exp $
 */

/*
 *	Virtual memory object module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

static void	vm_object_qcollapse __P((vm_object_t object));
#ifdef not_used
static void	vm_object_deactivate_pages __P((vm_object_t));
#endif
static void	vm_object_terminate __P((vm_object_t));
static void	vm_object_cache_trim __P((void));

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

int vm_object_cache_max;
struct object_q vm_object_cached_list;
static int vm_object_cached;		/* size of cached list */
struct object_q vm_object_list;
struct simplelock vm_object_list_lock;
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
#define VM_OBJECTS_INIT 256
struct vm_object vm_objects_init[VM_OBJECTS_INIT];

void
_vm_object_allocate(type, size, object)
	objtype_t type;
	vm_size_t size;
	register vm_object_t object;
{
	int incr;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	object->behavior = OBJ_NORMAL;
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->paging_offset = (vm_ooffset_t) 0;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	object->page_hint = NULL;

	object->last_read = 0;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
}

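/*
 * Illustrative sketch (not part of the original source): how a caller
 * would typically obtain an anonymous object and manage its reference
 * count.  The function name and the VM_OBJECT_EXAMPLE guard are
 * hypothetical; the guard is never defined, so this compiles away.
 */
#ifdef VM_OBJECT_EXAMPLE
static void
vm_object_example_lifecycle(void)
{
	vm_object_t obj;

	/* A 16-page default (anonymous) object; ref_count starts at 1. */
	obj = vm_object_allocate(OBJT_DEFAULT, 16);

	vm_object_reference(obj);	/* ref_count: 1 -> 2 */
	vm_object_deallocate(obj);	/* ref_count: 2 -> 1 */
	vm_object_deallocate(obj);	/* last reference; the object may
					 * now be cached or terminated */
}
#endif
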
/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init()
{
	TAILQ_INIT(&vm_object_cached_list);
	TAILQ_INIT(&vm_object_list);
	simple_lock_init(&vm_object_list_lock);
	vm_object_count = 0;

	vm_object_cache_max = 84;
	if (cnt.v_page_count > 1000)
		vm_object_cache_max += (cnt.v_page_count - 1000) / 4;

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
	    vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2()
{
	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 4);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(type, size)
	objtype_t type;
	vm_size_t size;
{
	register vm_object_t result;

	result = (vm_object_t) zalloc(obj_zone);

	_vm_object_allocate(type, size, result);

	return (result);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(object)
	register vm_object_t object;
{
	if (object == NULL)
		return;

	if (object->ref_count == 0) {
		if ((object->flags & OBJ_CANPERSIST) == 0)
			panic("vm_object_reference: non-persistent object with 0 ref_count");
		TAILQ_REMOVE(&vm_object_cached_list, object, cached_list);
		vm_object_cached--;
	}
	object->ref_count++;
}

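/*
 * Illustrative sketch (not in the original source): every reference
 * taken with vm_object_reference() must eventually be dropped with
 * vm_object_deallocate().  A subsystem pinning an object across a
 * possibly-blocking operation would follow this pattern; the function
 * name and guard are hypothetical.
 */
#ifdef VM_OBJECT_EXAMPLE
static void
vm_object_example_pin(vm_object_t obj)
{
	vm_object_reference(obj);	/* pin: obj cannot be terminated */
	/* ... possibly-blocking work against obj's pages ... */
	vm_object_deallocate(obj);	/* unpin */
}
#endif
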
/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(object)
	vm_object_t object;
{
	vm_object_t temp;

	while (object != NULL) {

		if (object->ref_count == 0)
			panic("vm_object_deallocate: object deallocated too many times");

		/*
		 * Lose the reference
		 */
		object->ref_count--;
		if (object->ref_count != 0) {
			if ((object->ref_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = TAILQ_FIRST(&object->shadow_head);
				if ((robject != NULL) &&
				    (robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {
					int s;

					robject->ref_count += 2;
					object->ref_count += 2;

					do {
						s = splvm();
						while (robject->paging_in_progress) {
							robject->flags |= OBJ_PIPWNT;
							tsleep(robject, PVM, "objde1", 0);
						}

						while (object->paging_in_progress) {
							object->flags |= OBJ_PIPWNT;
							tsleep(object, PVM, "objde2", 0);
						}
						splx(s);

					} while (object->paging_in_progress || robject->paging_in_progress);

					object->ref_count -= 2;
					robject->ref_count -= 2;
					if (robject->ref_count == 0) {
						robject->ref_count += 1;
						object = robject;
						continue;
					}
					vm_object_collapse(robject);
					return;
				}
			}
			/*
			 * If there are still references, then we are done.
			 */
			return;
		}

		if (object->type == OBJT_VNODE) {
			struct vnode *vp = object->handle;

			vp->v_flag &= ~VTEXT;
		}

		/*
		 * See if this object can persist and has some resident
		 * pages.  If so, enter it in the cache.
		 */
		if (object->flags & OBJ_CANPERSIST) {
			if (object->resident_page_count != 0) {
#if 0
				vm_object_page_clean(object, 0, 0, TRUE, TRUE);
#endif
				TAILQ_INSERT_TAIL(&vm_object_cached_list, object,
				    cached_list);
				vm_object_cached++;

				vm_object_cache_trim();
				return;
			} else {
				object->flags &= ~OBJ_CANPERSIST;
			}
		}

		/*
		 * Make sure no one uses us.
		 */
		object->flags |= OBJ_DEAD;

		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			--temp->shadow_count;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 */
static void
vm_object_terminate(object)
	register vm_object_t object;
{
	register vm_page_t p;
	int s;

	if (object->flags & OBJ_VFS_REF)
		panic("vm_object_terminate: freeing VFS_REF'ed object");

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	s = splvm();
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		tsleep(object, PVM, "objtrm", 0);
	}
	splx(s);

	if (object->paging_in_progress != 0)
		panic("vm_object_terminate: pageout in progress");

	/*
	 * Clean and free the pages, as appropriate.  All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = object->handle;
		struct proc *p = curproc;	/* XXX */
		int waslocked;

		waslocked = VOP_ISLOCKED(vp);
		if (!waslocked)
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		vm_object_page_clean(object, 0, 0, TRUE, FALSE);
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
		if (!waslocked)
			VOP_UNLOCK(vp, 0, p);
	}

	/*
	 * Now free the pages.  For internal objects, this also removes them
	 * from paging queues.
	 */
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->busy || (p->flags & PG_BUSY))
			printf("vm_object_terminate: freeing busy page\n");
		PAGE_WAKEUP(p);
		vm_page_free(p);
		cnt.v_pfree++;
	}

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	simple_lock(&vm_object_list_lock);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	vm_object_count--;
	simple_unlock(&vm_object_list_lock);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.
 *	Leaves page on whatever queue it is currently on.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(object, start, end, syncio, lockflag)
	vm_object_t object;
	vm_pindex_t start;
	vm_pindex_t end;
	boolean_t syncio;
	boolean_t lockflag;
{
	register vm_page_t p, np, tp;
	register vm_offset_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];
	struct proc *pproc = curproc;	/* XXX */

	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	vp = object->handle;

	if (lockflag)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, pproc);
	object->flags |= OBJ_CLEANING;

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}
	if ((tstart == 0) && (tend == object->size)) {
		object->flags &= ~(OBJ_WRITEABLE | OBJ_MIGHTBEDIRTY);
	}
	for (p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq))
		p->flags |= PG_CLEANCHK;

rescan:
	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			p->flags &= ~PG_CLEANCHK;
			continue;
		}

		s = splvm();
		if ((p->flags & PG_BUSY) || p->busy) {
			p->flags |= PG_WANTED | PG_REFERENCED;
			tsleep(p, PVM, "vpcwai", 0);
			splx(s);
			goto rescan;
		}
		splx(s);

		s = splvm();
		maxf = 0;
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    (tp->flags & PG_CLEANCHK) == 0)
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					tp->flags &= ~PG_CLEANCHK;
					break;
				}
				maf[i - 1] = tp;
				maxf++;
				continue;
			}
			break;
		}

		maxb = 0;
		chkb = vm_pageout_page_count - maxf;
		if (chkb) {
			for (i = 1; i < chkb; i++) {
				if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
					if ((tp->flags & PG_BUSY) ||
					    (tp->flags & PG_CLEANCHK) == 0)
						break;
					if ((tp->queue - tp->pc) == PQ_CACHE) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						tp->flags &= ~PG_CLEANCHK;
						break;
					}
					mab[i - 1] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}

		for (i = 0; i < maxb; i++) {
			int index = (maxb - i) - 1;

			ma[index] = mab[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		vm_page_protect(p, VM_PROT_READ);
		p->flags |= PG_BUSY;
		p->flags &= ~PG_CLEANCHK;
		ma[maxb] = p;
		for (i = 0; i < maxf; i++) {
			int index = (maxb + i) + 1;

			ma[index] = maf[i];
			ma[index]->flags |= PG_BUSY;
			ma[index]->flags &= ~PG_CLEANCHK;
			vm_page_protect(ma[index], VM_PROT_READ);
		}
		runlen = maxb + maxf + 1;
		splx(s);
		vm_pageout_flush(ma, runlen, 0);
		goto rescan;
	}

	VOP_FSYNC(vp, NULL, syncio, curproc);

	if (lockflag)
		VOP_UNLOCK(vp, 0, pproc);
	object->flags &= ~OBJ_CLEANING;
	return;
}

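/*
 * Illustrative sketch (not in the original source): flushing every dirty
 * page of a vnode-backed object.  Passing start == end == 0 selects the
 * whole object (the "odd semantics" noted above); TRUE/TRUE requests a
 * synchronous write with the vnode lock taken here.  The function name
 * and guard are hypothetical.
 */
#ifdef VM_OBJECT_EXAMPLE
static void
vm_object_example_sync(vm_object_t obj)
{
	vm_object_page_clean(obj, 0, 0, TRUE, TRUE);
}
#endif
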
#ifdef not_used
/* XXX I cannot tell if this should be an exported symbol */
/*
 *	vm_object_deactivate_pages
 *
 *	Deactivate all pages in the specified object.  (Keep its pages
 *	in memory even though it is no longer referenced.)
 *
 *	The object must be locked.
 */
static void
vm_object_deactivate_pages(object)
	register vm_object_t object;
{
	register vm_page_t p, next;

	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
		next = TAILQ_NEXT(p, listq);
		vm_page_deactivate(p);
	}
}
#endif

/*
 *	Trim the object cache to size.
 */
static void
vm_object_cache_trim()
{
	register vm_object_t object;

	while (vm_object_cached > vm_object_cache_max) {
		object = TAILQ_FIRST(&vm_object_cached_list);

		vm_object_reference(object);
		pager_cache(object, FALSE);
	}
}

/*
 *	vm_object_pmap_copy:
 *
 *	Makes all physical pages in the specified
 *	object range copy-on-write.  No writeable
 *	references to these pages should remain.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_copy(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	register vm_page_t p;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	for (p = TAILQ_FIRST(&object->memq);
	    p != NULL;
	    p = TAILQ_NEXT(p, listq)) {
		vm_page_protect(p, VM_PROT_READ);
	}

	object->flags &= ~OBJ_WRITEABLE;
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(object, start, end)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
{
	register vm_page_t p;

	if (object == NULL)
		return;
	for (p = TAILQ_FIRST(&object->memq);
	    p != NULL;
	    p = TAILQ_NEXT(p, listq)) {
		if (p->pindex >= start && p->pindex < end)
			vm_page_protect(p, VM_PROT_NONE);
	}
	if ((start == 0) && (object->size == end))
		object->flags &= ~OBJ_WRITEABLE;
}

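/*
 * Illustrative sketch (not in the original source): the two pmap helpers
 * above differ only in the protection applied.  Downgrading to read-only
 * is the first half of a copy-on-write setup, while removing all
 * mappings forces the next access to refault.  The function name and
 * guard are hypothetical.
 */
#ifdef VM_OBJECT_EXAMPLE
static void
vm_object_example_protect(vm_object_t obj)
{
	/* Make every resident page of the object copy-on-write... */
	vm_object_pmap_copy(obj, 0, obj->size);
	/* ...or revoke every mapping of it outright. */
	vm_object_pmap_remove(obj, 0, obj->size);
}
#endif
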
/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 */
void
vm_object_madvise(object, pindex, count, advise)
	vm_object_t object;
	vm_pindex_t pindex;
	int count;
	int advise;
{
	int s;
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	if (object == NULL)
		return;

	end = pindex + count;

	for (; pindex < end; pindex += 1) {

relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL) {
			if (tobject->type != OBJT_DEFAULT) {
				continue;
			}

			tobject = tobject->backing_object;
			if ((tobject == NULL) || (tobject->ref_count != 1)) {
				continue;
			}
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  Things can break if we mess with pages
		 * in any of the below states.
		 */
		if (m->hold_count || m->wire_count ||
		    m->valid != VM_PAGE_BITS_ALL) {
			continue;
		}

		if (m->busy || (m->flags & PG_BUSY)) {
			s = splvm();
			if (m->busy || (m->flags & PG_BUSY)) {
				m->flags |= PG_WANTED;
				tsleep(m, PVM, "madvpw", 0);
			}
			splx(s);
			goto relookup;
		}

		if (advise == MADV_WILLNEED) {
			if (m->queue != PQ_ACTIVE)
				vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_deactivate(m);
		} else if (advise == MADV_FREE) {
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			m->dirty = 0;
			/*
			 * Force a demand zero if attempt to read from swap.
			 * We currently don't handle vnode files correctly,
			 * and will reread stale contents unnecessarily.
			 */
			if (object->type == OBJT_SWAP)
				swap_pager_dmzspace(tobject, m->pindex, 1);
		}
	}
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(object, offset, length)
	vm_object_t *object;	/* IN/OUT */
	vm_ooffset_t *offset;	/* IN/OUT */
	vm_size_t length;
{
	register vm_object_t source;
	register vm_object_t result;

	source = *object;

	/*
	 * Allocate a new object with the given length
	 */
	if ((result = vm_object_allocate(OBJT_DEFAULT, length)) == NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		++source->shadow_count;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}

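/*
 * Illustrative sketch (not in the original source): a copy-on-write
 * setup in the style of a map-entry virtual copy.  Existing mappings
 * are first downgraded to read-only, then a shadow object is
 * interposed; the caller's object/offset pair is replaced in place, so
 * later writes land in the shadow while untouched pages fall through
 * to the original backing object.  The names, the page-unit length,
 * and the guard are hypothetical assumptions.
 */
#ifdef VM_OBJECT_EXAMPLE
static void
vm_object_example_cow(vm_object_t *objp, vm_ooffset_t *offp,
    vm_size_t npages)
{
	vm_pindex_t first = OFF_TO_IDX(*offp);

	vm_object_pmap_copy(*objp, first, first + npages);
	vm_object_shadow(objp, offp, npages);
}
#endif
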
/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(object)
	register vm_object_t object;
{
	register vm_object_t backing_object;
	register vm_pindex_t backing_offset_index, paging_offset_index;
	vm_pindex_t backing_object_paging_offset_index;
	vm_pindex_t new_pindex;
	register vm_page_t p, pp;
	register vm_size_t size;

	backing_object = object->backing_object;
	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);
	backing_object_paging_offset_index = OFF_TO_IDX(backing_object->paging_offset);
	paging_offset_index = OFF_TO_IDX(object->paging_offset);
	size = object->size;
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next;

		next = TAILQ_NEXT(p, listq);
		if ((p->flags & (PG_BUSY | PG_FICTITIOUS)) ||
		    ((p->queue - p->pc) == PQ_CACHE) ||
		    !p->valid || p->hold_count || p->wire_count || p->busy) {
			p = next;
			continue;
		}
		new_pindex = p->pindex - backing_offset_index;
		if (p->pindex < backing_offset_index ||
		    new_pindex >= size) {
			if (backing_object->type == OBJT_SWAP)
				swap_pager_freespace(backing_object,
				    backing_object_paging_offset_index + p->pindex,
				    1);
			vm_page_protect(p, VM_PROT_NONE);
			vm_page_free(p);
		} else {
			pp = vm_page_lookup(object, new_pindex);
			if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
			    paging_offset_index + new_pindex, NULL, NULL))) {
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			} else {
				if (backing_object->type == OBJT_SWAP)
					swap_pager_freespace(backing_object,
					    backing_object_paging_offset_index + p->pindex, 1);
				vm_page_rename(p, object, new_pindex);
				vm_page_protect(p, VM_PROT_NONE);
				p->dirty = VM_PAGE_BITS_ALL;
			}
		}
		p = next;
	}
	backing_object->ref_count -= 2;
}

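/*
 * Added explanatory note (not in the original source): vm_object_collapse
 * below handles two distinct cases.  If the parent holds the only
 * reference to the backing object, every resident page (and any swap
 * pager data) is migrated into the parent and the backing object is
 * destroyed outright (counted in object_collapses).  Otherwise, the
 * parent can sometimes bypass the backing object and shadow the next
 * object in the chain, but only if no page the parent might need exists
 * solely in the middle object (counted in object_bypasses).
 */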
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(object)
	vm_object_t object;
{
	vm_object_t backing_object;
	vm_ooffset_t backing_offset;
	vm_size_t size;
	vm_pindex_t new_pindex, backing_offset_index;
	vm_page_t p, pp;

	while (TRUE) {
		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and no pages in it are currently being paged
		 * out.
		 */
		if (object == NULL)
			return;

		/*
		 * Make sure there is a backing object.
		 */
		if ((backing_object = object->backing_object) == NULL)
			return;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsable.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			return;
		}

		if (object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0) {
			vm_object_qcollapse(object);
			return;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) remove
		 * the parent's reference to it.
		 */
		backing_offset = object->backing_object_offset;
		backing_offset_index = OFF_TO_IDX(backing_offset);
		size = object->size;

		/*
		 * If there is exactly one reference to the backing object, we
		 * can collapse it into the parent.
		 */
		if (backing_object->ref_count == 1) {

			backing_object->flags |= OBJ_DEAD;
			/*
			 * We can collapse the backing object.
			 *
			 * Move all in-memory pages from backing_object to the
			 * parent.  Pages that have been paged out will be
			 * overwritten by any of the parent's pages that
			 * shadow them.
			 */
			while ((p = TAILQ_FIRST(&backing_object->memq)) != NULL) {

				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, dispose of
				 * it.
				 *
				 * Otherwise, move it as planned.
				 */
				if (p->pindex < backing_offset_index ||
				    new_pindex >= size) {
					vm_page_protect(p, VM_PROT_NONE);
					PAGE_WAKEUP(p);
					vm_page_free(p);
				} else {
					pp = vm_page_lookup(object, new_pindex);
					if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
					    OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
						vm_page_protect(p, VM_PROT_NONE);
						PAGE_WAKEUP(p);
						vm_page_free(p);
					} else {
						vm_page_protect(p, VM_PROT_NONE);
						vm_page_rename(p, object, new_pindex);
						p->dirty = VM_PAGE_BITS_ALL;
					}
				}
			}

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				backing_object->paging_in_progress++;
				if (object->type == OBJT_SWAP) {
					object->paging_in_progress++;
					/*
					 * copy shadow object pages into ours
					 * and destroy unneeded pages in
					 * shadow object.
					 */
					swap_pager_copy(
					    backing_object,
					    OFF_TO_IDX(backing_object->paging_offset),
					    object,
					    OFF_TO_IDX(object->paging_offset),
					    OFF_TO_IDX(object->backing_object_offset));
					vm_object_pip_wakeup(object);
				} else {
					object->paging_in_progress++;
					/*
					 * move the shadow backing_object's pager data to
					 * "object" and convert "object" type to OBJT_SWAP.
					 */
					object->type = OBJT_SWAP;
					object->un_pager.swp.swp_nblocks =
					    backing_object->un_pager.swp.swp_nblocks;
					object->un_pager.swp.swp_allocsize =
					    backing_object->un_pager.swp.swp_allocsize;
					object->un_pager.swp.swp_blocks =
					    backing_object->un_pager.swp.swp_blocks;
					object->un_pager.swp.swp_poip =	/* XXX */
					    backing_object->un_pager.swp.swp_poip;
					object->paging_offset = backing_object->paging_offset + backing_offset;
					TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);

					/*
					 * Convert backing object from OBJT_SWAP to
					 * OBJT_DEFAULT.
					 * XXX - only the TAILQ_REMOVE is
					 * actually necessary.
					 */
					backing_object->type = OBJT_DEFAULT;
					TAILQ_REMOVE(&swap_pager_un_object_list, backing_object, pager_object_list);
					/*
					 * free unnecessary blocks
					 */
					swap_pager_freespace(object, 0,
					    OFF_TO_IDX(object->paging_offset));
					vm_object_pip_wakeup(object);
				}

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to backing_object->backing_object
			 * moves from within backing_object to within object.
			 */
			TAILQ_REMOVE(&object->backing_object->shadow_head, object,
			    shadow_list);
			--object->backing_object->shadow_count;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
				    backing_object, shadow_list);
				--backing_object->backing_object->shadow_count;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}

			object->backing_object_offset += backing_object->backing_object_offset;
			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			TAILQ_REMOVE(&vm_object_list, backing_object,
			    object_list);
			vm_object_count--;

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			/*
			 * If all of the pages in the backing object are
			 * shadowed by the parent object, the parent object no
			 * longer has to shadow the backing object; it can
			 * shadow the next one in the chain.
			 *
			 * The backing object must not be paged out - we'd have
			 * to check all of the paged-out pages, as well.
			 */
			if (backing_object->type != OBJT_DEFAULT) {
				return;
			}
			/*
			 * Should have a check for a 'small' number of pages
			 * here.
			 */
			for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
				new_pindex = p->pindex - backing_offset_index;

				/*
				 * If the parent has a page here, or if this
				 * page falls outside the parent, keep going.
				 *
				 * Otherwise, the backing_object must be left in
				 * the chain.
				 */
				if (p->pindex >= backing_offset_index &&
				    new_pindex <= size) {

					pp = vm_page_lookup(object, new_pindex);

					if ((pp == NULL || pp->valid == 0) &&
					    !vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
						/*
						 * Page still needed.  Can't go any
						 * further.
						 */
						return;
					}
				}
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			TAILQ_REMOVE(&object->backing_object->shadow_head,
			    object, shadow_list);
			--object->backing_object->shadow_count;
			vm_object_reference(object->backing_object = backing_object->backing_object);
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
				    object, shadow_list);
				++object->backing_object->shadow_count;
			}
			object->backing_object_offset += backing_object->backing_object_offset;

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate.
			 */
			if (backing_object->ref_count == 1)
				printf("should have called obj deallocate\n");
			backing_object->ref_count--;

			object_bypasses++;

		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(object, start, end, clean_only)
	register vm_object_t object;
	register vm_pindex_t start;
	register vm_pindex_t end;
	boolean_t clean_only;
{
	register vm_page_t p, next;
	unsigned int size;
	int s;

	if (object == NULL)
		return;

	object->paging_in_progress++;
again:
	size = end - start;
	if (size > 4 || size >= object->size / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if ((start <= p->pindex) && (p->pindex < end)) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if ((p->flags & PG_BUSY) || p->busy) {
					s = splvm();
					if ((p->flags & PG_BUSY) || p->busy) {
						p->flags |= PG_WANTED;
						tsleep(p, PVM, "vmopar", 0);
						splx(s);
						goto again;
					}
					splx(s);
				}

				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}
				vm_page_protect(p, VM_PROT_NONE);
				PAGE_WAKEUP(p);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != NULL) {
				if (p->wire_count != 0) {
					p->valid = 0;
					vm_page_protect(p, VM_PROT_NONE);
					start += 1;
					size -= 1;
					continue;
				}
				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if ((p->flags & PG_BUSY) || p->busy) {
					s = splvm();
					if ((p->flags & PG_BUSY) || p->busy) {
						p->flags |= PG_WANTED;
						tsleep(p, PVM, "vmopar", 0);
						splx(s);
						goto again;
					}
					splx(s);
				}
				if (clean_only) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}
				vm_page_protect(p, VM_PROT_NONE);
				PAGE_WAKEUP(p);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_object_pip_wakeup(object);
}

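/*
 * Illustrative sketch (not in the original source): a truncation-style
 * caller discarding every page at or beyond a new end-of-object index.
 * clean_only == FALSE throws dirty pages away as well.  The function
 * name and guard are hypothetical.
 */
#ifdef VM_OBJECT_EXAMPLE
static void
vm_object_example_truncate(vm_object_t obj, vm_pindex_t newsize)
{
	vm_object_page_remove(obj, newsize, obj->size, FALSE);
	obj->size = newsize;
}
#endif
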
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
	register vm_object_t prev_object;
	vm_pindex_t prev_pindex;
	vm_size_t prev_size, next_size;
{
	vm_size_t newsize;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if: . more than one reference . paged out . shadows
	 * another object . has a copy elsewhere (any of which mean that the
	 * pages not mapped to prev_entry may be in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != prev_pindex + prev_size)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	vm_object_page_remove(prev_object,
	    prev_pindex + prev_size,
	    prev_pindex + prev_size + next_size, FALSE);

	/*
	 * Extend the object if necessary.
	 */
	newsize = prev_pindex + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	return (TRUE);
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <machine/cons.h>

#include <ddb/ddb.h>

static int	_vm_object_in_map __P((vm_map_t map, vm_object_t object,
				       vm_map_entry_t entry));
static int	vm_object_in_map __P((vm_object_t object));

static int
_vm_object_in_map(map, object, entry)
	vm_map_t map;
	vm_object_t object;
	vm_map_entry_t entry;
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & (MAP_ENTRY_IS_A_MAP | MAP_ENTRY_IS_SUB_MAP)) {
		tmpm = entry->object.share_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(object)
	vm_object_t object;
{
	struct proc *p;

	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
			return 1;
	}
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	if (_vm_object_in_map(io_map, object, 0))
		return 1;
	if (_vm_object_in_map(phys_map, object, 0))
		return 1;
	if (_vm_object_in_map(mb_map, object, 0))
		return 1;
	if (_vm_object_in_map(u_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	for (object = TAILQ_FIRST(&vm_object_list);
	    object != NULL;
	    object = TAILQ_NEXT(object, object_list)) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %d\n",
				    object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf("vmochk: internal obj is not in a map: "
				    "ref: %d, size: %d: 0x%x, backing_object: 0x%x\n",
				    object->ref_count, object->size,
				    object->size, object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	register vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	register int count;

	if (object == NULL)
		return;

	db_iprintf("Object 0x%x: size=0x%x, res=%d, ref=%d, ",
	    (int) object, (int) object->size,
	    object->resident_page_count, object->ref_count);
	db_printf("offset=0x%x, backing_object=(0x%x)+0x%x\n",
	    (int) object->paging_offset,
	    (int) object->backing_object, (int) object->backing_object_offset);
	db_printf("cache: next=%p, prev=%p\n",
	    TAILQ_NEXT(object, cached_list), TAILQ_PREV(object, cached_list));

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(addr, have_addr, count, modif)
	db_expr_t addr;
	boolean_t have_addr;
	db_expr_t count;
	char *modif;
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	for (object = TAILQ_FIRST(&vm_object_list);
	    object != NULL;
	    object = TAILQ_NEXT(object, object_list)) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: 0x%x\n", object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%d)run(%d)pa(0x%x)\n",
					    fidx, rcount, pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%d)run(%d)pa(0x%x)", fidx, rcount, pa);
				db_printf("pd(%d)\n", padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%d)run(%d)pa(0x%x)\n", fidx, rcount, pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */