/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Virtual memory object module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#define EASY_SCAN_FACTOR	8

#define MSYNC_FLUSH_HARDSEQ	0x01
#define MSYNC_FLUSH_SOFTSEQ	0x02

/*
 * msync / VM object flushing optimizations
 */
static int msync_flush_flags = MSYNC_FLUSH_HARDSEQ | MSYNC_FLUSH_SOFTSEQ;
SYSCTL_INT(_vm, OID_AUTO, msync_flush_flags,
    CTLFLAG_RW, &msync_flush_flags, 0, "");

static int old_msync;
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static void	vm_object_qcollapse(vm_object_t object);
static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
		    int curgeneration, int pagerflags);

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
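 *
 *	As an illustrative sketch (added commentary, not from the
 *	original design notes): after a copy-on-write fork, a new
 *	anonymous object typically shadows the original,
 *
 *		[shadow object] --backing_object--> [original object]
 *
 *	so reads fault through to the original's pages while writes
 *	are satisfied from private copies kept in the shadow object.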
132 * 133 * The only items within the object structure which are 134 * modified after time of creation are: 135 * reference count locked by object's lock 136 * pager routine locked by object's lock 137 * 138 */ 139 140 struct object_q vm_object_list; 141 struct mtx vm_object_list_mtx; /* lock for object list and count */ 142 143 struct vm_object kernel_object_store; 144 struct vm_object kmem_object_store; 145 146 static long object_collapses; 147 static long object_bypasses; 148 static int next_index; 149 static uma_zone_t obj_zone; 150 #define VM_OBJECTS_INIT 256 151 152 static void vm_object_zinit(void *mem, int size); 153 154 #ifdef INVARIANTS 155 static void vm_object_zdtor(void *mem, int size, void *arg); 156 157 static void 158 vm_object_zdtor(void *mem, int size, void *arg) 159 { 160 vm_object_t object; 161 162 object = (vm_object_t)mem; 163 KASSERT(TAILQ_EMPTY(&object->memq), 164 ("object %p has resident pages", 165 object)); 166 KASSERT(object->paging_in_progress == 0, 167 ("object %p paging_in_progress = %d", 168 object, object->paging_in_progress)); 169 KASSERT(object->resident_page_count == 0, 170 ("object %p resident_page_count = %d", 171 object, object->resident_page_count)); 172 KASSERT(object->shadow_count == 0, 173 ("object %p shadow_count = %d", 174 object, object->shadow_count)); 175 } 176 #endif 177 178 static void 179 vm_object_zinit(void *mem, int size) 180 { 181 vm_object_t object; 182 183 object = (vm_object_t)mem; 184 bzero(&object->mtx, sizeof(object->mtx)); 185 VM_OBJECT_LOCK_INIT(object); 186 187 /* These are true for any object that has been freed */ 188 object->paging_in_progress = 0; 189 object->resident_page_count = 0; 190 object->shadow_count = 0; 191 } 192 193 void 194 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object) 195 { 196 int incr; 197 198 TAILQ_INIT(&object->memq); 199 LIST_INIT(&object->shadow_head); 200 201 object->root = NULL; 202 object->type = type; 203 object->size = size; 204 object->generation = 1; 205 object->ref_count = 1; 206 object->flags = 0; 207 if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP)) 208 object->flags = OBJ_ONEMAPPING; 209 if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1)) 210 incr = PQ_L2_SIZE / 3 + PQ_PRIME1; 211 else 212 incr = size; 213 do 214 object->pg_color = next_index; 215 while (!atomic_cmpset_int(&next_index, object->pg_color, 216 (object->pg_color + incr) & PQ_L2_MASK)); 217 object->handle = NULL; 218 object->backing_object = NULL; 219 object->backing_object_offset = (vm_ooffset_t) 0; 220 221 mtx_lock(&vm_object_list_mtx); 222 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); 223 mtx_unlock(&vm_object_list_mtx); 224 } 225 226 /* 227 * vm_object_init: 228 * 229 * Initialize the VM objects module. 230 */ 231 void 232 vm_object_init(void) 233 { 234 TAILQ_INIT(&vm_object_list); 235 mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF); 236 237 VM_OBJECT_LOCK_INIT(&kernel_object_store); 238 _vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS), 239 kernel_object); 240 241 /* 242 * The kmem object's mutex is given a unique name, instead of 243 * "vm object", to avoid false reports of lock-order reversal 244 * with a system map mutex. 
	 */
	mtx_init(VM_OBJECT_MTX(kmem_object), "kmem object", NULL, MTX_DEF);
	_vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = uma_zcreate("VM OBJECT", sizeof (struct vm_object), NULL,
#ifdef INVARIANTS
	    vm_object_zdtor,
#else
	    NULL,
#endif
	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
	uma_prealloc(obj_zone, VM_OBJECTS_INIT);
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->flags &= ~bits;
}

void
vm_object_pip_add(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	while (object->paging_in_progress) {
		object->flags |= OBJ_PIPWNT;
		msleep(object, VM_OBJECT_MTX(object), PVM, waitid, 0);
	}
}

/*
 *	vm_object_allocate_wait
 *
 *	Return a new object with the given size, giving the caller the
 *	option of waiting for the needed memory or failing if it isn't
 *	available.
 */
vm_object_t
vm_object_allocate_wait(objtype_t type, vm_pindex_t size, int flags)
{
	vm_object_t result;

	result = (vm_object_t) uma_zalloc(obj_zone, flags);

	if (result != NULL)
		_vm_object_allocate(type, size, result);

	return (result);
}

/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_pindex_t size)
{
	return (vm_object_allocate_wait(type, size, M_WAITOK));
}


/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.  Note: OBJ_DEAD
 *	objects can be referenced during final cleaning.
 */
void
vm_object_reference(vm_object_t object)
{
	struct vnode *vp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		VI_LOCK(vp);
		VM_OBJECT_UNLOCK(object);
		for (flags = LK_INTERLOCK; vget(vp, flags, curthread);
		    flags = 0)
			printf("vm_object_reference: delay in vget\n");
	} else
		VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_reference_locked:
 *
 *	Gets another reference to the given object.
 *
 *	The object must be locked.
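 *
 *	A brief usage sketch (illustrative, not from the original
 *	comment): callers that already hold the object lock use
 *
 *		VM_OBJECT_LOCK(object);
 *		vm_object_reference_locked(object);
 *		VM_OBJECT_UNLOCK(object);
 *
 *	whereas vm_object_reference() takes the lock itself and, unlike
 *	this routine, tolerates OBJ_DEAD objects.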
 */
void
vm_object_reference_locked(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT((object->flags & OBJ_DEAD) == 0,
	    ("vm_object_reference_locked: dead object referenced"));
	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		vp = object->handle;
		vref(vp);
	}
}

/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	GIANT_REQUIRED;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
	}
	VM_OBJECT_UNLOCK(object);
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	while (object != NULL) {
		/*
		 * In general, the object should be locked when working with
		 * its type.  In this case, in order to maintain proper lock
		 * ordering, an exception is possible because a vnode-backed
		 * object never changes its type.
		 */
		if (object->type == OBJT_VNODE)
			mtx_lock(&Giant);
		VM_OBJECT_LOCK(object);
		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			mtx_unlock(&Giant);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d",
		    object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			VM_OBJECT_UNLOCK(object);
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = LIST_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				     object->ref_count,
				     object->shadow_count));
				if (!VM_OBJECT_TRYLOCK(robject)) {
					/*
					 * Avoid a potential deadlock.
					 */
					object->ref_count++;
					VM_OBJECT_UNLOCK(object);
					/*
					 * More likely than not the thread
					 * holding robject's lock has lower
					 * priority than the current thread.
					 * Let the lower priority thread run.
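					 *
					 * (Added note: &proc0 serves only
					 * as a dummy wait channel here;
					 * nothing ever does a wakeup() on
					 * it, so the one-tick timeout below
					 * is simply a short yield before
					 * retrying.)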
					 */
					tsleep(&proc0, PVM, "vmo_de", 1);
					continue;
				}
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;
retry:
					if (robject->paging_in_progress) {
						VM_OBJECT_UNLOCK(object);
						vm_object_pip_wait(robject,
						    "objde1");
						VM_OBJECT_LOCK(object);
						goto retry;
					} else if (object->paging_in_progress) {
						VM_OBJECT_UNLOCK(robject);
						object->flags |= OBJ_PIPWNT;
						msleep(object,
						    VM_OBJECT_MTX(object),
						    PDROP | PVM, "objde2", 0);
						VM_OBJECT_LOCK(robject);
						VM_OBJECT_LOCK(object);
						goto retry;
					}
					VM_OBJECT_UNLOCK(object);
					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}
					object = robject;
					vm_object_collapse(object);
					VM_OBJECT_UNLOCK(object);
					continue;
				}
				VM_OBJECT_UNLOCK(robject);
			}
			VM_OBJECT_UNLOCK(object);
			return;
		}
doterm:
		temp = object->backing_object;
		if (temp != NULL) {
			VM_OBJECT_LOCK(temp);
			LIST_REMOVE(object, shadow_list);
			temp->shadow_count--;
			temp->generation++;
			VM_OBJECT_UNLOCK(temp);
			object->backing_object = NULL;
		}
		/*
		 * Don't double-terminate; we could be in a termination
		 * recursion due to the terminate having to sync data
		 * to disk.
		 */
		if ((object->flags & OBJ_DEAD) == 0)
			vm_object_terminate(object);
		else
			VM_OBJECT_UNLOCK(object);
		object = temp;
	}
}

/*
 *	vm_object_terminate actually destroys the specified object, freeing
 *	up all previously used resources.
 *
 *	The object must be locked.
 *	This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;
	int s;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_UNLOCK(object);

		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);

		VM_OBJECT_LOCK(object);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	s = splvm();
	vm_page_lock_queues();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
	vm_page_unlock_queues();
	splx(s);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);
	VM_OBJECT_UNLOCK(object);

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	uma_zfree(obj_zone, object);
}

/*
 *	vm_object_page_clean
 *
 *	Clean all dirty pages in the specified range of object.  Leaves page
 *	on whatever queue it is currently on.   If NOSYNC is set then do not
 *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 *	leaving the object dirty.
 *
 *	When stuffing pages asynchronously, allow clustering.  XXX we need a
 *	synchronous clustering mode implementation.
 *
 *	Odd semantics: if start == end, we clean everything.
 *
 *	The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    int flags)
{
	vm_page_t p, np;
	vm_pindex_t tstart, tend;
	vm_pindex_t pi;
	int clearobjflags;
	int pagerflags;
	int curgeneration;

	GIANT_REQUIRED;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ?
	    VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	vm_page_lock_queues();
	/*
	 * If the caller is smart and only msync()s a range he knows is
	 * dirty, we may be able to avoid an object scan.  This results in
	 * a phenomenal improvement in performance.  We cannot do this
	 * as a matter of course because the object may be huge - e.g.
	 * the size might be in the gigabytes or terabytes.
	 */
	if (msync_flush_flags & MSYNC_FLUSH_HARDSEQ) {
		vm_pindex_t tscan;
		int scanlimit;
		int scanreset;

		scanreset = object->resident_page_count / EASY_SCAN_FACTOR;
		if (scanreset < 16)
			scanreset = 16;
		pagerflags |= VM_PAGER_IGNORE_CLEANCHK;

		scanlimit = scanreset;
		tscan = tstart;
		while (tscan < tend) {
			curgeneration = object->generation;
			p = vm_page_lookup(object, tscan);
			if (p == NULL || p->valid == 0 ||
			    (p->queue - p->pc) == PQ_CACHE) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			vm_page_test_dirty(p);
			if ((p->dirty & p->valid) == 0) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			/*
			 * If we have been asked to skip nosync pages and
			 * this is a nosync page, we can't continue.
			 */
			if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
				if (--scanlimit == 0)
					break;
				++tscan;
				continue;
			}
			scanlimit = scanreset;

			/*
			 * This returns 0 if it was unable to busy the first
			 * page (i.e. had to sleep).
			 */
			tscan += vm_object_page_collect_flush(object, p,
			    curgeneration, pagerflags);
		}

		/*
		 * If everything was dirty and we flushed it successfully,
		 * and the requested range is not the entire object, we
		 * don't have to mess with CLEANCHK or MIGHTBEDIRTY and can
		 * return immediately.
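		 *
		 * (Added note: if the range does cover the entire object,
		 * we fall through instead, so that OBJ_WRITEABLE and
		 * OBJ_MIGHTBEDIRTY can be cleared further below.)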
		 */
		if (tscan >= tend && (tstart || tend < object->size)) {
			vm_page_unlock_queues();
			vm_object_clear_flag(object, OBJ_CLEANING);
			return;
		}
		pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;
	TAILQ_FOREACH(p, &object->memq, listq) {
		vm_page_flag_set(p, PG_CLEANCHK);
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
			clearobjflags = 0;
		else
			pmap_page_protect(p, VM_PROT_READ);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		struct vnode *vp;

		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
		if (object->type == OBJT_VNODE &&
		    (vp = (struct vnode *)object->handle) != NULL) {
			VI_LOCK(vp);
			if (vp->v_iflag & VI_OBJDIRTY)
				vp->v_iflag &= ~VI_OBJDIRTY;
			VI_UNLOCK(vp);
		}
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		int n;

		np = TAILQ_NEXT(p, listq);

again:
		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		n = vm_object_page_collect_flush(object, p,
		    curgeneration, pagerflags);
		if (n == 0)
			goto rescan;

		if (object->generation != curgeneration)
			goto rescan;

		/*
		 * Try to optimize the next page.  If we can't we pick up
		 * our (random) scan where we left off.
		 */
		if (msync_flush_flags & MSYNC_FLUSH_SOFTSEQ) {
			if ((p = vm_page_lookup(object, pi + n)) != NULL)
				goto again;
		}
	}
	vm_page_unlock_queues();
#if 0
	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC) ? MNT_WAIT : 0,
	    curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}

static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
    int curgeneration, int pagerflags)
{
	int runlen;
	int s;
	int maxf;
	int chkb;
	int maxb;
	int i;
	vm_pindex_t pi;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pi = p->pindex;
	while (vm_page_sleep_if_busy(p, TRUE, "vpcwai")) {
		vm_page_lock_queues();
		if (object->generation != curgeneration) {
			splx(s);
			return (0);
		}
	}
	maxf = 0;
	for (i = 1; i < vm_pageout_page_count; i++) {
		vm_page_t tp;

		if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
			if ((tp->flags & PG_BUSY) ||
			    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
			     (tp->flags & PG_CLEANCHK) == 0) ||
			    (tp->busy != 0))
				break;
			if ((tp->queue - tp->pc) == PQ_CACHE) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			vm_page_test_dirty(tp);
			if ((tp->dirty & tp->valid) == 0) {
				vm_page_flag_clear(tp, PG_CLEANCHK);
				break;
			}
			maf[i - 1] = tp;
			maxf++;
			continue;
		}
		break;
	}

	maxb = 0;
	chkb = vm_pageout_page_count - maxf;
	if (chkb) {
		for (i = 1; i < chkb; i++) {
			vm_page_t tp;

			if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    ((pagerflags & VM_PAGER_IGNORE_CLEANCHK) == 0 &&
				     (tp->flags & PG_CLEANCHK) == 0) ||
				    (tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				mab[i - 1] = tp;
				maxb++;
				continue;
			}
			break;
		}
	}

	for (i = 0; i < maxb; i++) {
		int index = (maxb - i) - 1;

		ma[index] = mab[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	vm_page_flag_clear(p, PG_CLEANCHK);
	ma[maxb] = p;
	for (i = 0; i < maxf; i++) {
		int index = (maxb + i) + 1;

		ma[index] = maf[i];
		vm_page_flag_clear(ma[index], PG_CLEANCHK);
	}
	runlen = maxb + maxf + 1;

	splx(s);
	vm_pageout_flush(ma, runlen, pagerflags);
	for (i = 0; i < runlen; i++) {
		if (ma[i]->valid & ma[i]->dirty) {
			pmap_page_protect(ma[i], VM_PROT_READ);
			vm_page_flag_set(ma[i], PG_CLEANCHK);

			/*
			 * maxf will end up being the actual number of pages
			 * we wrote out contiguously, non-inclusive of the
			 * first page.  We do not count look-behind pages.
			 */
			if (i >= maxb + 1 && (maxf > i - maxb - 1))
				maxf = i - maxb - 1;
		}
	}
	return (maxf + 1);
}

/*
 *	Note that there is absolutely no sense in writing out
 *	anonymous objects, so we track down the vnode object
 *	to write out.
 *	We invalidate (remove) all pages from the address space
 *	for semantic correctness.
 *
 *	Note: certain anonymous maps, such as MAP_NOSYNC maps,
 *	may start out with a NULL object.
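 *
 *	Rough sketch of the usual call path (an assumption for
 *	orientation; the exact plumbing lives in vm_map.c): msync(2)
 *	walks the map entries covering the user's range and invokes
 *	this routine on each entry's object, roughly
 *
 *		vm_object_sync(entry->object.vm_object, offset, size,
 *		    syncio, invalidate);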
 */
void
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
	vm_object_t backing_object;
	struct vnode *vp;
	int flags;

	if (object == NULL)
		return;
	VM_OBJECT_LOCK(object);
	while ((backing_object = object->backing_object) != NULL) {
		VM_OBJECT_LOCK(backing_object);
		offset += object->backing_object_offset;
		VM_OBJECT_UNLOCK(object);
		object = backing_object;
		if (object->size < OFF_TO_IDX(offset + size))
			size = IDX_TO_OFF(object->size) - offset;
	}
	/*
	 * Flush pages if writing is allowed, invalidate them
	 * if invalidation requested.  Pages undergoing I/O
	 * will be ignored by vm_object_page_remove().
	 *
	 * We cannot lock the vnode and then wait for paging
	 * to complete without deadlocking against vm_fault.
	 * Instead we simply call vm_object_page_remove() and
	 * allow it to block internally on a page-by-page
	 * basis when it encounters pages undergoing async
	 * I/O.
	 */
	if (object->type == OBJT_VNODE &&
	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);
		mtx_lock(&Giant);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
		flags |= invalidate ? OBJPC_INVAL : 0;
		VM_OBJECT_LOCK(object);
		vm_object_page_clean(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    flags);
		VM_OBJECT_UNLOCK(object);
		VOP_UNLOCK(vp, 0, curthread);
		mtx_unlock(&Giant);
		VM_OBJECT_LOCK(object);
	}
	if ((object->type == OBJT_VNODE ||
	     object->type == OBJT_DEVICE) && invalidate) {
		vm_object_page_remove(object,
		    OFF_TO_IDX(offset),
		    OFF_TO_IDX(offset + size + PAGE_MASK),
		    old_msync ? FALSE : TRUE);
	}
	VM_OBJECT_UNLOCK(object);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count,
    int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t backing_object, tobject;
	vm_page_t m;

	if (object == NULL)
		return;
	end = pindex + count;
	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
		VM_OBJECT_LOCK(tobject);
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
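		 *
		 * (Added rationale: MADV_FREE discards page contents by
		 * marking pages clean and releasing any swap copy; that
		 * is only safe when this mapping is the sole way the
		 * data can be observed.)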
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				goto unlock_tobject;
			}
		}
		m = vm_page_lookup(tobject, tpindex);
		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
			/*
			 * next object
			 */
			backing_object = tobject->backing_object;
			if (backing_object == NULL)
				goto unlock_tobject;
			VM_OBJECT_LOCK(backing_object);
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			VM_OBJECT_UNLOCK(tobject);
			tobject = backing_object;
			goto shadowlookup;
		}
		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		vm_page_lock_queues();
		if (m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL) {
			vm_page_unlock_queues();
			goto unlock_tobject;
		}
		if (vm_page_sleep_if_busy(m, TRUE, "madvpo")) {
			VM_OBJECT_UNLOCK(tobject);
			goto relookup;
		}
		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
		}
		vm_page_unlock_queues();
		if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
			swap_pager_freespace(tobject, tpindex, 1);
unlock_tobject:
		VM_OBJECT_UNLOCK(tobject);
	}
}

/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		if (source->ref_count == 1 &&
		    source->handle == NULL &&
		    (source->type == OBJT_DEFAULT ||
		     source->type == OBJT_SWAP)) {
			VM_OBJECT_UNLOCK(source);
			return;
		}
		VM_OBJECT_UNLOCK(source);
	}

	/*
	 * Allocate a new object with the given length.
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		if (length < source->size)
			length = source->size;
		if (length > PQ_L2_SIZE / 3 + PQ_PRIME1 ||
		    source->generation > 1)
			length = PQ_L2_SIZE / 3 + PQ_PRIME1;
		result->pg_color = (source->pg_color +
		    length * source->generation) & PQ_L2_MASK;
		VM_OBJECT_UNLOCK(source);
		next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) &
		    PQ_L2_MASK;
	}


	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}

/*
 *	vm_object_split:
 *
 *	Split the pages in a map entry into a new object.  This affords
 *	easier removal of unused pages, and keeps object inheritance from
 *	being a negative impact on memory usage.
 */
void
vm_object_split(vm_map_entry_t entry)
{
	vm_page_t m;
	vm_object_t orig_object, new_object, source;
	vm_pindex_t offidxstart, offidxend;
	vm_size_t idx, size;

	orig_object = entry->object.vm_object;
	if (orig_object->type != OBJT_DEFAULT && orig_object->type != OBJT_SWAP)
		return;
	if (orig_object->ref_count <= 1)
		return;
	VM_OBJECT_UNLOCK(orig_object);

	offidxstart = OFF_TO_IDX(entry->offset);
	offidxend = offidxstart + OFF_TO_IDX(entry->end - entry->start);
	size = offidxend - offidxstart;

	/*
	 * If swap_pager_copy() is later called, it will convert new_object
	 * into a swap object.
	 */
	new_object = vm_object_allocate(OBJT_DEFAULT, size);

	VM_OBJECT_LOCK(new_object);
	VM_OBJECT_LOCK(orig_object);
	source = orig_object->backing_object;
	if (source != NULL) {
		VM_OBJECT_LOCK(source);
		LIST_INSERT_HEAD(&source->shadow_head,
		    new_object, shadow_list);
		source->shadow_count++;
		source->generation++;
		vm_object_reference_locked(source);	/* for new_object */
		vm_object_clear_flag(source, OBJ_ONEMAPPING);
		VM_OBJECT_UNLOCK(source);
		new_object->backing_object_offset =
		    orig_object->backing_object_offset + entry->offset;
		new_object->backing_object = source;
	}
	for (idx = 0; idx < size; idx++) {
retry:
		m = vm_page_lookup(orig_object, offidxstart + idx);
		if (m == NULL)
			continue;

		/*
		 * We must wait for pending I/O to complete before we can
		 * rename the page.
		 *
		 * We do not have to VM_PROT_NONE the page as mappings should
		 * not be changed by this operation.
		 */
		vm_page_lock_queues();
		if ((m->flags & PG_BUSY) || m->busy) {
			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
			VM_OBJECT_UNLOCK(orig_object);
			VM_OBJECT_UNLOCK(new_object);
			msleep(m, &vm_page_queue_mtx, PDROP | PVM, "spltwt", 0);
			VM_OBJECT_LOCK(new_object);
			VM_OBJECT_LOCK(orig_object);
			goto retry;
		}
		vm_page_busy(m);
		vm_page_rename(m, new_object, idx);
		/* page automatically made dirty by rename and cache handled */
		vm_page_busy(m);
		vm_page_unlock_queues();
	}
	if (orig_object->type == OBJT_SWAP) {
		/*
		 * swap_pager_copy() can sleep, in which case the orig_object's
		 * and new_object's locks are released and reacquired.
		 */
		swap_pager_copy(orig_object, new_object, offidxstart, 0);
	}
	VM_OBJECT_UNLOCK(orig_object);
	vm_page_lock_queues();
	TAILQ_FOREACH(m, &new_object->memq, listq)
		vm_page_wakeup(m);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(new_object);
	entry->object.vm_object = new_object;
	entry->offset = 0LL;
	vm_object_deallocate(orig_object);
	VM_OBJECT_LOCK(new_object);
}

#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static int
vm_object_backing_scan(vm_object_t object, int op)
{
	int s;
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	s = splvm();
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(object->backing_object, MA_OWNED);

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			splx(s);
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */

			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			vm_page_lock_queues();
			if (op & OBSC_COLLAPSE_NOWAIT) {
				if ((p->flags & PG_BUSY) ||
				    !p->valid ||
				    p->hold_count ||
				    p->wire_count ||
				    p->busy) {
					vm_page_unlock_queues();
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if ((p->flags & PG_BUSY) || p->busy) {
					vm_page_flag_set(p,
					    PG_WANTED | PG_REFERENCED);
					VM_OBJECT_UNLOCK(backing_object);
					VM_OBJECT_UNLOCK(object);
					msleep(p, &vm_page_queue_mtx,
					    PDROP | PVM, "vmocol", 0);
					VM_OBJECT_LOCK(object);
					VM_OBJECT_LOCK(backing_object);
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			/*
			 * Busy the page
			 */
			vm_page_busy(p);
			vm_page_unlock_queues();

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_backing_scan(): object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range;
				 * we can simply destroy it.
				 */
				vm_page_lock_queues();
				pmap_remove_all(p);
				vm_page_free(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * The page already exists in the parent OR
				 * swap exists for this location in the
				 * parent.  Destroy the original page from
				 * the backing object.
				 *
				 * Leave the parent's page alone.
				 */
				vm_page_lock_queues();
				pmap_remove_all(p);
				vm_page_free(p);
				vm_page_unlock_queues();
				p = next;
				continue;
			}

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			vm_page_lock_queues();
			vm_page_rename(p, object, new_pindex);
			vm_page_unlock_queues();
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	splx(s);
	return (r);
}


/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
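 *
 * (Added note, one reading of the code rather than an authoritative
 * rationale: the temporary ref_count += 2 below pins the backing
 * object so the NOWAIT scan cannot leave it looking like a candidate
 * for termination while pages are being moved.)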
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(backing_object, MA_OWNED);

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}

/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		VM_OBJECT_LOCK(backing_object);
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			VM_OBJECT_UNLOCK(backing_object);
			break;
		}
		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				/*
				 * swap_pager_copy() can sleep, in which case
				 * the backing_object's and object's locks are
				 * released and reacquired.
				 */
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset),
				    TRUE);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;
			if (backing_object->backing_object) {
				VM_OBJECT_LOCK(backing_object->backing_object);
				LIST_REMOVE(backing_object, shadow_list);
				LIST_INSERT_HEAD(
				    &backing_object->backing_object->shadow_head,
				    object, shadow_list);
				/*
				 * The shadow_count has not changed.
				 */
				backing_object->backing_object->generation++;
				VM_OBJECT_UNLOCK(backing_object->backing_object);
			}
			object->backing_object = backing_object->backing_object;
			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			KASSERT(backing_object->ref_count == 1,
			    ("backing_object %p was somehow re-referenced during collapse!",
			    backing_object));
			VM_OBJECT_UNLOCK(backing_object);

			mtx_lock(&vm_object_list_mtx);
			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			mtx_unlock(&vm_object_list_mtx);

			uma_zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object,
			    OBSC_TEST_ALL_SHADOWED) == 0) {
				VM_OBJECT_UNLOCK(backing_object);
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			LIST_REMOVE(object, shadow_list);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				VM_OBJECT_LOCK(new_backing_object);
				LIST_INSERT_HEAD(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				vm_object_reference_locked(new_backing_object);
				VM_OBJECT_UNLOCK(new_backing_object);
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object.  Since
			 * its ref_count was at least 2, it will not vanish.
			 */
			backing_object->ref_count--;
			VM_OBJECT_UNLOCK(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}

/*
 *	vm_object_page_remove:
 *
 *	Removes all physical pages in the given range from the
 *	object's list of pages.  If the range's end is zero, all
 *	physical pages from the range's start to the end of the object
 *	are deleted.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
    boolean_t clean_only)
{
	vm_page_t p, next;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (object->resident_page_count == 0)
		return;

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS,
	    ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	vm_page_lock_queues();
	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		if (p->pindex < start) {
			p = vm_page_splay(start, object->root);
			if ((object->root = p)->pindex < start)
				p = TAILQ_NEXT(p, listq);
		}
	}
	/*
	 * Assert: the variable p is either (1) the page with the
	 * least pindex greater than or equal to the parameter pindex
	 * or (2) NULL.
	 */
	for (;
	    p != NULL && (p->pindex < end || end == 0);
	    p = next) {
		next = TAILQ_NEXT(p, listq);

		if (p->wire_count != 0) {
			pmap_remove_all(p);
			if (!clean_only)
				p->valid = 0;
			continue;
		}
		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
			goto again;
		if (clean_only && p->valid) {
			pmap_page_protect(p, VM_PROT_READ | VM_PROT_EXECUTE);
			if (p->valid & p->dirty)
				continue;
		}
		vm_page_busy(p);
		pmap_remove_all(p);
		vm_page_free(p);
	}
	vm_page_unlock_queues();
	vm_object_pip_wakeup(object);
}

/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex,
    vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	if (prev_object == NULL)
		return (TRUE);
	VM_OBJECT_LOCK(prev_object);
	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if the object has more than one reference, is
	 * paged out, shadows another object, or has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry
	 * may be in use anyway).
	 */
	if (prev_object->backing_object != NULL) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		VM_OBJECT_UNLOCK(prev_object);
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
		    next_pindex,
		    next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
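	 *
	 * (Added worked example, illustrative values: with prev_pindex 0,
	 * prev_size 4 pages and next_size 2 pages, next_pindex is 4; if
	 * prev_object->size was 4, it grows to 6 so the coalesced range
	 * is fully covered.)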
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	VM_OBJECT_UNLOCK(prev_object);
	return (TRUE);
}

void
vm_object_set_writeable_dirty(vm_object_t object)
{
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_set_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	if (object->type == OBJT_VNODE &&
	    (vp = (struct vnode *)object->handle) != NULL) {
		VI_LOCK(vp);
		if ((vp->v_iflag & VI_OBJDIRTY) == 0)
			vp->v_iflag |= VI_OBJDIRTY;
		VI_UNLOCK(vp);
	}
}

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	LIST_FOREACH(p, &allproc, p_list) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
				    (long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
				    "vmochk: internal obj is not in a map: "
				    "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}

/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it.
	 */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (uintmax_t)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (uintmax_t)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%jx,page=0x%jx)",
		    (uintmax_t)p->pindex, (uintmax_t)VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_paddr_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
					    (long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
			    (VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
				    (long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
			    (long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */