/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_object.c	8.5 (Berkeley) 3/22/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Virtual memory object module.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>		/* for curproc, pageproc */
#include <sys/socket.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static void	vm_object_qcollapse __P((vm_object_t object));

/*
 *	Virtual memory objects maintain the actual data
 *	associated with allocated virtual memory.  A given
 *	page of memory exists within exactly one object.
 *
 *	An object is only deallocated when all "references"
 *	are given up.  Only one "reference" to a given
 *	region of an object should be writeable.
 *
 *	Associated with each object is a list of all resident
 *	memory pages belonging to that object; this list is
 *	maintained by the "vm_page" module, and locked by the object's
 *	lock.
 *
 *	Each object also records a "pager" routine which is
 *	used to retrieve (and store) pages to the proper backing
 *	storage.  In addition, objects may be backed by other
 *	objects from which they were virtual-copied.
 *
 *	The only items within the object structure which are
 *	modified after time of creation are:
 *		reference count		locked by object's lock
 *		pager routine		locked by object's lock
 *
 */

struct object_q vm_object_list;
static struct mtx vm_object_list_mtx;	/* lock for object list and count */
static long vm_object_count;		/* count of all objects */
vm_object_t kernel_object;
vm_object_t kmem_object;
static struct vm_object kernel_object_store;
static struct vm_object kmem_object_store;
extern int vm_pageout_page_count;

static long object_collapses;
static long object_bypasses;
static int next_index;
static vm_zone_t obj_zone;
static struct vm_zone obj_zone_store;
static int object_hash_rand;
#define VM_OBJECTS_INIT 256
static struct vm_object vm_objects_init[VM_OBJECTS_INIT];

void
_vm_object_allocate(objtype_t type, vm_size_t size, vm_object_t object)
{
	int incr;

	GIANT_REQUIRED;

	TAILQ_INIT(&object->memq);
	TAILQ_INIT(&object->shadow_head);

	object->type = type;
	object->size = size;
	object->ref_count = 1;
	object->flags = 0;
	if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
		vm_object_set_flag(object, OBJ_ONEMAPPING);
	object->paging_in_progress = 0;
	object->resident_page_count = 0;
	object->shadow_count = 0;
	object->pg_color = next_index;
	if (size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
		incr = PQ_L2_SIZE / 3 + PQ_PRIME1;
	else
		incr = size;
	next_index = (next_index + incr) & PQ_L2_MASK;
	object->handle = NULL;
	object->backing_object = NULL;
	object->backing_object_offset = (vm_ooffset_t) 0;
	/*
	 * Try to generate a number that will spread objects out in the
	 * hash table.  We 'wipe' new objects across the hash in 128 page
	 * increments plus 1 more to offset it a little more by the time
	 * it wraps around.
	 */
	object->hash_rand = object_hash_rand - 129;

	object->generation++;

	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
	vm_object_count++;
	object_hash_rand = object->hash_rand;
}
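/*
 * Illustrative note on the pg_color/next_index logic above (a sketch,
 * assuming a hypothetical configuration where PQ_L2_SIZE is 256 and
 * PQ_L2_MASK is 0xff): allocating 16-page objects back to back gives
 * successive objects starting colors 0, 16, 32, ..., so the pages of
 * different objects tend to land in different L2 cache bins rather than
 * all competing for the same cache lines.
 */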
/*
 *	vm_object_init:
 *
 *	Initialize the VM objects module.
 */
void
vm_object_init(void)
{
	GIANT_REQUIRED;

	TAILQ_INIT(&vm_object_list);
	mtx_init(&vm_object_list_mtx, "vm object_list", MTX_DEF);
	vm_object_count = 0;

	kernel_object = &kernel_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kernel_object);

	kmem_object = &kmem_object_store;
	_vm_object_allocate(OBJT_DEFAULT, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
	    kmem_object);

	obj_zone = &obj_zone_store;
	zbootinit(obj_zone, "VM OBJECT", sizeof (struct vm_object),
	    vm_objects_init, VM_OBJECTS_INIT);
}

void
vm_object_init2(void)
{
	zinitna(obj_zone, NULL, NULL, 0, 0, 0, 1);
}

void
vm_object_set_flag(vm_object_t object, u_short bits)
{
	GIANT_REQUIRED;
	object->flags |= bits;
}

void
vm_object_clear_flag(vm_object_t object, u_short bits)
{
	GIANT_REQUIRED;
	object->flags &= ~bits;
}

void
vm_object_pip_add(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	object->paging_in_progress += i;
}

void
vm_object_pip_subtract(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	object->paging_in_progress -= i;
}

void
vm_object_pip_wakeup(vm_object_t object)
{
	GIANT_REQUIRED;
	object->paging_in_progress--;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_wakeupn(vm_object_t object, short i)
{
	GIANT_REQUIRED;
	if (i)
		object->paging_in_progress -= i;
	if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
		vm_object_clear_flag(object, OBJ_PIPWNT);
		wakeup(object);
	}
}

void
vm_object_pip_sleep(vm_object_t object, char *waitid)
{
	GIANT_REQUIRED;
	if (object->paging_in_progress) {
		int s = splvm();
		if (object->paging_in_progress) {
			vm_object_set_flag(object, OBJ_PIPWNT);
			tsleep(object, PVM, waitid, 0);
		}
		splx(s);
	}
}

void
vm_object_pip_wait(vm_object_t object, char *waitid)
{
	GIANT_REQUIRED;
	while (object->paging_in_progress)
		vm_object_pip_sleep(object, waitid);
}
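/*
 * Usage sketch for the paging-in-progress interlock above (illustrative
 * only; do_some_paging_io() is a hypothetical helper, the real callers
 * are the pagers and the pageout daemon):
 *
 *	vm_object_pip_add(object, 1);		hold object across the I/O
 *	error = do_some_paging_io(object);
 *	vm_object_pip_wakeup(object);		drop count, wake any waiter
 *
 * vm_object_terminate() uses vm_object_pip_wait() to block until all
 * such transient references have drained.
 */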
/*
 *	vm_object_allocate:
 *
 *	Returns a new object with the given size.
 */
vm_object_t
vm_object_allocate(objtype_t type, vm_size_t size)
{
	vm_object_t result;

	GIANT_REQUIRED;

	result = (vm_object_t) zalloc(obj_zone);
	_vm_object_allocate(type, size, result);

	return (result);
}

/*
 *	vm_object_reference:
 *
 *	Gets another reference to the given object.
 */
void
vm_object_reference(vm_object_t object)
{
	GIANT_REQUIRED;

	if (object == NULL)
		return;

	KASSERT(!(object->flags & OBJ_DEAD),
	    ("vm_object_reference: attempting to reference dead obj"));

	object->ref_count++;
	if (object->type == OBJT_VNODE) {
		while (vget((struct vnode *) object->handle, LK_RETRY|LK_NOOBJ, curthread)) {
			printf("vm_object_reference: delay in getting object\n");
		}
	}
}
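/*
 * Illustrative pairing (a sketch, not a caller in this file): every
 * vm_object_allocate() or vm_object_reference() must eventually be
 * balanced by a vm_object_deallocate(), e.g.
 *
 *	vm_object_t obj = vm_object_allocate(OBJT_DEFAULT, npages);
 *	... use obj ...
 *	vm_object_deallocate(obj);	final ref; object is terminated
 */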
/*
 * Handle deallocating an object of type OBJT_VNODE.
 */
void
vm_object_vndeallocate(vm_object_t object)
{
	struct vnode *vp = (struct vnode *) object->handle;

	GIANT_REQUIRED;
	KASSERT(object->type == OBJT_VNODE,
	    ("vm_object_vndeallocate: not a vnode object"));
	KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
	if (object->ref_count == 0) {
		vprint("vm_object_vndeallocate", vp);
		panic("vm_object_vndeallocate: bad object reference count");
	}
#endif

	object->ref_count--;
	if (object->ref_count == 0) {
		vp->v_flag &= ~VTEXT;
		vm_object_clear_flag(object, OBJ_OPT);
	}
	/*
	 * vrele may need a vop lock
	 */
	vrele(vp);
}

/*
 *	vm_object_deallocate:
 *
 *	Release a reference to the specified object,
 *	gained either through a vm_object_allocate
 *	or a vm_object_reference call.  When all references
 *	are gone, storage associated with this object
 *	may be relinquished.
 *
 *	No object may be locked.
 */
void
vm_object_deallocate(vm_object_t object)
{
	vm_object_t temp;

	GIANT_REQUIRED;

	while (object != NULL) {

		if (object->type == OBJT_VNODE) {
			vm_object_vndeallocate(object);
			return;
		}

		KASSERT(object->ref_count != 0,
		    ("vm_object_deallocate: object deallocated too many times: %d", object->type));

		/*
		 * If the reference count goes to 0 we start calling
		 * vm_object_terminate() on the object chain.
		 * A ref count of 1 may be a special case depending on the
		 * shadow count being 0 or 1.
		 */
		object->ref_count--;
		if (object->ref_count > 1) {
			return;
		} else if (object->ref_count == 1) {
			if (object->shadow_count == 0) {
				vm_object_set_flag(object, OBJ_ONEMAPPING);
			} else if ((object->shadow_count == 1) &&
			    (object->handle == NULL) &&
			    (object->type == OBJT_DEFAULT ||
			     object->type == OBJT_SWAP)) {
				vm_object_t robject;

				robject = TAILQ_FIRST(&object->shadow_head);
				KASSERT(robject != NULL,
				    ("vm_object_deallocate: ref_count: %d, shadow_count: %d",
				    object->ref_count,
				    object->shadow_count));
				if ((robject->handle == NULL) &&
				    (robject->type == OBJT_DEFAULT ||
				     robject->type == OBJT_SWAP)) {

					robject->ref_count++;

					while (
					    robject->paging_in_progress ||
					    object->paging_in_progress
					) {
						vm_object_pip_sleep(robject, "objde1");
						vm_object_pip_sleep(object, "objde2");
					}

					if (robject->ref_count == 1) {
						robject->ref_count--;
						object = robject;
						goto doterm;
					}

					object = robject;
					vm_object_collapse(object);
					continue;
				}
			}

			return;

		}

doterm:

		temp = object->backing_object;
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			temp->shadow_count--;
			if (temp->ref_count == 0)
				vm_object_clear_flag(temp, OBJ_OPT);
			temp->generation++;
			object->backing_object = NULL;
		}
		vm_object_terminate(object);
		/* unlocks and deallocates object */
		object = temp;
	}
}
/*
 * vm_object_terminate actually destroys the specified object, freeing
 * up all previously used resources.
 *
 * The object must be locked.
 * This routine may block.
 */
void
vm_object_terminate(vm_object_t object)
{
	vm_page_t p;
	int s;

	GIANT_REQUIRED;

	/*
	 * Make sure no one uses us.
	 */
	vm_object_set_flag(object, OBJ_DEAD);

	/*
	 * wait for the pageout daemon to be done with the object
	 */
	vm_object_pip_wait(object, "objtrm");

	KASSERT(!object->paging_in_progress,
	    ("vm_object_terminate: pageout in progress"));

	/*
	 * Clean and free the pages, as appropriate. All references to the
	 * object are gone, so we don't need to lock it.
	 */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

		/*
		 * Freeze optimized copies.
		 */
		vm_freeze_copyopts(object, 0, object->size);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);

		vp = (struct vnode *) object->handle;
		vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
	}

	KASSERT(object->ref_count == 0,
	    ("vm_object_terminate: object with references, ref_count=%d",
	    object->ref_count));

	/*
	 * Now free any remaining pages. For internal objects, this also
	 * removes them from paging queues. Don't free wired pages, just
	 * remove them from the object.
	 */
	s = splvm();
	while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
		KASSERT(!p->busy && (p->flags & PG_BUSY) == 0,
		    ("vm_object_terminate: freeing busy page %p "
		    "p->busy = %d, p->flags %x\n", p, p->busy, p->flags));
		if (p->wire_count == 0) {
			vm_page_busy(p);
			vm_page_free(p);
			cnt.v_pfree++;
		} else {
			vm_page_busy(p);
			vm_page_remove(p);
		}
	}
	splx(s);

	/*
	 * Let the pager know object is dead.
	 */
	vm_pager_deallocate(object);

	/*
	 * Remove the object from the global object list.
	 */
	mtx_lock(&vm_object_list_mtx);
	TAILQ_REMOVE(&vm_object_list, object, object_list);
	mtx_unlock(&vm_object_list_mtx);

	wakeup(object);

	/*
	 * Free the space for the object.
	 */
	zfree(obj_zone, object);
}
/*
 * vm_object_page_clean
 *
 * Clean all dirty pages in the specified range of the object.  Leaves each
 * page on whatever queue it is currently on.  If NOSYNC is set then do not
 * write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
 * leaving the object dirty.
 *
 * Odd semantics: if start == end, we clean everything.
 *
 * The object must be locked.
 */
void
vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int flags)
{
	vm_page_t p, np, tp;
	vm_offset_t tstart, tend;
	vm_pindex_t pi;
	int s;
	struct vnode *vp;
	int runlen;
	int maxf;
	int chkb;
	int maxb;
	int i;
	int clearobjflags;
	int pagerflags;
	vm_page_t maf[vm_pageout_page_count];
	vm_page_t mab[vm_pageout_page_count];
	vm_page_t ma[vm_pageout_page_count];
	int curgeneration;

	GIANT_REQUIRED;

	if (object->type != OBJT_VNODE ||
	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
		return;

	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : 0;
	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;

	vp = object->handle;

	vm_object_set_flag(object, OBJ_CLEANING);

	tstart = start;
	if (end == 0) {
		tend = object->size;
	} else {
		tend = end;
	}

	/*
	 * Generally set CLEANCHK interlock and make the page read-only so
	 * we can then clear the object flags.
	 *
	 * However, if this is a nosync mmap then the object is likely to
	 * stay dirty so do not mess with the page and do not clear the
	 * object flags.
	 */
	clearobjflags = 1;

	TAILQ_FOREACH(p, &object->memq, listq) {
		vm_page_flag_set(p, PG_CLEANCHK);
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
			clearobjflags = 0;
		else
			vm_page_protect(p, VM_PROT_READ);
	}

	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
		vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
	}

rescan:
	curgeneration = object->generation;

	for (p = TAILQ_FIRST(&object->memq); p; p = np) {
		np = TAILQ_NEXT(p, listq);

		pi = p->pindex;
		if (((p->flags & PG_CLEANCHK) == 0) ||
		    (pi < tstart) || (pi >= tend) ||
		    (p->valid == 0) ||
		    ((p->queue - p->pc) == PQ_CACHE)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		vm_page_test_dirty(p);
		if ((p->dirty & p->valid) == 0) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		/*
		 * If we have been asked to skip nosync pages and this is a
		 * nosync page, skip it.  Note that the object flags were
		 * not cleared in this case so we do not have to set them.
		 */
		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
			vm_page_flag_clear(p, PG_CLEANCHK);
			continue;
		}

		s = splvm();
		while (vm_page_sleep_busy(p, TRUE, "vpcwai")) {
			if (object->generation != curgeneration) {
				splx(s);
				goto rescan;
			}
		}

		maxf = 0;
		for (i = 1; i < vm_pageout_page_count; i++) {
			if ((tp = vm_page_lookup(object, pi + i)) != NULL) {
				if ((tp->flags & PG_BUSY) ||
				    (tp->flags & PG_CLEANCHK) == 0 ||
				    (tp->busy != 0))
					break;
				if ((tp->queue - tp->pc) == PQ_CACHE) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				vm_page_test_dirty(tp);
				if ((tp->dirty & tp->valid) == 0) {
					vm_page_flag_clear(tp, PG_CLEANCHK);
					break;
				}
				maf[i - 1] = tp;
				maxf++;
				continue;
			}
			break;
		}

		maxb = 0;
		chkb = vm_pageout_page_count - maxf;
		if (chkb) {
			for (i = 1; i < chkb; i++) {
				if ((tp = vm_page_lookup(object, pi - i)) != NULL) {
					if ((tp->flags & PG_BUSY) ||
					    (tp->flags & PG_CLEANCHK) == 0 ||
					    (tp->busy != 0))
						break;
					if ((tp->queue - tp->pc) == PQ_CACHE) {
						vm_page_flag_clear(tp, PG_CLEANCHK);
						break;
					}
					vm_page_test_dirty(tp);
					if ((tp->dirty & tp->valid) == 0) {
						vm_page_flag_clear(tp, PG_CLEANCHK);
						break;
					}
					mab[i - 1] = tp;
					maxb++;
					continue;
				}
				break;
			}
		}

		for (i = 0; i < maxb; i++) {
			int index = (maxb - i) - 1;
			ma[index] = mab[i];
			vm_page_flag_clear(ma[index], PG_CLEANCHK);
		}
		vm_page_flag_clear(p, PG_CLEANCHK);
		ma[maxb] = p;
		for (i = 0; i < maxf; i++) {
			int index = (maxb + i) + 1;
			ma[index] = maf[i];
			vm_page_flag_clear(ma[index], PG_CLEANCHK);
		}
		runlen = maxb + maxf + 1;

		splx(s);
		vm_pageout_flush(ma, runlen, pagerflags);
		for (i = 0; i < runlen; i++) {
			if (ma[i]->valid & ma[i]->dirty) {
				vm_page_protect(ma[i], VM_PROT_READ);
				vm_page_flag_set(ma[i], PG_CLEANCHK);
			}
		}
		if (object->generation != curgeneration)
			goto rescan;
	}

#if 0
	VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif

	vm_object_clear_flag(object, OBJ_CLEANING);
	return;
}
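/*
 * Usage sketch (illustrative): a synchronous writeback of a whole vnode
 * object.  Per the odd semantics above, start == end == 0 cleans every
 * page; this is exactly how vm_object_terminate() calls it:
 *
 *	vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 *
 * Passing OBJPC_NOSYNC in flags instead would skip pages marked
 * PG_NOSYNC, leaving the object dirty.
 */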
/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 * vm_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_pindex_t idx;
	vm_page_t p;

	GIANT_REQUIRED;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;

	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		vm_page_protect(p, VM_PROT_READ);
	}
}

/*
 *	vm_object_pmap_remove:
 *
 *	Removes all physical pages in the specified
 *	object range from all physical maps.
 *
 *	The object must *not* be locked.
 */
void
vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t p;

	GIANT_REQUIRED;
	if (object == NULL)
		return;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (p->pindex >= start && p->pindex < end)
			vm_page_protect(p, VM_PROT_NONE);
	}
	if ((start == 0) && (object->size == end))
		vm_object_clear_flag(object, OBJ_WRITEABLE);
}

/*
 *	vm_object_madvise:
 *
 *	Implements the madvise function at the object/page level.
 *
 *	MADV_WILLNEED	(any object)
 *
 *	    Activate the specified pages if they are resident.
 *
 *	MADV_DONTNEED	(any object)
 *
 *	    Deactivate the specified pages if they are resident.
 *
 *	MADV_FREE	(OBJT_DEFAULT/OBJT_SWAP objects,
 *			 OBJ_ONEMAPPING only)
 *
 *	    Deactivate and clean the specified pages if they are
 *	    resident.  This permits the process to reuse the pages
 *	    without faulting or the kernel to reclaim the pages
 *	    without I/O.
 */
void
vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
{
	vm_pindex_t end, tpindex;
	vm_object_t tobject;
	vm_page_t m;

	GIANT_REQUIRED;
	if (object == NULL)
		return;

	end = pindex + count;

	/*
	 * Locate and adjust resident pages
	 */
	for (; pindex < end; pindex += 1) {
relookup:
		tobject = object;
		tpindex = pindex;
shadowlookup:
		/*
		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
		 * and those pages must be OBJ_ONEMAPPING.
		 */
		if (advise == MADV_FREE) {
			if ((tobject->type != OBJT_DEFAULT &&
			     tobject->type != OBJT_SWAP) ||
			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
				continue;
			}
		}

		m = vm_page_lookup(tobject, tpindex);

		if (m == NULL) {
			/*
			 * There may be swap even if there is no backing page
			 */
			if (advise == MADV_FREE && tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);

			/*
			 * next object
			 */
			tobject = tobject->backing_object;
			if (tobject == NULL)
				continue;
			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
			goto shadowlookup;
		}

		/*
		 * If the page is busy or not in a normal active state,
		 * we skip it.  If the page is not managed there are no
		 * page queues to mess with.  Things can break if we mess
		 * with pages in any of the below states.
		 */
		if (
		    m->hold_count ||
		    m->wire_count ||
		    (m->flags & PG_UNMANAGED) ||
		    m->valid != VM_PAGE_BITS_ALL
		) {
			continue;
		}

		if (vm_page_sleep_busy(m, TRUE, "madvpo"))
			goto relookup;

		if (advise == MADV_WILLNEED) {
			vm_page_activate(m);
		} else if (advise == MADV_DONTNEED) {
			vm_page_dontneed(m);
		} else if (advise == MADV_FREE) {
			/*
			 * Mark the page clean.  This will allow the page
			 * to be freed up by the system.  However, such pages
			 * are often reused quickly by malloc()/free()
			 * so we do not do anything that would cause
			 * a page fault if we can help it.
			 *
			 * Specifically, we do not try to actually free
			 * the page now nor do we try to put it in the
			 * cache (which would cause a page fault on reuse).
			 *
			 * But we do make the page as freeable as we
			 * can without actually taking the step of unmapping
			 * it.
			 */
			pmap_clear_modify(m);
			m->dirty = 0;
			m->act_count = 0;
			vm_page_dontneed(m);
			if (tobject->type == OBJT_SWAP)
				swap_pager_freespace(tobject, tpindex, 1);
		}
	}
}
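/*
 * Usage sketch (illustrative): the madvise(2) system call funnels down
 * to something like the following for each map entry it covers ("obj",
 * "pstart" and "pcount" are hypothetical locals):
 *
 *	vm_object_madvise(obj, pstart, pcount, MADV_FREE);
 *
 * which clears the dirty state of the qualifying resident pages so the
 * pageout daemon can reclaim them without first writing them to swap.
 */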
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void
vm_object_shadow(
	vm_object_t *object,	/* IN/OUT */
	vm_ooffset_t *offset,	/* IN/OUT */
	vm_size_t length)
{
	vm_object_t source;
	vm_object_t result;

	GIANT_REQUIRED;
	source = *object;

	/*
	 * Don't create the new object if the old object isn't shared.
	 */
	if (source != NULL &&
	    source->ref_count == 1 &&
	    source->handle == NULL &&
	    (source->type == OBJT_DEFAULT ||
	     source->type == OBJT_SWAP))
		return;

	/*
	 * Allocate a new object with the given length
	 */
	result = vm_object_allocate(OBJT_DEFAULT, length);
	KASSERT(result != NULL, ("vm_object_shadow: no object for shadowing"));

	/*
	 * The new object shadows the source object, adding a reference to it.
	 * Our caller changes his reference to point to the new object,
	 * removing a reference to the source object.  Net result: no change
	 * of reference count.
	 *
	 * Try to optimize the result object's page color when shadowing
	 * in order to maintain page coloring consistency in the combined
	 * shadowed object.
	 */
	result->backing_object = source;
	if (source) {
		TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
		source->shadow_count++;
		source->generation++;
		result->pg_color = (source->pg_color + OFF_TO_IDX(*offset)) & PQ_L2_MASK;
	}

	/*
	 * Store the offset into the source object, and fix up the offset into
	 * the new object.
	 */
	result->backing_object_offset = *offset;

	/*
	 * Return the new things
	 */
	*offset = 0;
	*object = result;
}
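/*
 * Usage sketch (illustrative): a copy-on-write split over a map entry
 * typically looks like the following, where "entry" is a hypothetical
 * vm_map_entry (the real callers live in vm_map.c):
 *
 *	vm_object_shadow(&entry->object.vm_object,
 *	    &entry->offset,
 *	    atop(entry->end - entry->start));
 *
 * On return the entry points at the new shadow object and its offset
 * has been reset to 0; the old object has become the backing object.
 */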
#define	OBSC_TEST_ALL_SHADOWED	0x0001
#define	OBSC_COLLAPSE_NOWAIT	0x0002
#define	OBSC_COLLAPSE_WAIT	0x0004

static __inline int
vm_object_backing_scan(vm_object_t object, int op)
{
	int s;
	int r = 1;
	vm_page_t p;
	vm_object_t backing_object;
	vm_pindex_t backing_offset_index;

	s = splvm();
	GIANT_REQUIRED;

	backing_object = object->backing_object;
	backing_offset_index = OFF_TO_IDX(object->backing_object_offset);

	/*
	 * Initial conditions
	 */
	if (op & OBSC_TEST_ALL_SHADOWED) {
		/*
		 * We do not want to have to test for the existence of
		 * swap pages in the backing object.  XXX but with the
		 * new swapper this would be pretty easy to do.
		 *
		 * XXX what about anonymous MAP_SHARED memory that hasn't
		 * been ZFOD faulted yet?  If we do not test for this, the
		 * shadow test may succeed! XXX
		 */
		if (backing_object->type != OBJT_DEFAULT) {
			splx(s);
			return (0);
		}
	}
	if (op & OBSC_COLLAPSE_WAIT) {
		vm_object_set_flag(backing_object, OBJ_DEAD);
	}

	/*
	 * Our scan
	 */
	p = TAILQ_FIRST(&backing_object->memq);
	while (p) {
		vm_page_t next = TAILQ_NEXT(p, listq);
		vm_pindex_t new_pindex = p->pindex - backing_offset_index;

		if (op & OBSC_TEST_ALL_SHADOWED) {
			vm_page_t pp;

			/*
			 * Ignore pages outside the parent object's range
			 * and outside the parent object's mapping of the
			 * backing object.
			 *
			 * note that we do not busy the backing object's
			 * page.
			 */
			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				p = next;
				continue;
			}

			/*
			 * See if the parent has the page or if the parent's
			 * object pager has the page.  If the parent has the
			 * page but the page is not valid, the parent's
			 * object pager must have the page.
			 *
			 * If this fails, the parent does not completely shadow
			 * the object and we might as well give up now.
			 */
			pp = vm_page_lookup(object, new_pindex);
			if (
			    (pp == NULL || pp->valid == 0) &&
			    !vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				r = 0;
				break;
			}
		}

		/*
		 * Check for busy page
		 */
		if (op & (OBSC_COLLAPSE_WAIT | OBSC_COLLAPSE_NOWAIT)) {
			vm_page_t pp;

			if (op & OBSC_COLLAPSE_NOWAIT) {
				if (
				    (p->flags & PG_BUSY) ||
				    !p->valid ||
				    p->hold_count ||
				    p->wire_count ||
				    p->busy
				) {
					p = next;
					continue;
				}
			} else if (op & OBSC_COLLAPSE_WAIT) {
				if (vm_page_sleep_busy(p, TRUE, "vmocol")) {
					/*
					 * If we slept, anything could have
					 * happened.  Since the object is
					 * marked dead, the backing offset
					 * should not have changed so we
					 * just restart our scan.
					 */
					p = TAILQ_FIRST(&backing_object->memq);
					continue;
				}
			}

			/*
			 * Busy the page
			 */
			vm_page_busy(p);

			KASSERT(
			    p->object == backing_object,
			    ("vm_object_qcollapse(): object mismatch")
			);

			/*
			 * Destroy any associated swap
			 */
			if (backing_object->type == OBJT_SWAP) {
				swap_pager_freespace(
				    backing_object,
				    p->pindex,
				    1
				);
			}

			if (
			    p->pindex < backing_offset_index ||
			    new_pindex >= object->size
			) {
				/*
				 * Page is out of the parent object's range, we
				 * can simply destroy it.
				 */
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
				p = next;
				continue;
			}

			pp = vm_page_lookup(object, new_pindex);
			if (
			    pp != NULL ||
			    vm_pager_has_page(object, new_pindex, NULL, NULL)
			) {
				/*
				 * page already exists in parent OR swap exists
				 * for this location in the parent.  Destroy
				 * the original page from the backing object.
				 *
				 * Leave the parent's page alone
				 */
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
				p = next;
				continue;
			}

			/*
			 * Page does not exist in parent, rename the
			 * page from the backing object to the main object.
			 *
			 * If the page was mapped to a process, it can remain
			 * mapped through the rename.
			 */
			if ((p->queue - p->pc) == PQ_CACHE)
				vm_page_deactivate(p);

			vm_page_rename(p, object, new_pindex);
			/* page automatically made dirty by rename */
		}
		p = next;
	}
	splx(s);
	return (r);
}
/*
 * this version of collapse allows the operation to occur earlier and
 * when paging_in_progress is true for an object...  This is not a complete
 * operation, but should plug 99.9% of the rest of the leaks.
 */
static void
vm_object_qcollapse(vm_object_t object)
{
	vm_object_t backing_object = object->backing_object;

	GIANT_REQUIRED;

	if (backing_object->ref_count != 1)
		return;

	backing_object->ref_count += 2;

	vm_object_backing_scan(object, OBSC_COLLAPSE_NOWAIT);

	backing_object->ref_count -= 2;
}
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 */
void
vm_object_collapse(vm_object_t object)
{
	GIANT_REQUIRED;

	while (TRUE) {
		vm_object_t backing_object;

		/*
		 * Verify that the conditions are right for collapse:
		 *
		 * The object exists and the backing object exists.
		 */
		if (object == NULL)
			break;

		if ((backing_object = object->backing_object) == NULL)
			break;

		/*
		 * we check the backing object first, because it is most likely
		 * not collapsible.
		 */
		if (backing_object->handle != NULL ||
		    (backing_object->type != OBJT_DEFAULT &&
		     backing_object->type != OBJT_SWAP) ||
		    (backing_object->flags & OBJ_DEAD) ||
		    object->handle != NULL ||
		    (object->type != OBJT_DEFAULT &&
		     object->type != OBJT_SWAP) ||
		    (object->flags & OBJ_DEAD)) {
			break;
		}

		if (
		    object->paging_in_progress != 0 ||
		    backing_object->paging_in_progress != 0
		) {
			vm_object_qcollapse(object);
			break;
		}

		/*
		 * We know that we can either collapse the backing object (if
		 * the parent is the only reference to it) or (perhaps) have
		 * the parent bypass the object if the parent happens to shadow
		 * all the resident pages in the entire backing object.
		 *
		 * This is ignoring pager-backed pages such as swap pages.
		 * vm_object_backing_scan fails the shadowing test in this
		 * case.
		 */
		if (backing_object->ref_count == 1) {
			/*
			 * If there is exactly one reference to the backing
			 * object, we can collapse it into the parent.
			 */
			vm_object_backing_scan(object, OBSC_COLLAPSE_WAIT);

			/*
			 * Move the pager from backing_object to object.
			 */
			if (backing_object->type == OBJT_SWAP) {
				vm_object_pip_add(backing_object, 1);

				/*
				 * scrap the paging_offset junk and do a
				 * discrete copy.  This also removes major
				 * assumptions about how the swap-pager
				 * works from where it doesn't belong.  The
				 * new swapper is able to optimize the
				 * destroy-source case.
				 */
				vm_object_pip_add(object, 1);
				swap_pager_copy(
				    backing_object,
				    object,
				    OFF_TO_IDX(object->backing_object_offset), TRUE);
				vm_object_pip_wakeup(object);

				vm_object_pip_wakeup(backing_object);
			}
			/*
			 * Object now shadows whatever backing_object did.
			 * Note that the reference to
			 * backing_object->backing_object moves from within
			 * backing_object to within object.
			 */
			TAILQ_REMOVE(
			    &object->backing_object->shadow_head,
			    object,
			    shadow_list
			);
			object->backing_object->shadow_count--;
			object->backing_object->generation++;
			if (backing_object->backing_object) {
				TAILQ_REMOVE(
				    &backing_object->backing_object->shadow_head,
				    backing_object,
				    shadow_list
				);
				backing_object->backing_object->shadow_count--;
				backing_object->backing_object->generation++;
			}
			object->backing_object = backing_object->backing_object;
			if (object->backing_object) {
				TAILQ_INSERT_TAIL(
				    &object->backing_object->shadow_head,
				    object,
				    shadow_list
				);
				object->backing_object->shadow_count++;
				object->backing_object->generation++;
			}

			object->backing_object_offset +=
			    backing_object->backing_object_offset;

			/*
			 * Discard backing_object.
			 *
			 * Since the backing object has no pages, no pager left,
			 * and no object references within it, all that is
			 * necessary is to dispose of it.
			 */
			TAILQ_REMOVE(
			    &vm_object_list,
			    backing_object,
			    object_list
			);
			vm_object_count--;

			zfree(obj_zone, backing_object);

			object_collapses++;
		} else {
			vm_object_t new_backing_object;

			/*
			 * If we do not entirely shadow the backing object,
			 * there is nothing we can do so we give up.
			 */
			if (vm_object_backing_scan(object, OBSC_TEST_ALL_SHADOWED) == 0) {
				break;
			}

			/*
			 * Make the parent shadow the next object in the
			 * chain.  Deallocating backing_object will not remove
			 * it, since its reference count is at least 2.
			 */
			TAILQ_REMOVE(
			    &backing_object->shadow_head,
			    object,
			    shadow_list
			);
			backing_object->shadow_count--;
			backing_object->generation++;

			new_backing_object = backing_object->backing_object;
			if ((object->backing_object = new_backing_object) != NULL) {
				vm_object_reference(new_backing_object);
				TAILQ_INSERT_TAIL(
				    &new_backing_object->shadow_head,
				    object,
				    shadow_list
				);
				new_backing_object->shadow_count++;
				new_backing_object->generation++;
				object->backing_object_offset +=
				    backing_object->backing_object_offset;
			}

			/*
			 * Drop the reference count on backing_object. Since
			 * its ref_count was at least 2, it will not vanish;
			 * so we don't need to call vm_object_deallocate, but
			 * we do anyway.
			 */
			vm_object_deallocate(backing_object);
			object_bypasses++;
		}

		/*
		 * Try again with this object's new backing object.
		 */
	}
}
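/*
 * Illustrative before/after picture for the two cases above (a sketch
 * of the shadow chain, where X is whatever backed backing_object):
 *
 *	collapse:  object -> backing_object -> X      becomes
 *	           object -> X
 *	           (backing_object's pages merged into object, then freed)
 *
 *	bypass:    object -> backing_object -> X      becomes
 *	           object -> X
 *	           (backing_object survives; other references remain and
 *	            object already shadows all of its resident pages)
 */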
/*
 *	vm_object_page_remove: [internal]
 *
 *	Removes all physical pages in the specified
 *	object range from the object's list of pages.
 *
 *	The object must be locked.
 */
void
vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, boolean_t clean_only)
{
	vm_page_t p, next;
	unsigned int size;
	int all;

	GIANT_REQUIRED;

	if (object == NULL ||
	    object->resident_page_count == 0)
		return;

	all = ((end == 0) && (start == 0));

	/*
	 * Since physically-backed objects do not use managed pages, we can't
	 * remove pages from the object (we must instead remove the page
	 * references, and then destroy the object).
	 */
	KASSERT(object->type != OBJT_PHYS, ("attempt to remove pages from a physical object"));

	vm_object_pip_add(object, 1);
again:
	size = end - start;
	if (all || size > object->resident_page_count / 4) {
		for (p = TAILQ_FIRST(&object->memq); p != NULL; p = next) {
			next = TAILQ_NEXT(p, listq);
			if (all || ((start <= p->pindex) && (p->pindex < end))) {
				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty)
						continue;
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
		}
	} else {
		while (size > 0) {
			if ((p = vm_page_lookup(object, start)) != 0) {

				if (p->wire_count != 0) {
					vm_page_protect(p, VM_PROT_NONE);
					if (!clean_only)
						p->valid = 0;
					start += 1;
					size -= 1;
					continue;
				}

				/*
				 * The busy flags are only cleared at
				 * interrupt -- minimize the spl transitions
				 */
				if (vm_page_sleep_busy(p, TRUE, "vmopar"))
					goto again;

				if (clean_only && p->valid) {
					vm_page_test_dirty(p);
					if (p->valid & p->dirty) {
						start += 1;
						size -= 1;
						continue;
					}
				}

				vm_page_busy(p);
				vm_page_protect(p, VM_PROT_NONE);
				vm_page_free(p);
			}
			start += 1;
			size -= 1;
		}
	}
	vm_object_pip_wakeup(object);
}
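/*
 * Usage note (illustrative): passing start == 0 and end == 0 removes
 * every resident, unwired page, e.g.
 *
 *	vm_object_page_remove(object, 0, 0, FALSE);
 *
 * With clean_only == TRUE, valid dirty pages are left in place so their
 * contents can still be written back; wired pages are never freed, only
 * unmapped (and invalidated when clean_only is FALSE).
 */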
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *	The object must *not* be locked.
 */
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_pindex_t prev_pindex, vm_size_t prev_size, vm_size_t next_size)
{
	vm_pindex_t next_pindex;

	GIANT_REQUIRED;

	if (prev_object == NULL) {
		return (TRUE);
	}

	if (prev_object->type != OBJT_DEFAULT &&
	    prev_object->type != OBJT_SWAP) {
		return (FALSE);
	}

	/*
	 * Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 * Can't coalesce if:
	 *  - more than one reference
	 *  - paged out
	 *  - shadows another object
	 *  - has a copy elsewhere
	 * (any of which mean that the pages not mapped to prev_entry may be
	 * in use anyway)
	 */
	if (prev_object->backing_object != NULL) {
		return (FALSE);
	}

	prev_size >>= PAGE_SHIFT;
	next_size >>= PAGE_SHIFT;
	next_pindex = prev_pindex + prev_size;

	if ((prev_object->ref_count > 1) &&
	    (prev_object->size != next_pindex)) {
		return (FALSE);
	}

	/*
	 * Remove any pages that may still be in the object from a previous
	 * deallocation.
	 */
	if (next_pindex < prev_object->size) {
		vm_object_page_remove(prev_object,
		    next_pindex,
		    next_pindex + next_size, FALSE);
		if (prev_object->type == OBJT_SWAP)
			swap_pager_freespace(prev_object,
			    next_pindex, next_size);
	}

	/*
	 * Extend the object if necessary.
	 */
	if (next_pindex + next_size > prev_object->size)
		prev_object->size = next_pindex + next_size;

	return (TRUE);
}
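/*
 * Worked example (illustrative; sizes are passed in bytes and shifted
 * down to pages inside the routine): suppose prev_object covers pindices
 * [0, 8) and the caller wants to append a 4-page region directly after
 * it.  Then next_pindex = 0 + 8 = 8, and on success the object simply
 * grows to cover pindices [0, 12):
 *
 *	if (vm_object_coalesce(prev_object, 0,
 *	    (vm_size_t)8 << PAGE_SHIFT, (vm_size_t)4 << PAGE_SHIFT))
 *		... prev_object->size is now 12 ...
 *
 * avoiding the allocation of a second object for the new region.
 */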
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

#include <sys/cons.h>

#include <ddb/ddb.h>

static int
_vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry)
{
	vm_map_t tmpm;
	vm_map_entry_t tmpe;
	vm_object_t obj;
	int entcount;

	if (map == 0)
		return 0;

	if (entry == 0) {
		tmpe = map->header.next;
		entcount = map->nentries;
		while (entcount-- && (tmpe != &map->header)) {
			if (_vm_object_in_map(map, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
		tmpm = entry->object.sub_map;
		tmpe = tmpm->header.next;
		entcount = tmpm->nentries;
		while (entcount-- && tmpe != &tmpm->header) {
			if (_vm_object_in_map(tmpm, object, tmpe)) {
				return 1;
			}
			tmpe = tmpe->next;
		}
	} else if ((obj = entry->object.vm_object) != NULL) {
		for (; obj; obj = obj->backing_object)
			if (obj == object) {
				return 1;
			}
	}
	return 0;
}

static int
vm_object_in_map(vm_object_t object)
{
	struct proc *p;

	/* sx_slock(&allproc_lock); */
	LIST_FOREACH(p, &allproc, p_list) {
		if (!p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
			continue;
		if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {
			/* sx_sunlock(&allproc_lock); */
			return 1;
		}
	}
	/* sx_sunlock(&allproc_lock); */
	if (_vm_object_in_map(kernel_map, object, 0))
		return 1;
	if (_vm_object_in_map(kmem_map, object, 0))
		return 1;
	if (_vm_object_in_map(pager_map, object, 0))
		return 1;
	if (_vm_object_in_map(buffer_map, object, 0))
		return 1;
	return 0;
}

DB_SHOW_COMMAND(vmochk, vm_object_check)
{
	vm_object_t object;

	/*
	 * make sure that internal objs are in a map somewhere
	 * and none have zero ref counts.
	 */
	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		if (object->handle == NULL &&
		    (object->type == OBJT_DEFAULT || object->type == OBJT_SWAP)) {
			if (object->ref_count == 0) {
				db_printf("vmochk: internal obj has zero ref count: %ld\n",
					(long)object->size);
			}
			if (!vm_object_in_map(object)) {
				db_printf(
			"vmochk: internal obj is not in a map: "
			"ref: %d, size: %lu: 0x%lx, backing_object: %p\n",
				    object->ref_count, (u_long)object->size,
				    (u_long)object->size,
				    (void *)object->backing_object);
			}
		}
	}
}
/*
 *	vm_object_print:	[ debug ]
 */
DB_SHOW_COMMAND(object, vm_object_print_static)
{
	/* XXX convert args. */
	vm_object_t object = (vm_object_t)addr;
	boolean_t full = have_addr;

	vm_page_t p;

	/* XXX count is an (unused) arg.  Avoid shadowing it. */
#define	count	was_count

	int count;

	if (object == NULL)
		return;

	db_iprintf(
	    "Object %p: type=%d, size=0x%lx, res=%d, ref=%d, flags=0x%x\n",
	    object, (int)object->type, (u_long)object->size,
	    object->resident_page_count, object->ref_count, object->flags);
	/*
	 * XXX no %qd in kernel.  Truncate object->backing_object_offset.
	 */
	db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%lx\n",
	    object->shadow_count,
	    object->backing_object ? object->backing_object->ref_count : 0,
	    object->backing_object, (long)object->backing_object_offset);

	if (!full)
		return;

	db_indent += 2;
	count = 0;
	TAILQ_FOREACH(p, &object->memq, listq) {
		if (count == 0)
			db_iprintf("memory:=");
		else if (count == 6) {
			db_printf("\n");
			db_iprintf(" ...");
			count = 0;
		} else
			db_printf(",");
		count++;

		db_printf("(off=0x%lx,page=0x%lx)",
		    (u_long) p->pindex, (u_long) VM_PAGE_TO_PHYS(p));
	}
	if (count != 0)
		db_printf("\n");
	db_indent -= 2;
}

/* XXX. */
#undef count

/* XXX need this non-static entry for calling from vm_map_print. */
void
vm_object_print(
	/* db_expr_t */ long addr,
	boolean_t have_addr,
	/* db_expr_t */ long count,
	char *modif)
{
	vm_object_print_static(addr, have_addr, count, modif);
}

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
	vm_object_t object;
	int nl = 0;
	int c;

	TAILQ_FOREACH(object, &vm_object_list, object_list) {
		vm_pindex_t idx, fidx;
		vm_pindex_t osize;
		vm_offset_t pa = -1, padiff;
		int rcount;
		vm_page_t m;

		db_printf("new object: %p\n", (void *)object);
		if (nl > 18) {
			c = cngetc();
			if (c != ' ')
				return;
			nl = 0;
		}
		nl++;
		rcount = 0;
		fidx = 0;
		osize = object->size;
		if (osize > 128)
			osize = 128;
		for (idx = 0; idx < osize; idx++) {
			m = vm_page_lookup(object, idx);
			if (m == NULL) {
				if (rcount) {
					db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
						(long)fidx, rcount, (long)pa);
					if (nl > 18) {
						c = cngetc();
						if (c != ' ')
							return;
						nl = 0;
					}
					nl++;
					rcount = 0;
				}
				continue;
			}

			if (rcount &&
				(VM_PAGE_TO_PHYS(m) == pa + rcount * PAGE_SIZE)) {
				++rcount;
				continue;
			}
			if (rcount) {
				padiff = pa + rcount * PAGE_SIZE - VM_PAGE_TO_PHYS(m);
				padiff >>= PAGE_SHIFT;
				padiff &= PQ_L2_MASK;
				if (padiff == 0) {
					pa = VM_PAGE_TO_PHYS(m) - rcount * PAGE_SIZE;
					++rcount;
					continue;
				}
				db_printf(" index(%ld)run(%d)pa(0x%lx)",
					(long)fidx, rcount, (long)pa);
				db_printf("pd(%ld)\n", (long)padiff);
				if (nl > 18) {
					c = cngetc();
					if (c != ' ')
						return;
					nl = 0;
				}
				nl++;
			}
			fidx = idx;
			pa = VM_PAGE_TO_PHYS(m);
			rcount = 1;
		}
		if (rcount) {
			db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
				(long)fidx, rcount, (long)pa);
			if (nl > 18) {
				c = cngetc();
				if (c != ' ')
					return;
				nl = 0;
			}
			nl++;
		}
	}
}
#endif /* DDB */