/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_fault.c,v 1.32 1995/09/24 19:47:58 dyson Exp $
 */

/*
 *	Page fault handling module.
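 *
 *	vm_fault() resolves a single fault against a map and enters the
 *	page into the physical map; vm_fault_wire() and vm_fault_unwire()
 *	wire and unwire a range of addresses by simulated faults;
 *	vm_fault_copy_entry() copies the pages backing a wired map entry;
 *	vm_fault_additional_pages() clusters read-behind/read-ahead pages
 *	around a faulted page.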
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resource.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>

int vm_fault_additional_pages __P((vm_page_t, int, int, vm_page_t *, int *));

#define VM_FAULT_READ_AHEAD 4
#define VM_FAULT_READ_BEHIND 3
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(map, vaddr, fault_type, change_wiring)
    vm_map_t map;
    vm_offset_t vaddr;
    vm_prot_t fault_type;
    boolean_t change_wiring;
{
    vm_object_t first_object;
    vm_offset_t first_offset;
    vm_map_entry_t entry;
    register vm_object_t object;
    register vm_offset_t offset;
    vm_page_t m;
    vm_page_t first_m;
    vm_prot_t prot;
    int result;
    boolean_t wired;
    boolean_t su;
    boolean_t lookup_still_valid;
    boolean_t page_exists;
    vm_page_t old_m;
    vm_object_t next_object;
    vm_page_t marray[VM_FAULT_READ];
    int spl;
    int hardfault = 0;
    struct vnode *vp = NULL;

    cnt.v_vm_faults++;		/* needs lock XXX */
    /*
     * Recovery actions
     */
#define FREE_PAGE(m) {				\
    PAGE_WAKEUP(m);				\
    vm_page_free(m);				\
}

#define RELEASE_PAGE(m) {			\
    PAGE_WAKEUP(m);				\
    if ((m->flags & PG_ACTIVE) == 0) vm_page_activate(m);	\
}

#define UNLOCK_MAP {				\
    if (lookup_still_valid) {			\
	vm_map_lookup_done(map, entry);		\
	lookup_still_valid = FALSE;		\
    }						\
}

#define UNLOCK_THINGS {				\
    vm_object_pip_wakeup(object);		\
    if (object != first_object) {		\
	FREE_PAGE(first_m);			\
	vm_object_pip_wakeup(first_object);	\
    }						\
    UNLOCK_MAP;					\
    if (vp != NULL) VOP_UNLOCK(vp);		\
}

#define UNLOCK_AND_DEALLOCATE {			\
    UNLOCK_THINGS;				\
    vm_object_deallocate(first_object);		\
}


RetryFault:;

    /*
     * Find the backing store object and offset into it to begin the
     * search.
     */

    if ((result = vm_map_lookup(&map, vaddr,
	    fault_type, &entry, &first_object,
	    &first_offset, &prot, &wired, &su)) != KERN_SUCCESS) {
	return (result);
    }

    vp = vnode_pager_lock(first_object);

    lookup_still_valid = TRUE;

    if (wired)
	fault_type = prot;

    first_m = NULL;

    /*
     * Make a reference to this object to prevent its disposal while we
     * are messing with it.  Once we have the reference, the map is free
     * to be diddled.  Since objects reference their shadows (and copies),
     * they will stay around as well.
     */

    first_object->ref_count++;
    first_object->paging_in_progress++;

    /*
     * INVARIANTS (through entire routine):
     *
     * 1)	At all times, we must either have the object lock or a busy
     * page in some object to prevent some other process from trying to
     * bring in the same page.
     *
     * Note that we cannot hold any locks during the pager access or when
     * waiting for memory, so we use a busy page then.
     *
     * Note also that we aren't as concerned about more than one thread
     * attempting to pager_data_unlock the same page at once, so we don't
     * hold the page as busy then, but do record the highest unlock value
     * so far. [Unlock requests may also be delivered out of order.]
     *
     * 2)	Once we have a busy page, we must remove it from the pageout
     * queues, so that the pageout daemon will not grab it away.
     *
     * 3)	To prevent another process from racing us down the shadow chain
     * and entering a new page in the top object before we do, we must
     * keep a busy page in the top object while following the shadow
     * chain.
     *
     * 4)	We must increment paging_in_progress on any object for which
     * we have a busy page, to prevent vm_object_collapse from removing
     * the busy page without our noticing.
     */

    /*
     * Search for the page at object/offset.
     */

    object = first_object;
    offset = first_offset;

    /*
     * See whether this page is resident
     */

    while (TRUE) {
	m = vm_page_lookup(object, offset);
	if (m != NULL) {
	    /*
	     * If the page is being brought in, wait for it and
	     * then retry.
	     */
	    if ((m->flags & PG_BUSY) || m->busy) {
		int s;

		UNLOCK_THINGS;
		s = splhigh();
		if ((m->flags & PG_BUSY) || m->busy) {
		    m->flags |= PG_WANTED | PG_REFERENCED;
		    cnt.v_intrans++;
		    tsleep(m, PSWP, "vmpfw", 0);
		}
		splx(s);
		vm_object_deallocate(first_object);
		goto RetryFault;
	    }

	    /*
	     * Mark page busy for other processes, and the pagedaemon.
	     */
	    m->flags |= PG_BUSY;
	    if ((m->flags & PG_CACHE) &&
		(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
		UNLOCK_AND_DEALLOCATE;
		VM_WAIT;
		PAGE_WAKEUP(m);
		goto RetryFault;
	    }

	    if (m->valid && ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
		m->object != kernel_object && m->object != kmem_object) {
		goto readrest;
	    }
	    break;
	}
	if (((object->type != OBJT_DEFAULT) && (!change_wiring || wired))
	    || (object == first_object)) {

	    if (offset >= object->size) {
		UNLOCK_AND_DEALLOCATE;
		return (KERN_PROTECTION_FAILURE);
	    }

	    /*
	     * Allocate a new page for this object/offset pair.
	     */
	    m = vm_page_alloc(object, offset,
		vp ? VM_ALLOC_NORMAL : (VM_ALLOC_NORMAL | VM_ALLOC_ZERO));

	    if (m == NULL) {
		UNLOCK_AND_DEALLOCATE;
		VM_WAIT;
		goto RetryFault;
	    }
	}
readrest:
	if (object->type != OBJT_DEFAULT && (!change_wiring || wired)) {
	    int rv;
	    int faultcount;
	    int reqpage;

	    /*
	     * Now we find out if any other pages should be paged
	     * in at this time.  This routine checks to see if the
	     * pages surrounding this fault reside in the same
	     * object as the page for this fault.  If they do,
	     * then they are faulted in to the object as well.  The
	     * array "marray" returned contains an array of
	     * vm_page_t structs where one of them is the
	     * vm_page_t passed to the routine.  The reqpage
	     * return value is the index into the marray for the
	     * vm_page_t passed to the routine.
	     */
	    faultcount = vm_fault_additional_pages(
		m, VM_FAULT_READ_BEHIND, VM_FAULT_READ_AHEAD,
		marray, &reqpage);

	    /*
	     * Call the pager to retrieve the data, if any, after
	     * releasing the lock on the map.
	     */
	    UNLOCK_MAP;

	    rv = faultcount ?
		vm_pager_get_pages(object, marray, faultcount,
		    reqpage) : VM_PAGER_FAIL;

	    if (rv == VM_PAGER_OK) {
		/*
		 * Found the page. Leave it busy while we play
		 * with it.
		 */

		/*
		 * Relookup in case pager changed page. Pager
		 * is responsible for disposition of old page
		 * if moved.
		 */
		m = vm_page_lookup(object, offset);
		if (!m) {
		    UNLOCK_AND_DEALLOCATE;
		    goto RetryFault;
		}

		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
		m->valid = VM_PAGE_BITS_ALL;
		m->flags |= PG_BUSY;
		hardfault++;
		break;
	    }
	    /*
	     * Remove the bogus page (which does not exist at this
	     * object/offset); before doing so, we must get back
	     * our object lock to preserve our invariant.
	     *
	     * Also wake up any other process that may want to bring
	     * in this page.
	     *
	     * If this is the top-level object, we must leave the
	     * busy page to prevent another process from rushing
	     * past us, and inserting the page in that object at
	     * the same time that we are.
	     */

	    if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager input (probably hardware) error, PID %d failure\n",
		    curproc->p_pid);
	    /*
	     * Data outside the range of the pager or an I/O error
	     */
	    /*
	     * XXX - the check for kernel_map is a kludge to work
	     * around having the machine panic on a kernel space
	     * fault w/ I/O error.
	     */
	    if (((map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
		FREE_PAGE(m);
		UNLOCK_AND_DEALLOCATE;
		return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
	    }
	    if (object != first_object) {
		FREE_PAGE(m);
		/*
		 * XXX - we cannot just fall out at this
		 * point, m has been freed and is invalid!
		 */
	    }
	}
	/*
	 * We get here if the object has default pager (or unwiring) or the
	 * pager doesn't have the page.
	 */
	if (object == first_object)
	    first_m = m;

	/*
	 * Move on to the next object. Lock the next object before
	 * unlocking the current one.
	 */

	offset += object->backing_object_offset;
	next_object = object->backing_object;
	if (next_object == NULL) {
	    /*
	     * If there's no object left, fill the page in the top
	     * object with zeros.
	     */
	    if (object != first_object) {
		vm_object_pip_wakeup(object);

		object = first_object;
		offset = first_offset;
		m = first_m;
	    }
	    first_m = NULL;

	    if ((m->flags & PG_ZERO) == 0)
		vm_page_zero_fill(m);
	    m->valid = VM_PAGE_BITS_ALL;
	    cnt.v_zfod++;
	    break;
	} else {
	    if (object != first_object) {
		vm_object_pip_wakeup(object);
	    }
	    object = next_object;
	    object->paging_in_progress++;
	}
    }

    if ((m->flags & PG_BUSY) == 0)
	panic("vm_fault: not busy after main loop");

    /*
     * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
     * is held.]
     */

    old_m = m;		/* save page that would be copied */

    /*
     * If the page is being written, but isn't already owned by the
     * top-level object, we have to copy it into a new page owned by the
     * top-level object.
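     * (This is the copy-on-write case.)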
     */

    if (object != first_object) {
	/*
	 * We only really need to copy if we want to write it.
	 */

	if (fault_type & VM_PROT_WRITE) {

	    /*
	     * If we try to collapse first_object at this point,
	     * we may deadlock when we try to get the lock on an
	     * intermediate object (since we have the bottom
	     * object locked).  We can't unlock the bottom object,
	     * because the page we found may move (by collapse) if
	     * we do.
	     *
	     * Instead, we first copy the page.  Then, when we have
	     * no more use for the bottom object, we unlock it and
	     * try to collapse.
	     *
	     * Note that we copy the page even if we didn't need
	     * to... that's the breaks.
	     */

	    /*
	     * We already have an empty page in first_object - use
	     * it.
	     */

	    vm_page_copy(m, first_m);
	    first_m->valid = VM_PAGE_BITS_ALL;

	    /*
	     * If another map is truly sharing this page with us,
	     * we have to flush all uses of the original page,
	     * since we can't distinguish those which want the
	     * original from those which need the new copy.
	     *
	     * XXX If we know that only one map has access to this
	     * page, then we could avoid the pmap_page_protect()
	     * call.
	     */

	    if ((m->flags & PG_ACTIVE) == 0)
		vm_page_activate(m);
	    vm_page_protect(m, VM_PROT_NONE);

	    /*
	     * We no longer need the old page or object.
	     */
	    PAGE_WAKEUP(m);
	    vm_object_pip_wakeup(object);

	    /*
	     * Only use the new page below...
	     */

	    cnt.v_cow_faults++;
	    m = first_m;
	    object = first_object;
	    offset = first_offset;

	    /*
	     * Now that we've gotten the copy out of the way,
	     * let's try to collapse the top object.
	     *
	     * But we have to play ugly games with
	     * paging_in_progress to do that...
	     */
	    vm_object_pip_wakeup(object);
	    vm_object_collapse(object);
	    object->paging_in_progress++;
	} else {
	    prot &= ~VM_PROT_WRITE;
	    m->flags |= PG_COPYONWRITE;
	}
    }

    /*
     * We must verify that the maps have not changed since our last
     * lookup.
     */

    if (!lookup_still_valid) {
	vm_object_t retry_object;
	vm_offset_t retry_offset;
	vm_prot_t retry_prot;

	/*
	 * Since map entries may be pageable, make sure we can take a
	 * page fault on them.
	 */

	/*
	 * To avoid trying to write_lock the map while another process
	 * has it read_locked (in vm_map_pageable), we do not try for
	 * write permission.  If the page is still writable, we will
	 * get write permission.  If it is not, or has been marked
	 * needs_copy, we enter the mapping without write permission,
	 * and will merely take another fault.
	 */
	result = vm_map_lookup(&map, vaddr, fault_type & ~VM_PROT_WRITE,
	    &entry, &retry_object, &retry_offset, &retry_prot, &wired, &su);

	/*
	 * If we don't need the page any longer, put it on the active
	 * list (the easiest thing to do here). If no one needs it,
	 * pageout will grab it eventually.
	 */

	if (result != KERN_SUCCESS) {
	    RELEASE_PAGE(m);
	    UNLOCK_AND_DEALLOCATE;
	    return (result);
	}
	lookup_still_valid = TRUE;

	if ((retry_object != first_object) ||
	    (retry_offset != first_offset)) {
	    RELEASE_PAGE(m);
	    UNLOCK_AND_DEALLOCATE;
	    goto RetryFault;
	}
	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	prot &= retry_prot;
	if (m->flags & PG_COPYONWRITE)
	    prot &= ~VM_PROT_WRITE;
    }
    /*
     * (the various bits we're fiddling with here are locked by the
     * object's lock)
     */

    /* XXX This distorts the meaning of the copy_on_write bit */

    if (prot & VM_PROT_WRITE)
	m->flags &= ~PG_COPYONWRITE;

    /*
     * It's critically important that a wired-down page be faulted only
     * once in each map for which it is wired.
     */

    /*
     * Put this page into the physical map. We had to do the unlock above
     * because pmap_enter may cause other faults. We don't put the page
     * back on the active queue until later so that the page-out daemon
     * won't find us (yet).
     */

    if (prot & VM_PROT_WRITE) {
	m->flags |= PG_WRITEABLE;
	m->object->flags |= OBJ_WRITEABLE;
	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW. This will save on the pmap_is_modified() calls
	 * later.
	 */
	if (fault_type & VM_PROT_WRITE) {
	    m->dirty = VM_PAGE_BITS_ALL;
	}
    }

    m->flags |= PG_MAPPED | PG_REFERENCED;
    m->flags &= ~PG_ZERO;

    pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
#if 0
    if (change_wiring == 0 && wired == 0)
	pmap_prefault(map->pmap, vaddr, entry, first_object);
#endif

    /*
     * If the page is not wired down, then put it where the pageout daemon
     * can find it.
     */
    if (change_wiring) {
	if (wired)
	    vm_page_wire(m);
	else
	    vm_page_unwire(m);
    } else {
	if ((m->flags & PG_ACTIVE) == 0)
	    vm_page_activate(m);
    }

    if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {
	if (hardfault) {
	    curproc->p_stats->p_ru.ru_majflt++;
	} else {
	    curproc->p_stats->p_ru.ru_minflt++;
	}
    }

    if ((m->flags & PG_BUSY) == 0)
	printf("page not busy: %d\n", m->offset);
    /*
     * Unlock everything, and return
     */

    PAGE_WAKEUP(m);
    UNLOCK_AND_DEALLOCATE;

    return (KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(map, start, end)
    vm_map_t map;
    vm_offset_t start, end;
{

    register vm_offset_t va;
    register pmap_t pmap;
    int rv;

    pmap = vm_map_pmap(map);

    /*
     * Inform the physical mapping system that the range of addresses may
     * not fault, so that page tables and such can be locked down as well.
     */

    pmap_pageable(pmap, start, end, FALSE);

    /*
     * We simulate a fault to get the page and enter it in the physical
     * map.
     */

    for (va = start; va < end; va += PAGE_SIZE) {

	while (curproc != pageproc &&
	    (cnt.v_free_count <= cnt.v_pageout_free_min))
	    VM_WAIT;

	rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	if (rv) {
	    if (va != start)
		vm_fault_unwire(map, start, va);
	    return (rv);
	}
    }
    return (KERN_SUCCESS);
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(map, start, end)
    vm_map_t map;
    vm_offset_t start, end;
{

    register vm_offset_t va, pa;
    register pmap_t pmap;

    pmap = vm_map_pmap(map);

    /*
     * Since the pages are wired down, we must be able to get their
     * mappings from the physical map system.
     */

    for (va = start; va < end; va += PAGE_SIZE) {
	pa = pmap_extract(pmap, va);
	if (pa == (vm_offset_t) 0) {
	    panic("unwire: page not in pmap");
	}
	pmap_change_wiring(pmap, va, FALSE);
	vm_page_unwire(PHYS_TO_VM_PAGE(pa));
    }

    /*
     * Inform the physical mapping system that the range of addresses may
     * fault, so that page tables and such may be unwired themselves.
     */

    pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
    vm_map_t dst_map;
    vm_map_t src_map;
    vm_map_entry_t dst_entry;
    vm_map_entry_t src_entry;
{
    vm_object_t dst_object;
    vm_object_t src_object;
    vm_offset_t dst_offset;
    vm_offset_t src_offset;
    vm_prot_t prot;
    vm_offset_t vaddr;
    vm_page_t dst_m;
    vm_page_t src_m;

#ifdef lint
    src_map++;
#endif	/* lint */

    src_object = src_entry->object.vm_object;
    src_offset = src_entry->offset;

    /*
     * Create the top-level object for the destination entry. (Doesn't
     * actually shadow anything - we copy the pages directly.)
     */
    dst_object = vm_object_allocate(OBJT_DEFAULT,
	(vm_size_t) (dst_entry->end - dst_entry->start));

    dst_entry->object.vm_object = dst_object;
    dst_entry->offset = 0;

    prot = dst_entry->max_protection;

    /*
     * Loop through all of the pages in the entry's range, copying each
     * one from the source object (it should be there) to the destination
     * object.
     */
    for (vaddr = dst_entry->start, dst_offset = 0;
	vaddr < dst_entry->end;
	vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

	/*
	 * Allocate a page in the destination object
	 */
	do {
	    dst_m = vm_page_alloc(dst_object, dst_offset, VM_ALLOC_NORMAL);
	    if (dst_m == NULL) {
		VM_WAIT;
	    }
	} while (dst_m == NULL);

	/*
	 * Find the page in the source object, and copy it in.
	 * (Because the source is wired down, the page will be in
	 * memory.)
	 */
	src_m = vm_page_lookup(src_object, dst_offset + src_offset);
	if (src_m == NULL)
	    panic("vm_fault_copy_wired: page missing");

	vm_page_copy(src_m, dst_m);

	/*
	 * Enter it in the pmap...
	 */

	dst_m->flags |= PG_WRITEABLE|PG_MAPPED;
	dst_m->flags &= ~PG_ZERO;
	pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
	    prot, FALSE);

	/*
	 * Mark it no longer busy, and put it on the active list.
	 */
	vm_page_activate(dst_m);
	PAGE_WAKEUP(dst_m);
    }
}


/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  It brackets the run of viable
 * pages to be paged in around the requested page.
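 *
 * The scan in each direction stops at the first page already resident
 * in the object, and read-behind is skipped when the fault is at
 * offset 0 (the unsigned page offset would otherwise wrap).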
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *	marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *	number of pages in marray
 */
int
vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
    vm_page_t m;
    int rbehind;
    int rahead;
    vm_page_t *marray;
    int *reqpage;
{
    int i;
    vm_object_t object;
    vm_offset_t offset, startoffset, endoffset, toffset, size;
    vm_page_t rtm;
    int treqpage;
    int cbehind, cahead;

    object = m->object;
    offset = m->offset;

    /*
     * if the requested page is not available, then give up now
     */

    if (!vm_pager_has_page(object,
	    object->paging_offset + offset, &cbehind, &cahead))
	return 0;

    if ((cbehind == 0) && (cahead == 0)) {
	*reqpage = 0;
	marray[0] = m;
	return 1;
    }

    if (rahead > cahead) {
	rahead = cahead;
    }

    if (rbehind > cbehind) {
	rbehind = cbehind;
    }

    /*
     * try to do any readahead that we might have free pages for.
     */
    if ((rahead + rbehind) >
	((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
	pagedaemon_wakeup();
	*reqpage = 0;
	marray[0] = m;
	return 1;
    }

    /*
     * scan backward for the read behind pages -- in memory or on disk not
     * in same object
     */
    toffset = offset - PAGE_SIZE;
    if (toffset < offset) {
	if (rbehind * PAGE_SIZE > offset)
	    rbehind = offset / PAGE_SIZE;
	startoffset = offset - rbehind * PAGE_SIZE;
	while (toffset >= startoffset) {
	    if (vm_page_lookup(object, toffset)) {
		startoffset = toffset + PAGE_SIZE;
		break;
	    }
	    if (toffset == 0)
		break;
	    toffset -= PAGE_SIZE;
	}
    } else {
	startoffset = offset;
    }

    /*
     * scan forward for the read ahead pages -- in memory or on disk not
     * in same object
     */
    toffset = offset + PAGE_SIZE;
    endoffset = offset + (rahead + 1) * PAGE_SIZE;
    if (endoffset > object->size)
	endoffset = object->size;
    while (toffset < endoffset) {
	if (vm_page_lookup(object, toffset)) {
	    break;
	}
	toffset += PAGE_SIZE;
    }
    endoffset = toffset;

    /* calculate the number of pages in the run */
    size = (endoffset - startoffset) / PAGE_SIZE;

    /* calculate the index of the required page within the run */
    treqpage = (offset - startoffset) / PAGE_SIZE;

    /* see if we have space (again) */
    if ((cnt.v_free_count + cnt.v_cache_count) >
	(cnt.v_free_reserved + size)) {
	/*
	 * get our pages and don't block for them
	 */
	for (i = 0; i < size; i++) {
	    if (i != treqpage) {
		rtm = vm_page_alloc(object,
		    startoffset + i * PAGE_SIZE,
		    VM_ALLOC_NORMAL);
		if (rtm == NULL) {
		    if (i < treqpage) {
			int j;

			for (j = 0; j < i; j++) {
			    FREE_PAGE(marray[j]);
			}
			*reqpage = 0;
			marray[0] = m;
			return 1;
		    } else {
			size = i;
			*reqpage = treqpage;
			return size;
		    }
		}
		marray[i] = rtm;
	    } else {
		marray[i] = m;
	    }
	}

	*reqpage = treqpage;
	return size;
    }
    *reqpage = 0;
    marray[0] = m;
    return 1;
}