/*
 * Copyright (c) 1991, 1993
 *    The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *    Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *    School of Computer Science
 *    Carnegie Mellon University
 *    Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_fault.c,v 1.73 1998/01/06 05:25:54 dyson Exp $
 */

/*
 * Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

int vm_fault_additional_pages __P((vm_page_t, int, int, vm_page_t *, int *));

#define VM_FAULT_READ_AHEAD 4
#define VM_FAULT_READ_BEHIND 3
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

/*
 *    vm_fault:
 *
 *    Handle a page fault occurring at the given address,
 *    requiring the given permissions, in the map specified.
 *    If successful, the page is inserted into the
 *    associated physical map.
 *
 *    NOTE: the given address should be truncated to the
 *    proper page address.
 *
 *    KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *    a standard error specifying why the fault is fatal is returned.
 *
 *
 *    The map in question must be referenced, and remains so.
 *    Caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
    vm_object_t first_object;
    vm_pindex_t first_pindex;
    vm_map_entry_t entry;
    register vm_object_t object;
    register vm_pindex_t pindex;
    vm_page_t m;
    vm_page_t first_m;
    vm_prot_t prot;
    int result;
    boolean_t wired;
    boolean_t su;
    boolean_t lookup_still_valid;
    vm_page_t old_m;
    vm_object_t next_object;
    vm_page_t marray[VM_FAULT_READ];
    int hardfault = 0;
    struct vnode *vp = NULL;
    struct proc *p = curproc;	/* XXX */

    cnt.v_vm_faults++;		/* needs lock XXX */
    /*
     * Recovery actions
     */
#define FREE_PAGE(m) {						\
    PAGE_WAKEUP(m);						\
    vm_page_free(m);						\
}

#define RELEASE_PAGE(m) {					\
    PAGE_WAKEUP(m);						\
    if (m->queue != PQ_ACTIVE) vm_page_activate(m);		\
}

#define UNLOCK_MAP {						\
    if (lookup_still_valid) {					\
        vm_map_lookup_done(map, entry);				\
        lookup_still_valid = FALSE;				\
    }								\
}

#define UNLOCK_THINGS {						\
    vm_object_pip_wakeup(object);				\
    if (object != first_object) {				\
        FREE_PAGE(first_m);					\
        vm_object_pip_wakeup(first_object);			\
    }								\
    UNLOCK_MAP;							\
    if (vp != NULL) VOP_UNLOCK(vp, 0, p);			\
}

#define UNLOCK_AND_DEALLOCATE {					\
    UNLOCK_THINGS;						\
    vm_object_deallocate(first_object);				\
}


RetryFault:;

    /*
     * Find the backing store object and offset into it to begin the
     * search.
     */

    if ((result = vm_map_lookup(&map, vaddr,
        fault_type, &entry, &first_object,
        &first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
        return (result);
    }

    if (entry->eflags & MAP_ENTRY_NOFAULT) {
        panic("vm_fault: fault on nofault entry, addr: %lx",
            vaddr);
    }

    /*
     * If we are user-wiring a r/w segment, and it is COW, then
     * we need to do the COW operation.  Note that we don't COW
     * currently RO sections now, because it is NOT desirable
     * to COW .text.  We simply keep .text from ever being COW'ed
     * and take the heat that one cannot debug wired .text sections.
     */
    if (((fault_flags & VM_FAULT_WIRE_MASK) == VM_FAULT_USER_WIRE) &&
        (entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
        if (entry->protection & VM_PROT_WRITE) {
            int tresult;
            vm_map_lookup_done(map, entry);

            tresult = vm_map_lookup(&map, vaddr, VM_PROT_READ|VM_PROT_WRITE,
                &entry, &first_object, &first_pindex, &prot, &wired, &su);
            if (tresult != KERN_SUCCESS)
                return tresult;
        } else {
            /*
             * If we don't COW now, on a user wire, the user will never
             * be able to write to the mapping.  If we don't make this
             * restriction, the bookkeeping would be nearly impossible.
             */
            entry->max_protection &= ~VM_PROT_WRITE;
        }
    }

    /*
     * Make a reference to this object to prevent its disposal while we
     * are messing with it.  Once we have the reference, the map is free
     * to be diddled.  Since objects reference their shadows (and copies),
     * they will stay around as well.
     */
    vm_object_reference(first_object);
    first_object->paging_in_progress++;

    vp = vnode_pager_lock(first_object);
    if ((fault_type & VM_PROT_WRITE) &&
        (first_object->type == OBJT_VNODE)) {
        vm_freeze_copyopts(first_object, first_pindex, first_pindex + 1);
    }

    lookup_still_valid = TRUE;

    if (wired)
        fault_type = prot;

    first_m = NULL;

    /*
     * INVARIANTS (through entire routine):
     *
     * 1) At all times, we must either have the object lock or a busy
     *    page in some object to prevent some other process from trying to
     *    bring in the same page.
     *
     *    Note that we cannot hold any locks during the pager access or when
     *    waiting for memory, so we use a busy page then.
     *
     *    Note also that we aren't as concerned about more than one thread
     *    attempting to pager_data_unlock the same page at once, so we don't
     *    hold the page as busy then, but do record the highest unlock value
     *    so far.  [Unlock requests may also be delivered out of order.]
     *
     * 2) Once we have a busy page, we must remove it from the pageout
     *    queues, so that the pageout daemon will not grab it away.
     *
     * 3) To prevent another process from racing us down the shadow chain
     *    and entering a new page in the top object before we do, we must
     *    keep a busy page in the top object while following the shadow
     *    chain.
     *
     * 4) We must increment paging_in_progress on any object for which
     *    we have a busy page, to prevent vm_object_collapse from removing
     *    the busy page without our noticing.
     */

    /*
     * Search for the page at object/offset.
     */

    object = first_object;
    pindex = first_pindex;

    /*
     * See whether this page is resident
     */

    while (TRUE) {
        m = vm_page_lookup(object, pindex);
        if (m != NULL) {
            int queue;
            /*
             * If the page is being brought in, wait for it and
             * then retry.
             */
            if ((m->flags & PG_BUSY) || m->busy) {
                int s;

                UNLOCK_THINGS;
                s = splvm();
                if (((m->flags & PG_BUSY) || m->busy)) {
                    m->flags |= PG_WANTED | PG_REFERENCED;
                    cnt.v_intrans++;
                    tsleep(m, PSWP, "vmpfw", 0);
                }
                splx(s);
                vm_object_deallocate(first_object);
                goto RetryFault;
            }

            queue = m->queue;
            vm_page_unqueue_nowakeup(m);

            /*
             * Mark page busy for other processes, and the pagedaemon.
             */
            if (((queue - m->pc) == PQ_CACHE) &&
                (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
                vm_page_activate(m);
                UNLOCK_AND_DEALLOCATE;
                VM_WAIT;
                goto RetryFault;
            }

            m->flags |= PG_BUSY;

            if (/* m->valid && */
                ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
                m->object != kernel_object && m->object != kmem_object) {
                goto readrest;
            }
            break;
        }
        if (((object->type != OBJT_DEFAULT) &&
            (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
            || (object == first_object)) {

            if (pindex >= object->size) {
                UNLOCK_AND_DEALLOCATE;
                return (KERN_PROTECTION_FAILURE);
            }

            /*
             * Allocate a new page for this object/offset pair.
             */
            m = vm_page_alloc(object, pindex,
                (vp || object->backing_object) ? VM_ALLOC_NORMAL : VM_ALLOC_ZERO);

            if (m == NULL) {
                UNLOCK_AND_DEALLOCATE;
                VM_WAIT;
                goto RetryFault;
            }
        }
readrest:
        if (object->type != OBJT_DEFAULT &&
            (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) {
            int rv;
            int faultcount;
            int reqpage;
            int ahead, behind;

            ahead = VM_FAULT_READ_AHEAD;
            behind = VM_FAULT_READ_BEHIND;
            if (first_object->behavior == OBJ_RANDOM) {
                ahead = 0;
                behind = 0;
            }

            if ((first_object->type != OBJT_DEVICE) &&
                (first_object->behavior == OBJ_SEQUENTIAL)) {
                vm_pindex_t firstpindex, tmppindex;
                if (first_pindex <
                    2*(VM_FAULT_READ_BEHIND + VM_FAULT_READ_AHEAD + 1))
                    firstpindex = 0;
                else
                    firstpindex = first_pindex -
                        2*(VM_FAULT_READ_BEHIND + VM_FAULT_READ_AHEAD + 1);

                for (tmppindex = first_pindex - 1;
                    tmppindex >= firstpindex;
                    --tmppindex) {
                    vm_page_t mt;
                    mt = vm_page_lookup(first_object, tmppindex);
                    if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
                        break;
                    if (mt->busy ||
                        (mt->flags & (PG_BUSY|PG_FICTITIOUS)) ||
                        mt->hold_count ||
                        mt->wire_count)
                        continue;
                    if (mt->dirty == 0)
                        vm_page_test_dirty(mt);
                    if (mt->dirty) {
                        vm_page_protect(mt, VM_PROT_NONE);
                        vm_page_deactivate(mt);
                    } else {
                        vm_page_cache(mt);
                    }
                }

                ahead += behind;
                behind = 0;
            }

            /*
             * Now we find out if any other pages should be paged
             * in at this time.  This routine checks to see if the
             * pages surrounding this fault reside in the same
             * object as the page for this fault.  If they do,
             * then they are also faulted into the object.  The
             * returned array "marray" contains the vm_page_t
             * structs, one of which is the vm_page_t passed to
             * the routine.  The reqpage return value is the index
             * into marray for the vm_page_t passed to the routine.
             */
            faultcount = vm_fault_additional_pages(
                m, behind, ahead, marray, &reqpage);

            /*
             * Call the pager to retrieve the data, if any, after
             * releasing the lock on the map.
             */
            UNLOCK_MAP;

            rv = faultcount ?
                vm_pager_get_pages(object, marray, faultcount,
                    reqpage) : VM_PAGER_FAIL;

            if (rv == VM_PAGER_OK) {
                /*
                 * Found the page. Leave it busy while we play
                 * with it.
                 */

                /*
                 * Relookup in case pager changed page. Pager
                 * is responsible for disposition of old page
                 * if moved.
                 */
                m = vm_page_lookup(object, pindex);
                if (!m) {
                    UNLOCK_AND_DEALLOCATE;
                    goto RetryFault;
                }

                hardfault++;
                break;
            }
            /*
             * Remove the bogus page (which does not exist at this
             * object/offset); before doing so, we must get back
             * our object lock to preserve our invariant.
             *
             * Also wake up any other process that may want to bring
             * in this page.
             *
             * If this is the top-level object, we must leave the
             * busy page to prevent another process from rushing
             * past us, and inserting the page in that object at
             * the same time that we are.
             */

            if (rv == VM_PAGER_ERROR)
                printf("vm_fault: pager input (probably hardware) error, PID %d failure\n",
                    curproc->p_pid);
            /*
             * Data outside the range of the pager or an I/O error
             */
            /*
             * XXX - the check for kernel_map is a kludge to work
             * around having the machine panic on a kernel space
             * fault w/ I/O error.
             */
            if (((map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
                (rv == VM_PAGER_BAD)) {
                FREE_PAGE(m);
                UNLOCK_AND_DEALLOCATE;
                return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
            }
            if (object != first_object) {
                FREE_PAGE(m);
                /*
                 * XXX - we cannot just fall out at this
                 * point, m has been freed and is invalid!
                 */
            }
        }
        /*
         * We get here if the object has default pager (or unwiring) or the
         * pager doesn't have the page.
         */
        if (object == first_object)
            first_m = m;

        /*
         * Move on to the next object. Lock the next object before
         * unlocking the current one.
         */

        pindex += OFF_TO_IDX(object->backing_object_offset);
        next_object = object->backing_object;
        if (next_object == NULL) {
            /*
             * If there's no object left, fill the page in the top
             * object with zeros.
             */
            if (object != first_object) {
                vm_object_pip_wakeup(object);

                object = first_object;
                pindex = first_pindex;
                m = first_m;
            }
            first_m = NULL;

            if ((m->flags & PG_ZERO) == 0)
                vm_page_zero_fill(m);
            cnt.v_zfod++;
            break;
        } else {
            if (object != first_object) {
                vm_object_pip_wakeup(object);
            }
            object = next_object;
            object->paging_in_progress++;
        }
    }

#if defined(DIAGNOSTIC)
    if ((m->flags & PG_BUSY) == 0)
        panic("vm_fault: not busy after main loop");
#endif

    /*
     * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
     * is held.]
     */

    old_m = m;	/* save page that would be copied */

    /*
     * If the page is being written, but isn't already owned by the
     * top-level object, we have to copy it into a new page owned by the
     * top-level object.
     */

    if (object != first_object) {
        /*
         * We only really need to copy if we want to write it.
         */

        if (fault_type & VM_PROT_WRITE) {

            /*
             * This allows pages to be virtually copied from a backing_object
             * into the first_object, where the backing object has no other
             * refs to it, and cannot gain any more refs. Instead of a
             * bcopy, we just move the page from the backing object to the
             * first object. Note that we must mark the page dirty in the
             * first object so that it will go out to swap when needed.
             */
            if (lookup_still_valid &&
                /*
                 * Only one shadow object
                 */
                (object->shadow_count == 1) &&
                /*
                 * No COW refs, except us
                 */
                (object->ref_count == 1) &&
                /*
                 * No one else can look this object up
                 */
                (object->handle == NULL) &&
                /*
                 * No other ways to look the object up
                 */
                ((object->type == OBJT_DEFAULT) ||
                 (object->type == OBJT_SWAP)) &&
                /*
                 * We don't chase down the shadow chain
                 */
                (object == first_object->backing_object)) {

                /*
                 * get rid of the unnecessary page
                 */
                vm_page_protect(first_m, VM_PROT_NONE);
                PAGE_WAKEUP(first_m);
                vm_page_free(first_m);
                /*
                 * grab the page and put it into the process's object
                 */
                vm_page_rename(m, first_object, first_pindex);
                first_m = m;
                m->dirty = VM_PAGE_BITS_ALL;
                m = NULL;
            } else {
                /*
                 * Oh, well, let's copy it.
                 */
                vm_page_copy(m, first_m);
            }

            /*
             * This code handles the case where there are two references to the
             * backing object, and one reference is getting a copy of the
             * page. If the other reference is the only other object that
             * points to the backing object, then perform a virtual copy
             * from the backing object to the other object after the
             * page is copied to the current first_object. If the other
             * object already has the page, we destroy it in the backing object
             * performing an optimized collapse-type operation. We don't
             * bother removing the page from the backing object's swap space.
             */
            if (lookup_still_valid &&
                /*
                 * make sure that we have two shadow objs
                 */
                (object->shadow_count == 2) &&
                /*
                 * And no COW refs -- note that there are sometimes
                 * temp refs to objs, but ignore that case -- we just
                 * punt.
                 */
                (object->ref_count == 2) &&
                /*
                 * No one else can look us up
                 */
                (object->handle == NULL) &&
                /*
                 * Not something that can be referenced elsewhere
                 */
                ((object->type == OBJT_DEFAULT) ||
                 (object->type == OBJT_SWAP)) &&
                /*
                 * We don't bother chasing down the object chain
                 */
                (object == first_object->backing_object)) {

                vm_object_t other_object;
                vm_pindex_t other_pindex, other_pindex_offset;
                vm_page_t tm;

                other_object = TAILQ_FIRST(&object->shadow_head);
                if (other_object == first_object)
                    other_object = TAILQ_NEXT(other_object, shadow_list);
                if (!other_object)
                    panic("vm_fault: other object missing");
                if (other_object &&
                    (other_object->type == OBJT_DEFAULT) &&
                    (other_object->paging_in_progress == 0)) {
                    other_pindex_offset =
                        OFF_TO_IDX(other_object->backing_object_offset);
                    if (pindex >= other_pindex_offset) {
                        other_pindex = pindex - other_pindex_offset;
                        /*
                         * If the other object has the page, just free it.
                         */
                        if ((tm = vm_page_lookup(other_object, other_pindex))) {
                            if ((tm->flags & PG_BUSY) == 0 &&
                                tm->busy == 0 &&
                                tm->valid == VM_PAGE_BITS_ALL) {
                                /*
                                 * get rid of the unnecessary page
                                 */
                                vm_page_protect(m, VM_PROT_NONE);
                                PAGE_WAKEUP(m);
                                vm_page_free(m);
                                m = NULL;
                                tm->dirty = VM_PAGE_BITS_ALL;
                                first_m->dirty = VM_PAGE_BITS_ALL;
                            }
                        } else {
                            /*
                             * If the other object doesn't have the page,
                             * then we move it there.
                             */
                            vm_page_rename(m, other_object, other_pindex);
                            m->dirty = VM_PAGE_BITS_ALL;
                            m->valid = VM_PAGE_BITS_ALL;
                        }
                    }
                }
            }

            if (m) {
                if (m->queue != PQ_ACTIVE)
                    vm_page_activate(m);
                /*
                 * We no longer need the old page or object.
                 */
                PAGE_WAKEUP(m);
            }

            vm_object_pip_wakeup(object);
            /*
             * Only use the new page below...
             */

            cnt.v_cow_faults++;
            m = first_m;
            object = first_object;
            pindex = first_pindex;

            /*
             * Now that we've gotten the copy out of the way,
             * let's try to collapse the top object.
             *
             * But we have to play ugly games with
             * paging_in_progress to do that...
             */
            vm_object_pip_wakeup(object);
            vm_object_collapse(object);
            object->paging_in_progress++;
        } else {
            prot &= ~VM_PROT_WRITE;
        }
    }

    /*
     * We must verify that the maps have not changed since our last
     * lookup.
     */

    if (!lookup_still_valid) {
        vm_object_t retry_object;
        vm_pindex_t retry_pindex;
        vm_prot_t retry_prot;

        /*
         * Since map entries may be pageable, make sure we can take a
         * page fault on them.
         */

        /*
         * To avoid trying to write_lock the map while another process
         * has it read_locked (in vm_map_pageable), we do not try for
         * write permission. If the page is still writable, we will
         * get write permission. If it is not, or has been marked
         * needs_copy, we enter the mapping without write permission,
         * and will merely take another fault.
         */
        result = vm_map_lookup(&map, vaddr, fault_type & ~VM_PROT_WRITE,
            &entry, &retry_object, &retry_pindex, &retry_prot, &wired, &su);

        /*
         * If we don't need the page any longer, put it on the active
         * list (the easiest thing to do here). If no one needs it,
         * pageout will grab it eventually.
         */

        if (result != KERN_SUCCESS) {
            RELEASE_PAGE(m);
            UNLOCK_AND_DEALLOCATE;
            return (result);
        }
        lookup_still_valid = TRUE;

        if ((retry_object != first_object) ||
            (retry_pindex != first_pindex)) {
            RELEASE_PAGE(m);
            UNLOCK_AND_DEALLOCATE;
            goto RetryFault;
        }
        /*
         * Check whether the protection has changed or the object has
         * been copied while we left the map unlocked. Changing from
         * read to write permission is OK - we leave the page
         * write-protected, and catch the write fault. Changing from
         * write to read permission means that we can't mark the page
         * write-enabled after all.
         */
        prot &= retry_prot;
    }

    /*
     * Put this page into the physical map. We had to do the unlock above
     * because pmap_enter may cause other faults. We don't put the page
     * back on the active queue until later so that the page-out daemon
     * won't find us (yet).
     */

    if (prot & VM_PROT_WRITE) {
        m->flags |= PG_WRITEABLE;
        m->object->flags |= OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY;
        /*
         * If the fault is a write, we know that this page is being
         * written NOW. This will save on the pmap_is_modified() calls
         * later.
         */
        if (fault_flags & VM_FAULT_DIRTY) {
            m->dirty = VM_PAGE_BITS_ALL;
        }
    }

    UNLOCK_THINGS;
    m->valid = VM_PAGE_BITS_ALL;
    m->flags &= ~PG_ZERO;

    pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
    if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0))
        pmap_prefault(map->pmap, vaddr, entry, first_object);

    m->flags |= PG_MAPPED|PG_REFERENCED;
    if (fault_flags & VM_FAULT_HOLD)
        vm_page_hold(m);

    /*
     * If the page is not wired down, then put it where the pageout daemon
     * can find it.
     */
    if (fault_flags & VM_FAULT_WIRE_MASK) {
        if (wired)
            vm_page_wire(m);
        else
            vm_page_unwire(m);
    } else {
        if (m->queue != PQ_ACTIVE)
            vm_page_activate(m);
    }

    if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {
        if (hardfault) {
            curproc->p_stats->p_ru.ru_majflt++;
        } else {
            curproc->p_stats->p_ru.ru_minflt++;
        }
    }

    /*
     * Unlock everything, and return
     */

    PAGE_WAKEUP(m);
    vm_object_deallocate(first_object);

    return (KERN_SUCCESS);

}

/*
 *    vm_fault_wire:
 *
 *    Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(map, start, end)
    vm_map_t map;
    vm_offset_t start, end;
{

    register vm_offset_t va;
    register pmap_t pmap;
    int rv;

    pmap = vm_map_pmap(map);

    /*
     * Inform the physical mapping system that the range of addresses may
     * not fault, so that page tables and such can be locked down as well.
     */

    pmap_pageable(pmap, start, end, FALSE);

    /*
     * We simulate a fault to get the page and enter it in the physical
     * map.
     */

    for (va = start; va < end; va += PAGE_SIZE) {
        rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
            VM_FAULT_CHANGE_WIRING);
        if (rv) {
            if (va != start)
                vm_fault_unwire(map, start, va);
            return (rv);
        }
    }
    return (KERN_SUCCESS);
}

/*
 *    vm_fault_user_wire:
 *
 *    Wire down a range of virtual addresses in a map.  This
 *    is for user mode though, so we only ask for read access
 *    on currently read only sections.
 */
int
vm_fault_user_wire(map, start, end)
    vm_map_t map;
    vm_offset_t start, end;
{

    register vm_offset_t va;
    register pmap_t pmap;
    int rv;

    pmap = vm_map_pmap(map);

    /*
     * Inform the physical mapping system that the range of addresses may
     * not fault, so that page tables and such can be locked down as well.
     */
    pmap_pageable(pmap, start, end, FALSE);

    /*
     * We simulate a fault to get the page and enter it in the physical
     * map.
     */
    for (va = start; va < end; va += PAGE_SIZE) {
        rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_USER_WIRE);
        if (rv) {
            if (va != start)
                vm_fault_unwire(map, start, va);
            return (rv);
        }
    }
    return (KERN_SUCCESS);
}


/*
 *    vm_fault_unwire:
 *
 *    Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(map, start, end)
    vm_map_t map;
    vm_offset_t start, end;
{

    register vm_offset_t va, pa;
    register pmap_t pmap;

    pmap = vm_map_pmap(map);

    /*
     * Since the pages are wired down, we must be able to get their
     * mappings from the physical map system.
     */

    for (va = start; va < end; va += PAGE_SIZE) {
        pa = pmap_extract(pmap, va);
        if (pa != (vm_offset_t) 0) {
            pmap_change_wiring(pmap, va, FALSE);
            vm_page_unwire(PHYS_TO_VM_PAGE(pa));
        }
    }

    /*
     * Inform the physical mapping system that the range of addresses may
     * fault, so that page tables and such may be unwired themselves.
     */

    pmap_pageable(pmap, start, end, TRUE);

}

/*
 *    Routine:
 *        vm_fault_copy_entry
 *    Function:
 *        Copy all of the pages from a wired-down map entry to another.
 *
 *    In/out conditions:
 *        The source and destination maps must be locked for write.
 *        The source map entry must be wired down (or be a sharing map
 *        entry corresponding to a main map entry that is wired down).
 */

void
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
    vm_map_t dst_map;
    vm_map_t src_map;
    vm_map_entry_t dst_entry;
    vm_map_entry_t src_entry;
{
    vm_object_t dst_object;
    vm_object_t src_object;
    vm_ooffset_t dst_offset;
    vm_ooffset_t src_offset;
    vm_prot_t prot;
    vm_offset_t vaddr;
    vm_page_t dst_m;
    vm_page_t src_m;

#ifdef lint
    src_map++;
#endif /* lint */

    src_object = src_entry->object.vm_object;
    src_offset = src_entry->offset;

    /*
     * Create the top-level object for the destination entry. (Doesn't
     * actually shadow anything - we copy the pages directly.)
     */
    dst_object = vm_object_allocate(OBJT_DEFAULT,
        (vm_size_t) OFF_TO_IDX(dst_entry->end - dst_entry->start));

    dst_entry->object.vm_object = dst_object;
    dst_entry->offset = 0;

    prot = dst_entry->max_protection;

    /*
     * Loop through all of the pages in the entry's range, copying each
     * one from the source object (it should be there) to the destination
     * object.
     */
    for (vaddr = dst_entry->start, dst_offset = 0;
        vaddr < dst_entry->end;
        vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

        /*
         * Allocate a page in the destination object
         */
        do {
            dst_m = vm_page_alloc(dst_object,
                OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
            if (dst_m == NULL) {
                VM_WAIT;
            }
        } while (dst_m == NULL);

        /*
         * Find the page in the source object, and copy it in.
         * (Because the source is wired down, the page will be in
         * memory.)
         */
        src_m = vm_page_lookup(src_object,
            OFF_TO_IDX(dst_offset + src_offset));
        if (src_m == NULL)
            panic("vm_fault_copy_wired: page missing");

        vm_page_copy(src_m, dst_m);

        /*
         * Enter it in the pmap...
         */

        dst_m->flags &= ~PG_ZERO;
        pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
            prot, FALSE);
        dst_m->flags |= PG_WRITEABLE|PG_MAPPED;

        /*
         * Mark it no longer busy, and put it on the active list.
         */
        vm_page_activate(dst_m);
        PAGE_WAKEUP(dst_m);
    }
}


/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in. This routine brackets the viable
 * pages for the pages to be paged in.
 *
 * Inputs:
 *    m, rbehind, rahead
 *
 * Outputs:
 *    marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *    number of pages in marray
 */
int
vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
    vm_page_t m;
    int rbehind;
    int rahead;
    vm_page_t *marray;
    int *reqpage;
{
    int i;
    vm_object_t object;
    vm_pindex_t pindex, startpindex, endpindex, tpindex;
    vm_offset_t size;
    vm_page_t rtm;
    int treqpage;
    int cbehind, cahead;

    object = m->object;
    pindex = m->pindex;

    /*
     * we don't fault-ahead for device pager
     */
    if (object->type == OBJT_DEVICE) {
        *reqpage = 0;
        marray[0] = m;
        return 1;
    }

    /*
     * if the requested page is not available, then give up now
     */

    if (!vm_pager_has_page(object,
        OFF_TO_IDX(object->paging_offset) + pindex, &cbehind, &cahead))
        return 0;

    if ((cbehind == 0) && (cahead == 0)) {
        *reqpage = 0;
        marray[0] = m;
        return 1;
    }

    if (rahead > cahead) {
        rahead = cahead;
    }

    if (rbehind > cbehind) {
        rbehind = cbehind;
    }

    /*
     * try to do any readahead that we might have free pages for.
     */
    if ((rahead + rbehind) >
        ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
        pagedaemon_wakeup();
        *reqpage = 0;
        marray[0] = m;
        return 1;
    }

    /*
     * scan backward for the read behind pages -- in memory or on disk not
     * in same object
     */
    tpindex = pindex - 1;
    if (tpindex < pindex) {
        if (rbehind > pindex)
            rbehind = pindex;
        startpindex = pindex - rbehind;
        while (tpindex >= startpindex) {
            if (vm_page_lookup(object, tpindex)) {
                startpindex = tpindex + 1;
                break;
            }
            if (tpindex == 0)
                break;
            tpindex -= 1;
        }
    } else {
        startpindex = pindex;
    }

    /*
     * scan forward for the read ahead pages -- in memory or on disk not
     * in same object
     */
    tpindex = pindex + 1;
    endpindex = pindex + (rahead + 1);
    if (endpindex > object->size)
        endpindex = object->size;
    while (tpindex < endpindex) {
        if (vm_page_lookup(object, tpindex)) {
            break;
        }
        tpindex += 1;
    }
    endpindex = tpindex;

    /* calculate the number of pages in the cluster */
    size = endpindex - startpindex;

    /* calculate the page offset of the required page */
    treqpage = pindex - startpindex;

    /* see if we have space (again) */
    if ((cnt.v_free_count + cnt.v_cache_count) >
        (cnt.v_free_reserved + size)) {
        /*
         * get our pages and don't block for them
         */
        for (i = 0; i < size; i++) {
            if (i != treqpage) {
                rtm = vm_page_alloc(object,
                    startpindex + i,
                    VM_ALLOC_NORMAL);
                if (rtm == NULL) {
                    if (i < treqpage) {
                        int j;
                        for (j = 0; j < i; j++) {
                            FREE_PAGE(marray[j]);
                        }
                        *reqpage = 0;
                        marray[0] = m;
                        return 1;
                    } else {
                        size = i;
                        *reqpage = treqpage;
                        return size;
                    }
                }
                marray[i] = rtm;
            } else {
                marray[i] = m;
            }
        }

        *reqpage = treqpage;
        return size;
    }
    *reqpage = 0;
    marray[0] = m;
    return 1;
}