/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */
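
/*
 * Summary of the code below: vm_fault() looks up the faulting address in
 * the map, walks the shadow object chain looking for a resident page, asks
 * the pager to bring the page in when it might have it, falls back to
 * zero-fill for anonymous memory, performs any needed copy-on-write, and
 * finally enters the page into the physical map via pmap_enter().
 */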

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/mount.h>	/* XXX Temporary for VFS_LOCK_GIANT() */

#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};
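
/*
 * Note: prefault_pageorder visits candidate addresses by increasing
 * distance from the faulting address, alternating between the page behind
 * and the page ahead.  For a fault at address A it tries A - 1*PAGE_SIZE,
 * A + 1*PAGE_SIZE, A - 2*PAGE_SIZE, A + 2*PAGE_SIZE, and so on, out to
 * PFBAK/PFFOR pages in either direction.
 */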

static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);

#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;
	struct vnode *vp;
	int vfslocked;
};

static inline void
release_page(struct faultstate *fs)
{

	vm_page_wakeup(fs->m);
	vm_page_lock(fs->m);
	vm_page_deactivate(fs->m);
	vm_page_unlock(fs->m);
	fs->m = NULL;
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = FALSE;
	}
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	vm_object_pip_wakeup(fs->object);
	VM_OBJECT_UNLOCK(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_LOCK(fs->first_object);
		vm_page_lock(fs->first_m);
		vm_page_free(fs->first_m);
		vm_page_unlock(fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
		VM_OBJECT_UNLOCK(fs->first_object);
		fs->first_m = NULL;
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
	VFS_UNLOCK_GIANT(fs->vfslocked);
	fs->vfslocked = 0;
}

/*
 * TRYPAGER - used by vm_fault to calculate whether the pager for the
 *	      current object *might* contain the page.
 *
 *	      default objects are zero-fill, there is no real pager.
 */
#define TRYPAGER	(fs.object->type != OBJT_DEFAULT && \
			((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired))

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags)
{

	return (vm_fault_hold(map, vaddr, fault_type, fault_flags, NULL));
}
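
/*
 * Illustration (not part of this file): a machine-dependent trap handler
 * typically resolves a fault on a user address with a call along the
 * lines of
 *
 *	rv = vm_fault(&curproc->p_vmspace->vm_map, trunc_page(va),
 *	    ftype, VM_FAULT_NORMAL);
 *
 * where "va" is the faulting address and "ftype" encodes the access
 * (VM_PROT_READ, VM_PROT_WRITE, and/or VM_PROT_EXECUTE).  The exact form
 * varies by architecture.
 */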

int
vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	vm_prot_t prot;
	int is_first_object_locked, result;
	boolean_t growstack, wired;
	int map_generation;
	vm_object_t next_object;
	vm_page_t marray[VM_FAULT_READ], mt, mt_prev;
	int hardfault;
	int faultcount, ahead, behind, alloc_req;
	struct faultstate fs;
	struct vnode *vp;
	int locked, error;

	hardfault = 0;
	growstack = TRUE;
	PCPU_INC(cnt.v_vm_faults);
	fs.vp = NULL;
	fs.vfslocked = 0;
	faultcount = behind = 0;

RetryFault:;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
	    &fs.first_object, &fs.first_pindex, &prot, &wired);
	if (result != KERN_SUCCESS) {
		if (growstack && result == KERN_INVALID_ADDRESS &&
		    map != kernel_map) {
			result = vm_map_growstack(curproc, vaddr);
			if (result != KERN_SUCCESS)
				return (KERN_FAILURE);
			growstack = FALSE;
			goto RetryFault;
		}
		return (result);
	}

	map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		    (u_long)vaddr);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	VM_OBJECT_LOCK(fs.first_object);
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot | (fault_type & VM_PROT_COPY);

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
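	/*
	 * Loop invariant (summarizing the code below): at the top of each
	 * iteration fs.object is locked and has its paging-in-progress
	 * count bumped, so it can neither be destroyed nor shrunk while we
	 * examine it.
	 */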
	while (TRUE) {
		/*
		 * If the object is dead, we stop here
		 */
		if (fs.object->flags & OBJ_DEAD) {
			unlock_and_deallocate(&fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if page is resident
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			/*
			 * check for page-based copy on write.
			 * We check fs.object == fs.first_object so
			 * as to ensure the legacy COW mechanism is
			 * used when the page in question is part of
			 * a shadow object.  Otherwise, vm_page_cowfault()
			 * removes the page from the backing object,
			 * which is not what we want.
			 */
			vm_page_lock(fs.m);
			if ((fs.m->cow) &&
			    (fault_type & VM_PROT_WRITE) &&
			    (fs.object == fs.first_object)) {
				vm_page_cowfault(fs.m);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is busy via either VPO_BUSY or
			 * vm_page_t->busy because the vm_pager may be using
			 * vm_page_t->busy for pageouts ( and even pageins if
			 * it is the vnode pager ), and we could end up trying
			 * to pagein and pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a vm_page_t->busy page except, perhaps,
			 * to pmap it.
			 */
			if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_lock_queues();
				vm_page_flag_set(fs.m, PG_REFERENCED);
				vm_page_unlock_queues();
				vm_page_unlock(fs.m);
				if (fs.object != fs.first_object) {
					if (!VM_OBJECT_TRYLOCK(
					    fs.first_object)) {
						VM_OBJECT_UNLOCK(fs.object);
						VM_OBJECT_LOCK(fs.first_object);
						VM_OBJECT_LOCK(fs.object);
					}
					vm_page_lock(fs.first_m);
					vm_page_free(fs.first_m);
					vm_page_unlock(fs.first_m);
					vm_object_pip_wakeup(fs.first_object);
					VM_OBJECT_UNLOCK(fs.first_object);
					fs.first_m = NULL;
				}
				unlock_map(&fs);
				if (fs.m == vm_page_lookup(fs.object,
				    fs.pindex)) {
					vm_page_sleep_if_busy(fs.m, TRUE,
					    "vmpfw");
				}
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_UNLOCK(fs.object);
				PCPU_INC(cnt.v_intrans);
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}
			vm_pageq_remove(fs.m);
			vm_page_unlock(fs.m);

			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break-out ( we
			 * found the page ).
			 */
			vm_page_busy(fs.m);
			if (fs.m->valid != VM_PAGE_BITS_ALL &&
			    fs.m->object != kernel_object && fs.m->object != kmem_object) {
				goto readrest;
			}

			break;
		}

		/*
		 * Page is not resident.  If this is the search termination
		 * point or the pager might contain the page, allocate a new
		 * page.
		 */
		if (TRYPAGER || fs.object == fs.first_object) {
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 *
			 * Unlocked read of the p_flag is harmless.  At
			 * worst, the P_KILLED might not be observed
			 * there, and allocation can fail, causing
			 * restart and new reading of the p_flag.
			 */
			fs.m = NULL;
			if (!vm_page_count_severe() || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
				if ((fs.object->flags & OBJ_COLORED) == 0) {
					fs.object->flags |= OBJ_COLORED;
					fs.object->pg_color = atop(vaddr) -
					    fs.pindex;
				}
#endif
				alloc_req = P_KILLED(curproc) ?
				    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
				if (fs.object->type != OBJT_VNODE &&
				    fs.object->backing_object == NULL)
					alloc_req |= VM_ALLOC_ZERO;
				fs.m = vm_page_alloc(fs.object, fs.pindex,
				    alloc_req);
			}
			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				VM_WAITPFAULT;
				goto RetryFault;
			} else if (fs.m->valid == VM_PAGE_BITS_ALL)
				break;
		}

readrest:
		/*
		 * We have either found an existing page or allocated a new
		 * one.  The page may be invalid or only partially valid.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 */
		if (TRYPAGER) {
			int rv;
			int reqpage = 0;
			u_char behavior = vm_map_entry_behavior(fs.entry);

			if (behavior == MAP_ENTRY_BEHAV_RANDOM ||
			    P_KILLED(curproc)) {
				ahead = 0;
				behind = 0;
			} else {
				behind = (vaddr - fs.entry->start) >> PAGE_SHIFT;
				if (behind > VM_FAULT_READ_BEHIND)
					behind = VM_FAULT_READ_BEHIND;

				ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1;
				if (ahead > VM_FAULT_READ_AHEAD)
					ahead = VM_FAULT_READ_AHEAD;
			}
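
			/*
			 * Example: with VM_FAULT_READ_BEHIND = 7 and
			 * VM_FAULT_READ_AHEAD = 8, a fault well inside a
			 * large entry requests a window of up to 16 pages
			 * (7 behind, the faulting page, and 8 ahead),
			 * clipped at the entry boundaries above.
			 */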
			is_first_object_locked = FALSE;
			if ((behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
			    (behavior != MAP_ENTRY_BEHAV_RANDOM &&
			    fs.pindex >= fs.entry->lastr &&
			    fs.pindex < fs.entry->lastr + VM_FAULT_READ)) &&
			    (fs.first_object == fs.object ||
			    (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object))) &&
			    fs.first_object->type != OBJT_DEVICE &&
			    fs.first_object->type != OBJT_PHYS &&
			    fs.first_object->type != OBJT_SG) {
				vm_pindex_t firstpindex;

				if (fs.first_pindex < 2 * VM_FAULT_READ)
					firstpindex = 0;
				else
					firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;
				mt = fs.first_object != fs.object ?
				    fs.first_m : fs.m;
				KASSERT(mt != NULL, ("vm_fault: missing mt"));
				KASSERT((mt->oflags & VPO_BUSY) != 0,
				    ("vm_fault: mt %p not busy", mt));
				mt_prev = vm_page_prev(mt);

				/*
				 * note: partially valid pages cannot be
				 * included in the lookahead - NFS piecemeal
				 * writes will barf on it badly.
				 */
				while ((mt = mt_prev) != NULL &&
				    mt->pindex >= firstpindex &&
				    mt->valid == VM_PAGE_BITS_ALL) {
					mt_prev = vm_page_prev(mt);
					if (mt->busy ||
					    (mt->oflags & VPO_BUSY))
						continue;
					vm_page_lock(mt);
					if (mt->hold_count ||
					    mt->wire_count) {
						vm_page_unlock(mt);
						continue;
					}
					pmap_remove_all(mt);
					if (mt->dirty != 0)
						vm_page_deactivate(mt);
					else
						vm_page_cache(mt);
					vm_page_unlock(mt);
				}
				ahead += behind;
				behind = 0;
			}
			if (is_first_object_locked)
				VM_OBJECT_UNLOCK(fs.first_object);

			/*
			 * Call the pager to retrieve the data, if any, after
			 * releasing the lock on the map.  We hold a ref on
			 * fs.object and the pages are VPO_BUSY'd.
			 */
			unlock_map(&fs);

vnode_lock:
			if (fs.object->type == OBJT_VNODE) {
				vp = fs.object->handle;
				if (vp == fs.vp)
					goto vnode_locked;
				else if (fs.vp != NULL) {
					vput(fs.vp);
					fs.vp = NULL;
				}
				locked = VOP_ISLOCKED(vp);

				if (VFS_NEEDSGIANT(vp->v_mount) && !fs.vfslocked) {
					fs.vfslocked = 1;
					if (!mtx_trylock(&Giant)) {
						VM_OBJECT_UNLOCK(fs.object);
						mtx_lock(&Giant);
						VM_OBJECT_LOCK(fs.object);
						goto vnode_lock;
					}
				}
				if (locked != LK_EXCLUSIVE)
					locked = LK_SHARED;
				/* Do not sleep for vnode lock while fs.m is busy */
				error = vget(vp, locked | LK_CANRECURSE |
				    LK_NOWAIT, curthread);
				if (error != 0) {
					int vfslocked;

					vfslocked = fs.vfslocked;
					fs.vfslocked = 0;	/* Keep Giant */
					vhold(vp);
					release_page(&fs);
					unlock_and_deallocate(&fs);
					error = vget(vp, locked | LK_RETRY |
					    LK_CANRECURSE, curthread);
					vdrop(vp);
					fs.vp = vp;
					fs.vfslocked = vfslocked;
					KASSERT(error == 0,
					    ("vm_fault: vget failed"));
					goto RetryFault;
				}
				fs.vp = vp;
			}
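
			/*
			 * Note on the vget() failure path above: sleeping
			 * for the vnode lock while fs.m is busy could
			 * deadlock against a pageout of that same page, so
			 * we instead back out completely (dropping the page
			 * and all locks), take the vnode lock with LK_RETRY,
			 * and restart the fault from scratch.  The
			 * vhold()/vdrop() pair keeps the vnode from being
			 * recycled in the window where no lock is held.
			 */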
vnode_locked:
			KASSERT(fs.vp == NULL || !fs.map->system_map,
			    ("vm_fault: vnode-backed object mapped by system map"));

			/*
			 * Now find out whether any other pages should be
			 * paged in at this time.  vm_fault_additional_pages()
			 * checks whether the pages surrounding this fault
			 * reside in the same object as the page for this
			 * fault; if they do, they are faulted in as well.
			 * The returned array "marray" contains vm_page_t
			 * structs, one of which is the vm_page_t passed to
			 * the routine; the reqpage return value is its
			 * index within marray.
			 *
			 * fs.m plus the additional pages are VPO_BUSY'd.
			 */
			faultcount = vm_fault_additional_pages(
			    fs.m, behind, ahead, marray, &reqpage);

			rv = faultcount ?
			    vm_pager_get_pages(fs.object, marray, faultcount,
				reqpage) : VM_PAGER_FAIL;

			if (rv == VM_PAGER_OK) {
				/*
				 * Found the page.  Leave it busy while we
				 * play with it.
				 */

				/*
				 * Relookup in case pager changed page.  Pager
				 * is responsible for disposition of old page
				 * if moved.
				 */
				fs.m = vm_page_lookup(fs.object, fs.pindex);
				if (!fs.m) {
					unlock_and_deallocate(&fs);
					goto RetryFault;
				}

				hardfault++;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */
			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);
			/*
			 * Data outside the range of the pager or an I/O error
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
			    (rv == VM_PAGER_BAD)) {
				vm_page_lock(fs.m);
				vm_page_free(fs.m);
				vm_page_unlock(fs.m);
				fs.m = NULL;
				unlock_and_deallocate(&fs);
				return ((rv == VM_PAGER_ERROR) ?
				    KERN_FAILURE : KERN_PROTECTION_FAILURE);
			}
			if (fs.object != fs.first_object) {
				vm_page_lock(fs.m);
				vm_page_free(fs.m);
				vm_page_unlock(fs.m);
				fs.m = NULL;
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}

		/*
		 * We get here if the object has a default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_UNLOCK(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				fs.m = fs.first_m;
				VM_OBJECT_LOCK(fs.object);
			}
			fs.first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				pmap_zero_page(fs.m);
			} else {
				PCPU_INC(cnt.v_ozfod);
			}
			PCPU_INC(cnt.v_zfod);
			fs.m->valid = VM_PAGE_BITS_ALL;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			KASSERT(fs.object != next_object,
			    ("object loop %p", next_object));
			VM_OBJECT_LOCK(next_object);
			vm_object_pip_add(next_object, 1);
			if (fs.object != fs.first_object)
				vm_object_pip_wakeup(fs.object);
			VM_OBJECT_UNLOCK(fs.object);
			fs.object = next_object;
		}
	}
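
	/*
	 * Summary of the exits from the loop above: either a fully valid
	 * page was found resident, the pager supplied the page, or the
	 * bottom of the shadow chain was reached and the page was
	 * zero-filled.  In every case fs.m is busy and fs.object is locked.
	 */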
	KASSERT((fs.m->oflags & VPO_BUSY) != 0,
	    ("vm_fault: not busy after main loop"));

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			is_first_object_locked = FALSE;
			if (
				/*
				 * Only one shadow object
				 */
				(fs.object->shadow_count == 1) &&
				/*
				 * No COW refs, except us
				 */
				(fs.object->ref_count == 1) &&
				/*
				 * No one else can look this object up
				 */
				(fs.object->handle == NULL) &&
				/*
				 * No other ways to look the object up
				 */
				((fs.object->type == OBJT_DEFAULT) ||
				 (fs.object->type == OBJT_SWAP)) &&
				(is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
				/*
				 * We don't chase down the shadow chain
				 */
				fs.object == fs.first_object->backing_object) {
				/*
				 * get rid of the unnecessary page
				 */
				vm_page_lock(fs.first_m);
				vm_page_free(fs.first_m);
				vm_page_unlock(fs.first_m);
				/*
				 * grab the page and put it into the
				 * process's object.  The page is
				 * automatically made dirty.
				 */
				vm_page_lock(fs.m);
				vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
				vm_page_unlock(fs.m);
				vm_page_busy(fs.m);
				fs.first_m = fs.m;
				fs.m = NULL;
				PCPU_INC(cnt.v_cow_optim);
			} else {
				/*
				 * Oh, well, let's copy it.
				 */
				pmap_copy_page(fs.m, fs.first_m);
				fs.first_m->valid = VM_PAGE_BITS_ALL;
				if (wired && (fault_flags &
				    VM_FAULT_CHANGE_WIRING) == 0) {
					vm_page_lock(fs.first_m);
					vm_page_wire(fs.first_m);
					vm_page_unlock(fs.first_m);

					vm_page_lock(fs.m);
					vm_page_unwire(fs.m, FALSE);
					vm_page_unlock(fs.m);
				}
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
			}
			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs.object);
			VM_OBJECT_UNLOCK(fs.object);
			/*
			 * Only use the new page below...
			 */
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;
			fs.m = fs.first_m;
			if (!is_first_object_locked)
				VM_OBJECT_LOCK(fs.object);
			PCPU_INC(cnt.v_cow_faults);
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		vm_object_t retry_object;
		vm_pindex_t retry_pindex;
		vm_prot_t retry_prot;

		if (!vm_map_trylock_read(fs.map)) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		fs.lookup_still_valid = TRUE;
		if (fs.map->timestamp != map_generation) {
			result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
			    &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);

			/*
			 * If we don't need the page any longer, put it on the inactive
			 * list (the easiest thing to do here).  If no one needs it,
			 * pageout will grab it eventually.
			 */
			if (result != KERN_SUCCESS) {
				release_page(&fs);
				unlock_and_deallocate(&fs);

				/*
				 * If retry of map lookup would have blocked then
				 * retry fault from start.
				 */
				if (result == KERN_FAILURE)
					goto RetryFault;
				return (result);
			}
			if ((retry_object != fs.first_object) ||
			    (retry_pindex != fs.first_pindex)) {
				release_page(&fs);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Check whether the protection has changed or the object has
			 * been copied while we left the map unlocked.  Changing from
			 * read to write permission is OK - we leave the page
			 * write-protected, and catch the write fault.  Changing from
			 * write to read permission means that we can't mark the page
			 * write-enabled after all.
			 */
			prot &= retry_prot;
		}
	}
	/*
	 * If the page was filled by a pager, update the map entry's
	 * last read offset.  Since the pager does not return the
	 * actual set of pages that it read, this update is based on
	 * the requested set.  Typically, the requested and actual
	 * sets are the same.
	 *
	 * XXX The following assignment modifies the map
	 * without holding a write lock on it.
	 */
	if (hardfault)
		fs.entry->lastr = fs.pindex + faultcount - behind;

	if ((prot & VM_PROT_WRITE) != 0 ||
	    (fault_flags & VM_FAULT_DIRTY) != 0) {
		vm_object_set_writeable_dirty(fs.object);

		/*
		 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping-ponging.
		 */
		if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
			if (fs.m->dirty == 0)
				fs.m->oflags |= VPO_NOSYNC;
		} else {
			fs.m->oflags &= ~VPO_NOSYNC;
		}

		/*
		 * If the fault is a write, we know that this page is being
		 * written NOW so dirty it explicitly to save on
		 * pmap_is_modified() calls later.
		 *
		 * Also tell the backing pager, if any, that it should remove
		 * any swap backing since the page is now dirty.
		 */
		if (((fault_type & VM_PROT_WRITE) != 0 &&
		    (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) ||
		    (fault_flags & VM_FAULT_DIRTY) != 0) {
			vm_page_dirty(fs.m);
			vm_pager_page_unswapped(fs.m);
		}
	}

	/*
	 * Page had better still be busy
	 */
	KASSERT(fs.m->oflags & VPO_BUSY,
	    ("vm_fault: page %p not busy!", fs.m));
	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
	    ("vm_fault: page %p partially invalid", fs.m));
	VM_OBJECT_UNLOCK(fs.object);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
	if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
	VM_OBJECT_LOCK(fs.object);
	vm_page_lock(fs.m);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (fault_flags & VM_FAULT_CHANGE_WIRING) {
		if (wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else
		vm_page_activate(fs.m);
	if (m_hold != NULL) {
		*m_hold = fs.m;
		vm_page_hold(fs.m);
	}
	vm_page_unlock(fs.m);
	vm_page_wakeup(fs.m);

	/*
	 * Unlock everything, and return
	 */
	unlock_and_deallocate(&fs);
	if (hardfault)
		curthread->td_ru.ru_majflt++;
	else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}
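
/*
 * Note on vm_fault_hold(): when "m_hold" is non-NULL, the faulted page is
 * returned to the caller with an extra hold reference; the caller is
 * responsible for releasing it with vm_page_unhold() once the page is no
 * longer needed.  vm_fault_quick_hold_pages() below builds on this.
 */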

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	int i;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	vm_object_t object;

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		starta = 0;
	}

	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;

		addr = addra + prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		/*
		 * give up when a page is not in memory
		 */
		if (m == NULL) {
			VM_OBJECT_UNLOCK(lobject);
			break;
		}
		if (m->valid == VM_PAGE_BITS_ALL &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		VM_OBJECT_UNLOCK(lobject);
	}
}
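
/*
 * Prefaulting is strictly opportunistic: pmap_enter_quick() never sleeps,
 * any candidate page that is only partially valid or fictitious is simply
 * skipped, and the loop above gives up entirely once a page is not
 * resident.
 */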

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	end = round_page(addr + len);
	addr = trunc_page(addr);

	/*
	 * Check for illegal addresses.
	 */
	if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map))
		return (-1);

	count = howmany(end - addr, PAGE_SIZE);
	if (count > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 */
			vm_page_lock_queues();
			vm_page_dirty(*mp);
			vm_page_unlock_queues();
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 */
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault_hold(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL) {
			vm_page_lock(*mp);
			vm_page_unhold(*mp);
			vm_page_unlock(*mp);
		}
	return (-1);
}
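
/*
 * Usage sketch (illustrative only; "uva", "len", "pages", and MAX_PAGES
 * are hypothetical): a caller that needs user buffer pages pinned across
 * an I/O operation might do
 *
 *	count = vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
 *	    uva, len, VM_PROT_READ | VM_PROT_WRITE, pages, MAX_PAGES);
 *	if (count == -1)
 *		return (EFAULT);
 *	... perform the I/O on "pages" ...
 *
 * dropping each hold afterwards with vm_page_unhold() under the page
 * lock, as the error path above does.
 */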

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_offset_t va;
	int rv;

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.  For user wiring, we only ask for read access on currently
	 * read-only sections.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va, fictitious);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}

/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */
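	/*
	 * For fictitious ranges (e.g. device memory) there is no vm_page_t
	 * wiring to release, so only the pmap-level wiring bit is cleared.
	 */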
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != 0) {
			pmap_change_wiring(pmap, va, FALSE);
			if (!fictitious) {
				m = PHYS_TO_VM_PAGE(pa);
				vm_page_lock(m);
				vm_page_unwire(m, TRUE);
				vm_page_unlock(m);
			}
		}
	}
}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Create a new shadow object backing dst_entry with a private
 *		copy of all underlying pages.  When src_entry is equal to
 *		dst_entry, the function implements COW for a wired-down map
 *		entry.  Otherwise, it forks a wired entry into dst_map.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t src_readonly, upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);
	src_readonly = (src_entry->protection & VM_PROT_WRITE) == 0;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	dst_object = vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(dst_entry->end - dst_entry->start));
#if VM_NRESERVLEVEL > 0
	dst_object->flags |= OBJ_COLORED;
	dst_object->pg_color = atop(dst_entry->start);
#endif

	VM_OBJECT_LOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;
	dst_object->charge = dst_entry->end - dst_entry->start;
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else {
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}
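
	/*
	 * Note (summarizing the block above): the swap accounting charge
	 * for the new object is either added to "*fork_charge" and billed
	 * to the current thread's credential (the fork case), or inherited
	 * from the map entry's existing credential.
	 */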
	access = prot = dst_entry->protection;
	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the pages in the entry's range, copying each
	 * one from the source object (it should be there) to the destination
	 * object.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {

		/*
		 * Allocate a page in the destination object.
		 */
		do {
			dst_m = vm_page_alloc(dst_object, dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_UNLOCK(dst_object);
				VM_WAIT;
				VM_OBJECT_LOCK(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be in
		 * memory.)
		 */
		VM_OBJECT_LOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    src_readonly &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");
		pmap_copy_page(src_m, dst_m);
		VM_OBJECT_UNLOCK(object);
		dst_m->valid = VM_PAGE_BITS_ALL;
		VM_OBJECT_UNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 */
		pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_LOCK(dst_object);

		if (upgrade) {
			vm_page_lock(src_m);
			vm_page_unwire(src_m, 0);
			vm_page_unlock(src_m);

			vm_page_lock(dst_m);
			vm_page_wire(dst_m);
			vm_page_unlock(dst_m);
		} else {
			vm_page_lock(dst_m);
			vm_page_activate(dst_m);
			vm_page_unlock(dst_m);
		}
		vm_page_wakeup(dst_m);
	}
	VM_OBJECT_UNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}
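
/*
 * Layout of the array produced by vm_fault_additional_pages() below, for a
 * requested page "m" with "i" read-behind pages actually available and some
 * number of read-ahead pages:
 *
 *	marray: [ m-i ... m-1 ][ m ][ m+1 ... ]
 *	                         ^
 *	                    reqpage = i
 *
 * Both ends of the window are trimmed to what the pager reports it can
 * provide and to pages not already resident.
 */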

/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  This routine brackets the viable
 * pages for the pages to be paged in.
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *	marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *	number of pages in marray
 */
static int
vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
    vm_page_t *marray, int *reqpage)
{
	int i, j;
	vm_object_t object;
	vm_pindex_t pindex, startpindex, endpindex, tpindex;
	vm_page_t rtm;
	int cbehind, cahead;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	object = m->object;
	pindex = m->pindex;
	cbehind = cahead = 0;

	/*
	 * if the requested page is not available, then give up now
	 */
	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
		return (0);
	}

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return (1);
	}

	if (rahead > cahead) {
		rahead = cahead;
	}

	if (rbehind > cbehind) {
		rbehind = cbehind;
	}

	/*
	 * scan backward for the read behind pages -- in memory
	 */
	if (pindex > 0) {
		if (rbehind > pindex) {
			rbehind = pindex;
			startpindex = 0;
		} else {
			startpindex = pindex - rbehind;
		}

		if ((rtm = TAILQ_PREV(m, pglist, listq)) != NULL &&
		    rtm->pindex >= startpindex)
			startpindex = rtm->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
		for (i = 0, tpindex = pindex - 1; tpindex >= startpindex &&
		    tpindex < pindex; i++, tpindex--) {

			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
			    VM_ALLOC_IFNOTCACHED);
			if (rtm == NULL) {
				/*
				 * Shift the allocated pages to the
				 * beginning of the array.
				 */
				for (j = 0; j < i; j++) {
					marray[j] = marray[j + tpindex + 1 -
					    startpindex];
				}
				break;
			}

			marray[tpindex - startpindex] = rtm;
		}
	} else {
		startpindex = 0;
		i = 0;
	}

	marray[i] = m;
	/* page offset of the required page */
	*reqpage = i;

	tpindex = pindex + 1;
	i++;

	/*
	 * scan forward for the read ahead pages
	 */
	endpindex = tpindex + rahead;
	if ((rtm = TAILQ_NEXT(m, listq)) != NULL && rtm->pindex < endpindex)
		endpindex = rtm->pindex;
	if (endpindex > object->size)
		endpindex = object->size;

	for (; tpindex < endpindex; i++, tpindex++) {

		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
		    VM_ALLOC_IFNOTCACHED);
		if (rtm == NULL) {
			break;
		}

		marray[i] = rtm;
	}

	/* return number of pages */
	return (i);
}