/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vm_fault_additional_pages __P((vm_page_t, int,
	    int, vm_page_t *, int *));

#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
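
/*
 * The state of a fault in progress, gathered into one structure so the
 * helper functions below (release_page(), unlock_map(), _unlock_things())
 * can unwind it at any point: the page, object and pindex currently being
 * examined; the top-level ("first") object, page and pindex; the map
 * lookup results; and the vnode, if any, locked for the top-level object.
 */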
struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;
	struct vnode *vp;
};

static __inline void
release_page(struct faultstate *fs)
{
	vm_page_wakeup(fs->m);
	vm_page_deactivate(fs->m);
	fs->m = NULL;
}

static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = FALSE;
	}
}

static void
_unlock_things(struct faultstate *fs, int dealloc)
{
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
		fs->first_m = NULL;
	}
	if (dealloc) {
		vm_object_deallocate(fs->first_object);
	}
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#define unlock_things(fs) _unlock_things(fs, 0)
#define unlock_and_deallocate(fs) _unlock_things(fs, 1)

/*
 * TRYPAGER - used by vm_fault to calculate whether the pager for the
 *	      current object *might* contain the page.
 *
 *	      default objects are zero-fill, there is no real pager.
 */

#define TRYPAGER	(fs.object->type != OBJT_DEFAULT && \
			(((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
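
/*
 * For example, a fault on a plain anonymous mapping (OBJT_DEFAULT all the
 * way down the shadow chain) never consults a pager: TRYPAGER evaluates
 * false at every level and the fault is eventually satisfied by zero-fill
 * in the loop below.
 */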

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
	vm_prot_t prot;
	int result;
	boolean_t wired;
	int map_generation;
	vm_object_t next_object;
	vm_page_t marray[VM_FAULT_READ];
	int hardfault;
	int faultcount;
	struct faultstate fs;

	cnt.v_vm_faults++;	/* needs lock XXX */
	hardfault = 0;

RetryFault:;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	fs.map = map;
	if ((result = vm_map_lookup(&fs.map, vaddr,
	    fault_type, &fs.entry, &fs.first_object,
	    &fs.first_pindex, &prot, &wired)) != KERN_SUCCESS) {
		if ((result != KERN_PROTECTION_FAILURE) ||
		    ((fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)) {
			return result;
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
		    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
		    &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
		if (result != KERN_SUCCESS) {
			return result;
		}

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 */
		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
			fs.entry->max_protection &= ~VM_PROT_WRITE;
	}

	map_generation = fs.map->timestamp;
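
	/*
	 * Entries marked MAP_ENTRY_NOFAULT must never be faulted on;
	 * taking a fault on one indicates a kernel bug, so panic rather
	 * than try to recover.
	 */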
	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		    (u_long)vaddr);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 */
	vm_object_reference(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.vp = vnode_pager_lock(fs.first_object);
	if ((fault_type & VM_PROT_WRITE) &&
	    (fs.first_object->type == OBJT_VNODE)) {
		vm_freeze_copyopts(fs.first_object,
		    fs.first_pindex, fs.first_pindex + 1);
	}

	fs.lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */

	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	while (TRUE) {
		/*
		 * If the object is dead, we stop here
		 */

		if (fs.object->flags & OBJ_DEAD) {
			unlock_and_deallocate(&fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if page is resident
		 */

		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			int queue, s;
			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is busy via either PG_BUSY or
			 * vm_page_t->busy because the vm_pager may be using
			 * vm_page_t->busy for pageouts ( and even pageins if
			 * it is the vnode pager ), and we could end up trying
			 * to pagein and pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a vm_page_t->busy page except, perhaps,
			 * to pmap it.
			 */
			if ((fs.m->flags & PG_BUSY) || fs.m->busy) {
				unlock_things(&fs);
				(void)vm_page_sleep_busy(fs.m, TRUE, "vmpfw");
				cnt.v_intrans++;
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}
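
			/*
			 * The page is resident and not busy.  Take it off
			 * whatever page queue it is on while we operate on
			 * it.  If it came from the cache queue and free
			 * memory is severely depleted, reactivate it and
			 * wait for the pagedaemon to recover some memory
			 * before retrying the fault.
			 */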
			queue = fs.m->queue;
			s = splvm();
			vm_page_unqueue_nowakeup(fs.m);
			splx(s);

			if ((queue - fs.m->pc) == PQ_CACHE && vm_page_count_severe()) {
				vm_page_activate(fs.m);
				unlock_and_deallocate(&fs);
				VM_WAIT;
				goto RetryFault;
			}

			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break-out ( we
			 * found the page ).
			 */

			vm_page_busy(fs.m);
			if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
			    fs.m->object != kernel_object && fs.m->object != kmem_object) {
				goto readrest;
			}

			break;
		}

		/*
		 * Page is not resident.  If this is the search termination
		 * or the pager might contain the page, allocate a new page.
		 */

		if (TRYPAGER || fs.object == fs.first_object) {
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 */
			fs.m = NULL;
			if (!vm_page_count_severe()) {
				fs.m = vm_page_alloc(fs.object, fs.pindex,
				    (fs.vp || fs.object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_ZERO);
			}
			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				VM_WAIT;
				goto RetryFault;
			}
		}

readrest:
		/*
		 * We have found a valid page or we have allocated a new page.
		 * The page thus may not be valid or may not be entirely
		 * valid.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 */
		if (TRYPAGER) {
			int rv;
			int reqpage;
			int ahead, behind;
			u_char behavior = vm_map_entry_behavior(fs.entry);

			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				ahead = 0;
				behind = 0;
			} else {
				behind = (vaddr - fs.entry->start) >> PAGE_SHIFT;
				if (behind > VM_FAULT_READ_BEHIND)
					behind = VM_FAULT_READ_BEHIND;

				ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1;
				if (ahead > VM_FAULT_READ_AHEAD)
					ahead = VM_FAULT_READ_AHEAD;
			}

			if ((fs.first_object->type != OBJT_DEVICE) &&
			    (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
			     (behavior != MAP_ENTRY_BEHAV_RANDOM &&
			      fs.pindex >= fs.entry->lastr &&
			      fs.pindex < fs.entry->lastr + VM_FAULT_READ))
			) {
				vm_pindex_t firstpindex, tmppindex;

				if (fs.first_pindex < 2 * VM_FAULT_READ)
					firstpindex = 0;
				else
					firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;

				/*
				 * note: partially valid pages cannot be
				 * included in the lookahead - NFS piecemeal
				 * writes will barf on it badly.
				 */

				for (tmppindex = fs.first_pindex - 1;
				    tmppindex >= firstpindex;
				    --tmppindex) {
					vm_page_t mt;
					mt = vm_page_lookup(fs.first_object, tmppindex);
					if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
						break;
					if (mt->busy ||
					    (mt->flags & (PG_BUSY | PG_FICTITIOUS)) ||
					    mt->hold_count ||
					    mt->wire_count)
						continue;
					if (mt->dirty == 0)
						vm_page_test_dirty(mt);
					if (mt->dirty) {
						vm_page_protect(mt, VM_PROT_NONE);
						vm_page_deactivate(mt);
					} else {
						vm_page_cache(mt);
					}
				}

				ahead += behind;
				behind = 0;
			}
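
			/*
			 * For a sequential fault, the pages behind the fault
			 * address were just retired because the scan will not
			 * revisit them, and their read-behind allowance was
			 * folded into "ahead" so the clustering code below
			 * builds the largest possible forward window.
			 */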
			/*
			 * now we find out if any other pages should be paged
			 * in at this time.  This routine checks to see if the
			 * pages surrounding this fault reside in the same
			 * object as the page for this fault.  If they do,
			 * then they are faulted in as well.  The array
			 * "marray" returned contains an array of vm_page_t
			 * structs where one of them is the vm_page_t passed
			 * to the routine.  The reqpage return value is the
			 * index into the marray for the vm_page_t passed to
			 * the routine.
			 *
			 * fs.m plus the additional pages are PG_BUSY'd.
			 */
			faultcount = vm_fault_additional_pages(
			    fs.m, behind, ahead, marray, &reqpage);

			/*
			 * update lastr imperfectly (we do not know how much
			 * getpages will actually read), but good enough.
			 */
			fs.entry->lastr = fs.pindex + faultcount - behind;

			/*
			 * Call the pager to retrieve the data, if any, after
			 * releasing the lock on the map.  We hold a ref on
			 * fs.object and the pages are PG_BUSY'd.
			 */
			unlock_map(&fs);

			rv = faultcount ?
			    vm_pager_get_pages(fs.object, marray, faultcount,
				reqpage) : VM_PAGER_FAIL;

			if (rv == VM_PAGER_OK) {
				/*
				 * Found the page. Leave it busy while we play
				 * with it.
				 */

				/*
				 * Relookup in case pager changed page. Pager
				 * is responsible for disposition of old page
				 * if moved.
				 */
				fs.m = vm_page_lookup(fs.object, fs.pindex);
				if (!fs.m) {
					unlock_and_deallocate(&fs);
					goto RetryFault;
				}

				hardfault++;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */

			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);
			/*
			 * Data outside the range of the pager or an I/O error
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
			    (rv == VM_PAGER_BAD)) {
				vm_page_free(fs.m);
				fs.m = NULL;
				unlock_and_deallocate(&fs);
				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
			}
			if (fs.object != fs.first_object) {
				vm_page_free(fs.m);
				fs.m = NULL;
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}

		/*
		 * We get here if the object has a default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */

		fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				fs.m = fs.first_m;
			}
			fs.first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				vm_page_zero_fill(fs.m);
			} else {
				cnt.v_ozfod++;
			}
			cnt.v_zfod++;
			fs.m->valid = VM_PAGE_BITS_ALL;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
			}
			KASSERT(fs.object != next_object, ("object loop %p", next_object));
			fs.object = next_object;
			vm_object_pip_add(fs.object, 1);
		}
	}

	KASSERT((fs.m->flags & PG_BUSY) != 0,
	    ("vm_fault: not busy after main loop"));

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */

	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			if (map_generation == fs.map->timestamp &&
				/*
				 * Only one shadow object
				 */
				(fs.object->shadow_count == 1) &&
				/*
				 * No COW refs, except us
				 */
				(fs.object->ref_count == 1) &&
				/*
				 * No one else can look this object up
				 */
				(fs.object->handle == NULL) &&
				/*
				 * No other ways to look the object up
				 */
				((fs.object->type == OBJT_DEFAULT) ||
				 (fs.object->type == OBJT_SWAP)) &&
				/*
				 * We don't chase down the shadow chain
				 */
				(fs.object == fs.first_object->backing_object) &&

				/*
				 * grab the lock if we need to
				 */
				(fs.lookup_still_valid ||
				 lockmgr(&fs.map->lock, LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) == 0)
			    ) {

				fs.lookup_still_valid = 1;
				/*
				 * get rid of the unnecessary page
				 */
				vm_page_protect(fs.first_m, VM_PROT_NONE);
				vm_page_free(fs.first_m);
				fs.first_m = NULL;

				/*
				 * grab the page and put it into the
				 * process's object.  The page is
				 * automatically made dirty.
				 */
				vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
				fs.first_m = fs.m;
				vm_page_busy(fs.first_m);
				fs.m = NULL;
				cnt.v_cow_optim++;
			} else {
				/*
				 * Oh, well, let's copy it.
				 */
				vm_page_copy(fs.m, fs.first_m);
			}

			if (fs.m) {
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
			}

			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */

			vm_object_pip_wakeup(fs.object);

			/*
			 * Only use the new page below...
			 */

			cnt.v_cow_faults++;
			fs.m = fs.first_m;
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;

		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */

	if (!fs.lookup_still_valid &&
	    (fs.map->timestamp != map_generation)) {
		vm_object_t retry_object;
		vm_pindex_t retry_pindex;
		vm_prot_t retry_prot;

		/*
		 * Since map entries may be pageable, make sure we can take a
		 * page fault on them.
		 */

		/*
		 * Unlock vnode before the lookup to avoid deadlock.  E.g.
		 * avoid a deadlock between the inode and exec_map that can
		 * occur due to locks being obtained in different orders.
		 */

		if (fs.vp != NULL) {
			vput(fs.vp);
			fs.vp = NULL;
		}

		/*
		 * To avoid trying to write_lock the map while another process
		 * has it read_locked (in vm_map_pageable), we do not try for
		 * write permission.  If the page is still writable, we will
		 * get write permission.  If it is not, or has been marked
		 * needs_copy, we enter the mapping without write permission,
		 * and will merely take another fault.
		 */
		result = vm_map_lookup(&fs.map, vaddr, fault_type & ~VM_PROT_WRITE,
		    &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);
		map_generation = fs.map->timestamp;

		/*
		 * If we don't need the page any longer, put it on the active
		 * list (the easiest thing to do here).  If no one needs it,
		 * pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			return (result);
		}
		fs.lookup_still_valid = TRUE;

		if ((retry_object != fs.first_object) ||
		    (retry_pindex != fs.first_pindex)) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		/*
		 * Check whether the protection has changed or the object has
		 * been copied while we left the map unlocked.  Changing from
		 * read to write permission is OK - we leave the page
		 * write-protected, and catch the write fault.  Changing from
		 * write to read permission means that we can't mark the page
		 * write-enabled after all.
		 */
		prot &= retry_prot;
	}

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter may cause other faults.  We don't put the page
	 * back on the active queue until later so that the page-out daemon
	 * won't find us (yet).
	 */

	if (prot & VM_PROT_WRITE) {
		vm_page_flag_set(fs.m, PG_WRITEABLE);
		vm_object_set_flag(fs.m->object,
		    OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
		/*
		 * If the fault is a write, we know that this page is being
		 * written NOW.  This will save on the pmap_is_modified() calls
		 * later.
		 *
		 * Also tell the backing pager, if any, that it should remove
		 * any swap backing since the page is now dirty.
		 */
		if (fault_flags & VM_FAULT_DIRTY) {
			vm_page_dirty(fs.m);
			vm_pager_page_unswapped(fs.m);
		}
	}

	/*
	 * Page had better still be busy
	 */

	KASSERT(fs.m->flags & PG_BUSY,
	    ("vm_fault: page %p not busy!", fs.m));

	unlock_things(&fs);

	/*
	 * Sanity check: page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */

	if (fs.m->valid != VM_PAGE_BITS_ALL) {
		vm_page_zero_invalid(fs.m, TRUE);
		printf("Warning: page %p partially invalid on fault\n", fs.m);
	}

	pmap_enter(fs.map->pmap, vaddr, VM_PAGE_TO_PHYS(fs.m), prot, wired);

	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
		pmap_prefault(fs.map->pmap, vaddr, fs.entry);
	}

	vm_page_flag_clear(fs.m, PG_ZERO);
	vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED);
	if (fault_flags & VM_FAULT_HOLD)
		vm_page_hold(fs.m);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */

	if (fault_flags & VM_FAULT_WIRE_MASK) {
		if (wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else {
		vm_page_activate(fs.m);
	}

	if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {
		if (hardfault) {
			curproc->p_stats->p_ru.ru_majflt++;
		} else {
			curproc->p_stats->p_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return
	 */

	vm_page_wakeup(fs.m);
	vm_object_deallocate(fs.first_object);

	return (KERN_SUCCESS);

}
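
/*
 * Example (a sketch, not part of this file): the machine-dependent trap
 * handler is the main caller of vm_fault().  It typically looks like
 *
 *	rv = vm_fault(map, trunc_page(eva), ftype, VM_FAULT_NORMAL);
 *	if (rv != KERN_SUCCESS)
 *		(deliver SIGSEGV/SIGBUS, or panic for a kernel fault)
 *
 * where "eva" and "ftype" are hypothetical names for the faulting address
 * and the protection the access requires.
 */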

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{
	register vm_offset_t va;
	register pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * not fault, so that page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
		    VM_FAULT_CHANGE_WIRING);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}

/*
 *	vm_fault_user_wire:
 *
 *	Wire down a range of virtual addresses in a map.  This
 *	is for user mode though, so we only ask for read access
 *	on currently read only sections.
 */
int
vm_fault_user_wire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{
	register vm_offset_t va;
	register pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * not fault, so that page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_USER_WIRE);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}
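
/*
 * Usage sketch (an assumed caller, not defined here): the vm_map wiring
 * code that backs mlock(2) would wire a page-aligned user range with
 *
 *	rv = vm_fault_user_wire(map, trunc_page(start), round_page(end));
 *
 * and undo the wiring with vm_fault_unwire() if rv != KERN_SUCCESS.
 */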

/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{
	register vm_offset_t va, pa;
	register pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != (vm_offset_t) 0) {
			pmap_change_wiring(pmap, va, FALSE);
			vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
		}
	}

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * fault, so that page tables and such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t dst_map;
	vm_map_t src_map;
	vm_map_entry_t dst_entry;
	vm_map_entry_t src_entry;
{
	vm_object_t dst_object;
	vm_object_t src_object;
	vm_ooffset_t dst_offset;
	vm_ooffset_t src_offset;
	vm_prot_t prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;

#ifdef	lint
	src_map++;
#endif	/* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	dst_object = vm_object_allocate(OBJT_DEFAULT,
	    (vm_size_t) OFF_TO_IDX(dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 * Loop through all of the pages in the entry's range, copying each
	 * one from the source object (it should be there) to the destination
	 * object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 * Allocate a page in the destination object
		 */
		do {
			dst_m = vm_page_alloc(dst_object,
			    OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_WAIT;
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be in
		 * memory.)
		 */
		src_m = vm_page_lookup(src_object,
		    OFF_TO_IDX(dst_offset + src_offset));
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 * Enter it in the pmap...
		 */

		vm_page_flag_clear(dst_m, PG_ZERO);
		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
		    prot, FALSE);
		vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		vm_page_activate(dst_m);
		vm_page_wakeup(dst_m);
	}
}

/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  This routine brackets the viable
 * pages for the pages to be paged in.
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *  marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *  number of pages in marray
 */
static int
vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
	vm_page_t m;
	int rbehind;
	int rahead;
	vm_page_t *marray;
	int *reqpage;
{
	int i, j;
	vm_object_t object;
	vm_pindex_t pindex, startpindex, endpindex, tpindex;
	vm_page_t rtm;
	int cbehind, cahead;

	object = m->object;
	pindex = m->pindex;

	/*
	 * we don't fault-ahead for device pager
	 */
	if (object->type == OBJT_DEVICE) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	/*
	 * if the requested page is not available, then give up now
	 */

	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
		return 0;
	}

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	if (rahead > cahead) {
		rahead = cahead;
	}

	if (rbehind > cbehind) {
		rbehind = cbehind;
	}

	/*
	 * try to do any readahead that we might have free pages for.
	 */
	if ((rahead + rbehind) >
	    ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
		pagedaemon_wakeup();
		marray[0] = m;
		*reqpage = 0;
		return 1;
	}

	/*
	 * scan backward for the read behind pages -- in memory
	 */
	if (pindex > 0) {
		if (rbehind > pindex) {
			rbehind = pindex;
			startpindex = 0;
		} else {
			startpindex = pindex - rbehind;
		}

		for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
			if (vm_page_lookup(object, tpindex)) {
				startpindex = tpindex + 1;
				break;
			}
			if (tpindex == 0)
				break;
		}
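
		/*
		 * Allocate the read-behind pages between startpindex and
		 * the faulting page.  If any allocation fails, free those
		 * already allocated and fall back to returning just the
		 * required page.
		 */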
		for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {

			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (rtm == NULL) {
				for (j = 0; j < i; j++) {
					vm_page_free(marray[j]);
				}
				marray[0] = m;
				*reqpage = 0;
				return 1;
			}

			marray[i] = rtm;
		}
	} else {
		startpindex = 0;
		i = 0;
	}

	marray[i] = m;
	/* page offset of the required page */
	*reqpage = i;

	tpindex = pindex + 1;
	i++;

	/*
	 * scan forward for the read ahead pages
	 */
	endpindex = tpindex + rahead;
	if (endpindex > object->size)
		endpindex = object->size;

	for (; tpindex < endpindex; i++, tpindex++) {

		if (vm_page_lookup(object, tpindex)) {
			break;
		}

		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
		if (rtm == NULL) {
			break;
		}

		marray[i] = rtm;
	}

	/* return number of pages */
	return i;
}
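
/*
 * Worked example (a sketch): for a fault on pindex 10 with rbehind = 2 and
 * rahead = 3, where all the surrounding pages are non-resident and backed
 * by the pager, this returns 6 with marray[] holding pages at pindexes
 * 8..13 and *reqpage = 2.  vm_fault() then hands marray, the count, and
 * reqpage directly to vm_pager_get_pages().
 */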