/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_fault.c,v 1.93 1999/01/10 01:58:28 eivind Exp $
 */

/*
 * Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vm_fault_additional_pages __P((vm_page_t, int,
    int, vm_page_t *, int *));

#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

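/*
 * The faultstate structure collects the state of a fault in progress:
 * the page and object/pindex currently being examined, the top-level
 * ("first") object/pindex where the fault started, the map lookup that
 * produced them, and the vnode (if any) locked for paging.  The helper
 * routines below release or unwind this state consistently.
 */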
struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;
	struct vnode *vp;
};

static __inline void
release_page(struct faultstate *fs)
{
	vm_page_wakeup(fs->m);
	vm_page_deactivate(fs->m);
	fs->m = NULL;
}

static __inline void
unlock_map(struct faultstate *fs)
{
	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = FALSE;
	}
}

static void
_unlock_things(struct faultstate *fs, int dealloc)
{
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
		fs->first_m = NULL;
	}
	if (dealloc) {
		vm_object_deallocate(fs->first_object);
	}
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

#define unlock_things(fs) _unlock_things(fs, 0)
#define unlock_and_deallocate(fs) _unlock_things(fs, 1)

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
{
	vm_prot_t prot;
	int result;
	boolean_t wired;
	int map_generation;
	vm_page_t old_m;
	vm_object_t next_object;
	vm_page_t marray[VM_FAULT_READ];
	int hardfault;
	int faultcount;
	struct faultstate fs;

	cnt.v_vm_faults++;	/* needs lock XXX */
	hardfault = 0;

RetryFault:;
	fs.map = map;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	if ((result = vm_map_lookup(&fs.map, vaddr,
	    fault_type, &fs.entry, &fs.first_object,
	    &fs.first_pindex, &prot, &wired)) != KERN_SUCCESS) {
		if ((result != KERN_PROTECTION_FAILURE) ||
		    ((fault_flags & VM_FAULT_WIRE_MASK) != VM_FAULT_USER_WIRE)) {
			return result;
		}

		/*
		 * If we are user-wiring a r/w segment, and it is COW, then
		 * we need to do the COW operation.  Note that we don't
		 * currently COW RO sections, because it is NOT desirable
		 * to COW .text.  We simply keep .text from ever being COW'ed
		 * and take the heat that one cannot debug wired .text sections.
		 */
		result = vm_map_lookup(&fs.map, vaddr,
		    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_OVERRIDE_WRITE,
		    &fs.entry, &fs.first_object, &fs.first_pindex, &prot, &wired);
		if (result != KERN_SUCCESS) {
			return result;
		}

		/*
		 * If we don't COW now, on a user wire, the user will never
		 * be able to write to the mapping.  If we don't make this
		 * restriction, the bookkeeping would be nearly impossible.
		 */
		if ((fs.entry->protection & VM_PROT_WRITE) == 0)
			fs.entry->max_protection &= ~VM_PROT_WRITE;
	}

	map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		    (u_long)vaddr);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 */
	vm_object_reference(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.vp = vnode_pager_lock(fs.first_object);
	if ((fault_type & VM_PROT_WRITE) &&
	    (fs.first_object->type == OBJT_VNODE)) {
		vm_freeze_copyopts(fs.first_object,
		    fs.first_pindex, fs.first_pindex + 1);
	}

	fs.lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */

	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	while (TRUE) {
		/*
		 * If the object is dead, we stop here
		 */

		if (fs.object->flags & OBJ_DEAD) {
			unlock_and_deallocate(&fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if page is resident
		 */

		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			int queue, s;
			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is busy via either PG_BUSY or
			 * vm_page_t->busy because the vm_pager may be using
			 * vm_page_t->busy for pageouts ( and even pageins if
			 * it is the vnode pager ), and we could end up trying
			 * to pagein and pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a vm_page_t->busy page except, perhaps,
			 * to pmap it.
			 */
			if ((fs.m->flags & PG_BUSY) || fs.m->busy) {
				unlock_things(&fs);
				(void)vm_page_sleep_busy(fs.m, TRUE, "vmpfw");
				cnt.v_intrans++;
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}

			queue = fs.m->queue;
			s = splvm();
			vm_page_unqueue_nowakeup(fs.m);
			splx(s);

#if 0
			/*
			 * Code removed.  In a low-memory situation (say, a
			 * memory-bound program is running), the last thing you
			 * do is starve reactivations for other processes.
			 * XXX we need to find a better way.
			 */
			if (((queue - fs.m->pc) == PQ_CACHE) &&
			    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) {
				vm_page_activate(fs.m);
				unlock_and_deallocate(&fs);
				VM_WAIT;
				goto RetryFault;
			}
#endif
			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break-out ( we
			 * found the page ).
			 */

			vm_page_busy(fs.m);
			if (((fs.m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
			    fs.m->object != kernel_object && fs.m->object != kmem_object) {
				goto readrest;
			}

			break;
		}

		/*
		 * Page is not resident.  If this is the search termination,
		 * allocate a new page.
		 */

		if (((fs.object->type != OBJT_DEFAULT) &&
		    (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired))
		    || (fs.object == fs.first_object)) {

			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 */
			fs.m = vm_page_alloc(fs.object, fs.pindex,
			    (fs.vp || fs.object->backing_object)? VM_ALLOC_NORMAL: VM_ALLOC_ZERO);

			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				VM_WAIT;
				goto RetryFault;
			}
		}

readrest:
		/*
		 * Have page, but it may not be entirely valid ( or valid at
		 * all ).   If this object is not the default, try to fault-in
		 * the page as well as activate additional pages when
		 * appropriate, and page-in additional pages when appropriate.
		 */

		if (fs.object->type != OBJT_DEFAULT &&
		    (((fault_flags & VM_FAULT_WIRE_MASK) == 0) || wired)) {
			int rv;
			int reqpage;
			int ahead, behind;

			if (fs.first_object->behavior == OBJ_RANDOM) {
				ahead = 0;
				behind = 0;
			} else {
				behind = (vaddr - fs.entry->start) >> PAGE_SHIFT;
				if (behind > VM_FAULT_READ_BEHIND)
					behind = VM_FAULT_READ_BEHIND;

				ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1;
				if (ahead > VM_FAULT_READ_AHEAD)
					ahead = VM_FAULT_READ_AHEAD;
			}

			if ((fs.first_object->type != OBJT_DEVICE) &&
			    (fs.first_object->behavior == OBJ_SEQUENTIAL)) {
				vm_pindex_t firstpindex, tmppindex;
				if (fs.first_pindex <
				    2*(VM_FAULT_READ_BEHIND + VM_FAULT_READ_AHEAD + 1))
					firstpindex = 0;
				else
					firstpindex = fs.first_pindex -
					    2*(VM_FAULT_READ_BEHIND + VM_FAULT_READ_AHEAD + 1);

				for (tmppindex = fs.first_pindex - 1;
				    tmppindex >= firstpindex;
				    --tmppindex) {
					vm_page_t mt;
					mt = vm_page_lookup(fs.first_object, tmppindex);
					if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
						break;
					if (mt->busy ||
					    (mt->flags & (PG_BUSY | PG_FICTITIOUS)) ||
					    mt->hold_count ||
					    mt->wire_count)
						continue;
					if (mt->dirty == 0)
						vm_page_test_dirty(mt);
					if (mt->dirty) {
						vm_page_protect(mt, VM_PROT_NONE);
						vm_page_deactivate(mt);
					} else {
						vm_page_cache(mt);
					}
				}

				ahead += behind;
				behind = 0;
			}

			/*
			 * now we find out if any other pages should be paged
			 * in at this time.  This routine checks to see if the
			 * pages surrounding this fault reside in the same
			 * object as the page for this fault.  If they do,
			 * then they are faulted in also into the object.  The
			 * array "marray" returned contains an array of
			 * vm_page_t structs where one of them is the
			 * vm_page_t passed to the routine.  The reqpage
			 * return value is the index into the marray for the
			 * vm_page_t passed to the routine.
			 *
			 * fs.m plus the additional pages are PG_BUSY'd.
			 */
			faultcount = vm_fault_additional_pages(
			    fs.m, behind, ahead, marray, &reqpage);

			/*
			 * Call the pager to retrieve the data, if any, after
			 * releasing the lock on the map.  We hold a ref on
			 * fs.object and the pages are PG_BUSY'd.
			 */
			unlock_map(&fs);

			rv = faultcount ?
			    vm_pager_get_pages(fs.object, marray, faultcount,
				reqpage) : VM_PAGER_FAIL;

			if (rv == VM_PAGER_OK) {
				/*
				 * Found the page. Leave it busy while we play
				 * with it.
				 */

				/*
				 * Relookup in case pager changed page. Pager
				 * is responsible for disposition of old page
				 * if moved.
				 */
				fs.m = vm_page_lookup(fs.object, fs.pindex);
				if (!fs.m) {
					unlock_and_deallocate(&fs);
					goto RetryFault;
				}

				hardfault++;
				break;	/* break to PAGE HAS BEEN FOUND */
			}
			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */

			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);
			/*
			 * Data outside the range of the pager or an I/O error
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
			    (rv == VM_PAGER_BAD)) {
				vm_page_free(fs.m);
				fs.m = NULL;
				unlock_and_deallocate(&fs);
				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
			}
			if (fs.object != fs.first_object) {
				vm_page_free(fs.m);
				fs.m = NULL;
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}
		/*
		 * We get here if the object has default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */

		fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				fs.m = fs.first_m;
			}
			fs.first_m = NULL;

			if ((fs.m->flags & PG_ZERO) == 0) {
				vm_page_zero_fill(fs.m);
				cnt.v_ozfod++;
			}
			cnt.v_zfod++;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
			}
			KASSERT(fs.object != next_object, ("object loop %p", next_object));
			fs.object = next_object;
			vm_object_pip_add(fs.object, 1);
		}
	}

	KASSERT((fs.m->flags & PG_BUSY) != 0,
	    ("vm_fault: not busy after main loop"));

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	old_m = fs.m;	/* save page that would be copied */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */

	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			if (map_generation == fs.map->timestamp &&
			    /*
			     * Only one shadow object
			     */
			    (fs.object->shadow_count == 1) &&
			    /*
			     * No COW refs, except us
			     */
			    (fs.object->ref_count == 1) &&
			    /*
			     * No one else can look this object up
			     */
			    (fs.object->handle == NULL) &&
			    /*
			     * No other ways to look the object up
			     */
			    ((fs.object->type == OBJT_DEFAULT) ||
			     (fs.object->type == OBJT_SWAP)) &&
			    /*
			     * We don't chase down the shadow chain
			     */
			    (fs.object == fs.first_object->backing_object) &&

			    /*
			     * grab the lock if we need to
			     */
			    (fs.lookup_still_valid ||
			     (((fs.entry->eflags & MAP_ENTRY_IS_A_MAP) == 0) &&
			      lockmgr(&fs.map->lock,
				  LK_EXCLUSIVE|LK_NOWAIT, (void *)0, curproc) == 0))) {

				fs.lookup_still_valid = 1;
				/*
				 * get rid of the unnecessary page
				 */
				vm_page_protect(fs.first_m, VM_PROT_NONE);
				vm_page_free(fs.first_m);
				fs.first_m = NULL;

				/*
				 * grab the page and put it into the
				 * process's object.  The page is
				 * automatically made dirty.
				 */
				vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
				fs.first_m = fs.m;
				vm_page_busy(fs.first_m);
				fs.m = NULL;
				cnt.v_cow_optim++;
			} else {
				/*
				 * Oh, well, let's copy it.
				 */
				vm_page_copy(fs.m, fs.first_m);
			}

			if (fs.m) {
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
			}

			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */

			vm_object_pip_wakeup(fs.object);

			/*
			 * Only use the new page below...
			 */

			cnt.v_cow_faults++;
			fs.m = fs.first_m;
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;

		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */

	if (!fs.lookup_still_valid &&
	    (fs.map->timestamp != map_generation)) {
		vm_object_t retry_object;
		vm_pindex_t retry_pindex;
		vm_prot_t retry_prot;

		/*
		 * Since map entries may be pageable, make sure we can take a
		 * page fault on them.
		 */

		/*
		 * To avoid trying to write_lock the map while another process
		 * has it read_locked (in vm_map_pageable), we do not try for
		 * write permission.  If the page is still writable, we will
		 * get write permission.  If it is not, or has been marked
		 * needs_copy, we enter the mapping without write permission,
		 * and will merely take another fault.
		 */
		result = vm_map_lookup(&fs.map, vaddr, fault_type & ~VM_PROT_WRITE,
		    &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);
		map_generation = fs.map->timestamp;

		/*
		 * If we don't need the page any longer, put it on the active
		 * list (the easiest thing to do here).  If no one needs it,
		 * pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			return (result);
		}
		fs.lookup_still_valid = TRUE;

		if ((retry_object != fs.first_object) ||
		    (retry_pindex != fs.first_pindex)) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		/*
		 * Check whether the protection has changed or the object has
		 * been copied while we left the map unlocked.  Changing from
		 * read to write permission is OK - we leave the page
		 * write-protected, and catch the write fault.  Changing from
		 * write to read permission means that we can't mark the page
		 * write-enabled after all.
		 */
		prot &= retry_prot;
	}

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter may cause other faults.  We don't put the page
	 * back on the active queue until later so that the page-out daemon
	 * won't find us (yet).
	 */

	if (prot & VM_PROT_WRITE) {
		vm_page_flag_set(fs.m, PG_WRITEABLE);
		vm_object_set_flag(fs.m->object,
		    OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
		/*
		 * If the fault is a write, we know that this page is being
		 * written NOW.  This will save on the pmap_is_modified() calls
		 * later.
		 *
		 * Also tell the backing pager, if any, that it should remove
		 * any swap backing since the page is now dirty.
		 */
		if (fault_flags & VM_FAULT_DIRTY) {
			fs.m->dirty = VM_PAGE_BITS_ALL;
			vm_pager_page_unswapped(fs.m);
		}
	}

	unlock_things(&fs);
	fs.m->valid = VM_PAGE_BITS_ALL;
	vm_page_flag_clear(fs.m, PG_ZERO);

	pmap_enter(fs.map->pmap, vaddr, VM_PAGE_TO_PHYS(fs.m), prot, wired);
	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
		pmap_prefault(fs.map->pmap, vaddr, fs.entry);
	}

	vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED);
	if (fault_flags & VM_FAULT_HOLD)
		vm_page_hold(fs.m);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (fault_flags & VM_FAULT_WIRE_MASK) {
		if (wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else {
		vm_page_activate(fs.m);
	}

	if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {
		if (hardfault) {
			curproc->p_stats->p_ru.ru_majflt++;
		} else {
			curproc->p_stats->p_ru.ru_minflt++;
		}
	}

	/*
	 * Unlock everything, and return
	 */

	vm_page_wakeup(fs.m);
	vm_object_deallocate(fs.first_object);

	return (KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{

	register vm_offset_t va;
	register pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * not fault, so that page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE,
		    VM_FAULT_CHANGE_WIRING);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}

/*
 *	vm_fault_user_wire:
 *
 *	Wire down a range of virtual addresses in a map.  This
 *	is for user mode though, so we only ask for read access
 *	on currently read only sections.
 */
int
vm_fault_user_wire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{

	register vm_offset_t va;
	register pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * not fault, so that page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_READ, VM_FAULT_USER_WIRE);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{

	register vm_offset_t va, pa;
	register pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != (vm_offset_t) 0) {
			pmap_change_wiring(pmap, va, FALSE);
			vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
		}
	}

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * fault, so that page tables and such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t dst_map;
	vm_map_t src_map;
	vm_map_entry_t dst_entry;
	vm_map_entry_t src_entry;
{
	vm_object_t dst_object;
	vm_object_t src_object;
	vm_ooffset_t dst_offset;
	vm_ooffset_t src_offset;
	vm_prot_t prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;

#ifdef lint
	src_map++;
#endif	/* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	dst_object = vm_object_allocate(OBJT_DEFAULT,
	    (vm_size_t) OFF_TO_IDX(dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 * Loop through all of the pages in the entry's range, copying each
	 * one from the source object (it should be there) to the destination
	 * object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 * Allocate a page in the destination object
		 */
		do {
			dst_m = vm_page_alloc(dst_object,
			    OFF_TO_IDX(dst_offset), VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_WAIT;
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be in
		 * memory.)
		 */
		src_m = vm_page_lookup(src_object,
		    OFF_TO_IDX(dst_offset + src_offset));
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 * Enter it in the pmap...
		 */

		vm_page_flag_clear(dst_m, PG_ZERO);
		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
		    prot, FALSE);
		vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		vm_page_activate(dst_m);
		vm_page_wakeup(dst_m);
	}
}


/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  This routine brackets the viable
 * pages for the pages to be paged in.
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *  marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *  number of pages in marray
 */
static int
vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
	vm_page_t m;
	int rbehind;
	int rahead;
	vm_page_t *marray;
	int *reqpage;
{
	int i, j;
	vm_object_t object;
	vm_pindex_t pindex, startpindex, endpindex, tpindex;
	vm_page_t rtm;
	int cbehind, cahead;

	object = m->object;
	pindex = m->pindex;

	/*
	 * we don't fault-ahead for device pager
	 */
	if (object->type == OBJT_DEVICE) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	/*
	 * if the requested page is not available, then give up now
	 */

	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
		return 0;
	}

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	if (rahead > cahead) {
		rahead = cahead;
	}

	if (rbehind > cbehind) {
		rbehind = cbehind;
	}

	/*
	 * try to do any readahead that we might have free pages for.
	 */
	if ((rahead + rbehind) >
	    ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
		pagedaemon_wakeup();
		marray[0] = m;
		*reqpage = 0;
		return 1;
	}

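	/*
	 * The scans below stop at the first page that is already resident:
	 * behind the fault this sets the start of the run, ahead of the
	 * fault it sets the end, so only pages missing from the object are
	 * allocated for the pager to fill.
	 */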
	/*
	 * scan backward for the read behind pages -- in memory
	 */
	if (pindex > 0) {
		if (rbehind > pindex) {
			rbehind = pindex;
			startpindex = 0;
		} else {
			startpindex = pindex - rbehind;
		}

		for (tpindex = pindex - 1; tpindex >= startpindex; tpindex -= 1) {
			if (vm_page_lookup(object, tpindex)) {
				startpindex = tpindex + 1;
				break;
			}
			if (tpindex == 0)
				break;
		}

		for (i = 0, tpindex = startpindex; tpindex < pindex; i++, tpindex++) {

			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (rtm == NULL) {
				for (j = 0; j < i; j++) {
					vm_page_free(marray[j]);
				}
				marray[0] = m;
				*reqpage = 0;
				return 1;
			}

			marray[i] = rtm;
		}
	} else {
		startpindex = 0;
		i = 0;
	}

	marray[i] = m;
	/* page offset of the required page */
	*reqpage = i;

	tpindex = pindex + 1;
	i++;

	/*
	 * scan forward for the read ahead pages
	 */
	endpindex = tpindex + rahead;
	if (endpindex > object->size)
		endpindex = object->size;

	for (; tpindex < endpindex; i++, tpindex++) {

		if (vm_page_lookup(object, tpindex)) {
			break;
		}

		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
		if (rtm == NULL) {
			break;
		}

		marray[i] = rtm;
	}

	/* return the number of pages in marray */
	return i;
}