/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_fault.c,v 1.35 1995/11/02 06:42:47 davidg Exp $
 */

/*
 *	Page fault handling module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/resource.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/swap_pager.h>

int vm_fault_additional_pages __P((vm_page_t, int, int, vm_page_t *, int *));

#define VM_FAULT_READ_AHEAD 4
#define VM_FAULT_READ_BEHIND 3
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)
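/*
 * A fault thus clusters at most VM_FAULT_READ pages at a time: up to
 * VM_FAULT_READ_BEHIND pages before the faulting page, the page itself,
 * and up to VM_FAULT_READ_AHEAD pages after it.  The actual cluster is
 * clipped further in vm_fault_additional_pages() below.
 */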
/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(map, vaddr, fault_type, change_wiring)
	vm_map_t map;
	vm_offset_t vaddr;
	vm_prot_t fault_type;
	boolean_t change_wiring;
{
	vm_object_t first_object;
	vm_offset_t first_offset;
	vm_map_entry_t entry;
	register vm_object_t object;
	register vm_offset_t offset;
	vm_page_t m;
	vm_page_t first_m;
	vm_prot_t prot;
	int result;
	boolean_t wired;
	boolean_t su;
	boolean_t lookup_still_valid;
	boolean_t page_exists;
	vm_page_t old_m;
	vm_object_t next_object;
	vm_page_t marray[VM_FAULT_READ];
	int spl;
	int hardfault = 0;
	struct vnode *vp = NULL;

	cnt.v_vm_faults++;	/* needs lock XXX */
	/*
	 * Recovery actions
	 */
#define	FREE_PAGE(m)	{ \
	PAGE_WAKEUP(m); \
	vm_page_free(m); \
}

#define	RELEASE_PAGE(m)	{ \
	PAGE_WAKEUP(m); \
	if ((m->flags & PG_ACTIVE) == 0) vm_page_activate(m); \
}

#define	UNLOCK_MAP	{ \
	if (lookup_still_valid) { \
		vm_map_lookup_done(map, entry); \
		lookup_still_valid = FALSE; \
	} \
}

#define	UNLOCK_THINGS	{ \
	vm_object_pip_wakeup(object); \
	if (object != first_object) { \
		FREE_PAGE(first_m); \
		vm_object_pip_wakeup(first_object); \
	} \
	UNLOCK_MAP; \
	if (vp != NULL) VOP_UNLOCK(vp); \
}

#define	UNLOCK_AND_DEALLOCATE	{ \
	UNLOCK_THINGS; \
	vm_object_deallocate(first_object); \
}

RetryFault:;
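	/*
	 * Control returns here (rather than looping) whenever we have had
	 * to sleep or drop our locks above; the state of the world may
	 * have changed, so the whole lookup is redone from scratch.
	 */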
	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */

	if ((result = vm_map_lookup(&map, vaddr,
	    fault_type, &entry, &first_object,
	    &first_offset, &prot, &wired, &su)) != KERN_SUCCESS) {
		return (result);
	}

	vp = vnode_pager_lock(first_object);

	lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot;

	first_m = NULL;

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 */

	first_object->ref_count++;
	first_object->paging_in_progress++;

	/*
	 * INVARIANTS (through entire routine):
	 *
	 * 1)	At all times, we must either have the object lock or a busy
	 *	page in some object to prevent some other process from trying
	 *	to bring in the same page.
	 *
	 *	Note that we cannot hold any locks during the pager access or
	 *	when waiting for memory, so we use a busy page then.
	 *
	 *	Note also that we aren't as concerned about more than one
	 *	thread attempting to pager_data_unlock the same page at once,
	 *	so we don't hold the page as busy then, but do record the
	 *	highest unlock value so far.  [Unlock requests may also be
	 *	delivered out of order.]
	 *
	 * 2)	Once we have a busy page, we must remove it from the pageout
	 *	queues, so that the pageout daemon will not grab it away.
	 *
	 * 3)	To prevent another process from racing us down the shadow
	 *	chain and entering a new page in the top object before we do,
	 *	we must keep a busy page in the top object while following
	 *	the shadow chain.
	 *
	 * 4)	We must increment paging_in_progress on any object for which
	 *	we have a busy page, to prevent vm_object_collapse from
	 *	removing the busy page without our noticing.
	 */

	/*
	 * Search for the page at object/offset.
	 */
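	/*
	 * (object, offset) names the logical page we are looking for; the
	 * loop below walks from the top-level object down through its
	 * backing objects until the page is found, created, or the chain
	 * is exhausted.
	 */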
	object = first_object;
	offset = first_offset;

	/*
	 * See whether this page is resident
	 */

	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != NULL) {
			/*
			 * If the page is being brought in, wait for it and
			 * then retry.
			 */
			if ((m->flags & PG_BUSY) || m->busy) {
				int s;

				UNLOCK_THINGS;
				s = splhigh();
				if ((m->flags & PG_BUSY) || m->busy) {
					m->flags |= PG_WANTED | PG_REFERENCED;
					cnt.v_intrans++;
					tsleep(m, PSWP, "vmpfw", 0);
				}
				splx(s);
				vm_object_deallocate(first_object);
				goto RetryFault;
			}

			/*
			 * Mark page busy for other processes, and the pagedaemon.
			 */
			m->flags |= PG_BUSY;
			if ((m->flags & PG_CACHE) &&
			    (cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				PAGE_WAKEUP(m);
				goto RetryFault;
			}

			if (m->valid && ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) &&
			    m->object != kernel_object && m->object != kmem_object) {
				goto readrest;
			}
			break;
		}
		if (((object->type != OBJT_DEFAULT) && (!change_wiring || wired))
		    || (object == first_object)) {

			if (offset >= object->size) {
				UNLOCK_AND_DEALLOCATE;
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 */
			m = vm_page_alloc(object, offset,
			    vp ? VM_ALLOC_NORMAL : (VM_ALLOC_NORMAL | VM_ALLOC_ZERO));

			if (m == NULL) {
				UNLOCK_AND_DEALLOCATE;
				VM_WAIT;
				goto RetryFault;
			}
		}
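		/*
		 * readrest is also entered directly (above) with a resident
		 * page that is only partially valid, so that the pager can
		 * fill in the rest of the page.
		 */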
readrest:
		if (object->type != OBJT_DEFAULT && (!change_wiring || wired)) {
			int rv;
			int faultcount;
			int reqpage;

			/*
			 * Now we find out whether any other pages should be
			 * paged in at this time.  This routine checks to see
			 * if the pages surrounding this fault reside in the
			 * same object as the page for this fault; if they
			 * do, then they are faulted into the object as well.
			 * The returned array "marray" contains vm_page_t
			 * structs, one of which is the vm_page_t passed to
			 * the routine.  The reqpage return value is the
			 * index into marray of the vm_page_t passed to the
			 * routine.
			 */
			faultcount = vm_fault_additional_pages(
			    m, VM_FAULT_READ_BEHIND, VM_FAULT_READ_AHEAD,
			    marray, &reqpage);

			/*
			 * Call the pager to retrieve the data, if any, after
			 * releasing the lock on the map.
			 */
			UNLOCK_MAP;

			rv = faultcount ?
			    vm_pager_get_pages(object, marray, faultcount,
				reqpage) : VM_PAGER_FAIL;

			if (rv == VM_PAGER_OK) {
				/*
				 * Found the page. Leave it busy while we play
				 * with it.
				 */

				/*
				 * Relookup in case pager changed page. Pager
				 * is responsible for disposition of old page
				 * if moved.
				 */
				m = vm_page_lookup(object, offset);
				if (m == NULL) {
					UNLOCK_AND_DEALLOCATE;
					goto RetryFault;
				}

				hardfault++;
				break;
			}
			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */

			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager input (probably hardware) error, PID %d failure\n",
				    curproc->p_pid);
			/*
			 * Data outside the range of the pager or an I/O error
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((map != kernel_map) && (rv == VM_PAGER_ERROR)) || (rv == VM_PAGER_BAD)) {
				FREE_PAGE(m);
				UNLOCK_AND_DEALLOCATE;
				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
			}
			if (object != first_object) {
				FREE_PAGE(m);
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}
		/*
		 * We get here if the object has a default pager (or unwiring)
		 * or if the pager doesn't have the page.
		 */
		if (object == first_object)
			first_m = m;

		/*
		 * Move on to the next object. Lock the next object before
		 * unlocking the current one.
		 */

		offset += object->backing_object_offset;
		next_object = object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (object != first_object) {
				vm_object_pip_wakeup(object);

				object = first_object;
				offset = first_offset;
				m = first_m;
			}
			first_m = NULL;

			if ((m->flags & PG_ZERO) == 0)
				vm_page_zero_fill(m);
			m->valid = VM_PAGE_BITS_ALL;
			cnt.v_zfod++;
			break;
		} else {
			if (object != first_object) {
				vm_object_pip_wakeup(object);
			}
			object = next_object;
			object->paging_in_progress++;
		}
	}

	if ((m->flags & PG_BUSY) == 0)
		panic("vm_fault: not busy after main loop");

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	old_m = m;	/* save page that would be copied */
	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */

	if (object != first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */

		if (fault_type & VM_PROT_WRITE) {

			/*
			 * If we try to collapse first_object at this point,
			 * we may deadlock when we try to get the lock on an
			 * intermediate object (since we have the bottom
			 * object locked).  We can't unlock the bottom object,
			 * because the page we found may move (by collapse) if
			 * we do.
			 *
			 * Instead, we first copy the page.  Then, when we have
			 * no more use for the bottom object, we unlock it and
			 * try to collapse.
			 *
			 * Note that we copy the page even if we didn't need
			 * to... that's the breaks.
			 */

			/*
			 * We already have an empty page in first_object - use
			 * it.
			 */

			vm_page_copy(m, first_m);
			first_m->valid = VM_PAGE_BITS_ALL;

			/*
			 * If another map is truly sharing this page with us,
			 * we have to flush all uses of the original page,
			 * since we can't distinguish those which want the
			 * original from those which need the new copy.
			 *
			 * XXX If we know that only one map has access to this
			 * page, then we could avoid the pmap_page_protect()
			 * call.
			 */

			if ((m->flags & PG_ACTIVE) == 0)
				vm_page_activate(m);
			vm_page_protect(m, VM_PROT_NONE);

			/*
			 * We no longer need the old page or object.
			 */
			PAGE_WAKEUP(m);
			vm_object_pip_wakeup(object);

			/*
			 * Only use the new page below...
			 */

			cnt.v_cow_faults++;
			m = first_m;
			object = first_object;
			offset = first_offset;
			/*
			 * Now that we've gotten the copy out of the way,
			 * let's try to collapse the top object.
			 *
			 * But we have to play ugly games with
			 * paging_in_progress to do that...
			 */
			vm_object_pip_wakeup(object);
			vm_object_collapse(object);
			object->paging_in_progress++;
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */

	if (!lookup_still_valid) {
		vm_object_t retry_object;
		vm_offset_t retry_offset;
		vm_prot_t retry_prot;

		/*
		 * Since map entries may be pageable, make sure we can take a
		 * page fault on them.
		 */

		/*
		 * To avoid trying to write_lock the map while another process
		 * has it read_locked (in vm_map_pageable), we do not try for
		 * write permission.  If the page is still writable, we will
		 * get write permission.  If it is not, or has been marked
		 * needs_copy, we enter the mapping without write permission,
		 * and will merely take another fault.
		 */
		result = vm_map_lookup(&map, vaddr, fault_type & ~VM_PROT_WRITE,
		    &entry, &retry_object, &retry_offset, &retry_prot, &wired, &su);

		/*
		 * If we don't need the page any longer, put it on the active
		 * list (the easiest thing to do here).  If no one needs it,
		 * pageout will grab it eventually.
		 */

		if (result != KERN_SUCCESS) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			return (result);
		}
		lookup_still_valid = TRUE;

		if ((retry_object != first_object) ||
		    (retry_offset != first_offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}
		/*
		 * Check whether the protection has changed or the object has
		 * been copied while we left the map unlocked.  Changing from
		 * read to write permission is OK - we leave the page
		 * write-protected, and catch the write fault.  Changing from
		 * write to read permission means that we can't mark the page
		 * write-enabled after all.
		 */
		prot &= retry_prot;
	}
	/*
	 * (the various bits we're fiddling with here are locked by the
	 * object's lock)
	 */

	/*
	 * It's critically important that a wired-down page be faulted only
	 * once in each map for which it is wired.
	 */

	/*
	 * Put this page into the physical map. We had to do the unlock above
	 * because pmap_enter may cause other faults. We don't put the page
	 * back on the active queue until later so that the page-out daemon
	 * won't find us (yet).
	 */

	if (prot & VM_PROT_WRITE) {
		m->flags |= PG_WRITEABLE;
		m->object->flags |= OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY;
		/*
		 * If the fault is a write, we know that this page is being
		 * written NOW. This will save on the pmap_is_modified() calls
		 * later.
		 */
		if (fault_type & VM_PROT_WRITE) {
			m->dirty = VM_PAGE_BITS_ALL;
		}
	}

	m->flags |= PG_MAPPED|PG_REFERENCED;
	m->flags &= ~PG_ZERO;

	pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
#if 0
	if (change_wiring == 0 && wired == 0)
		pmap_prefault(map->pmap, vaddr, entry, first_object);
#endif

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	} else {
		if ((m->flags & PG_ACTIVE) == 0)
			vm_page_activate(m);
	}
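	/*
	 * Charge the fault against the process: a major fault if pager
	 * I/O was needed (hardfault), otherwise a minor fault.
	 */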
	if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {
		if (hardfault) {
			curproc->p_stats->p_ru.ru_majflt++;
		} else {
			curproc->p_stats->p_ru.ru_minflt++;
		}
	}

	if ((m->flags & PG_BUSY) == 0)
		printf("page not busy: %d\n", m->offset);
	/*
	 * Unlock everything, and return
	 */

	PAGE_WAKEUP(m);
	UNLOCK_AND_DEALLOCATE;

	return (KERN_SUCCESS);

}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{

	register vm_offset_t va;
	register pmap_t pmap;
	int rv;

	pmap = vm_map_pmap(map);

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * not fault, so that page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, start, end, FALSE);

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
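		/*
		 * Throttle: any process other than the pageout daemon must
		 * leave enough free pages for pageout itself to make
		 * progress, or wiring could starve the pageout daemon of
		 * the memory it needs.
		 */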
		while (curproc != pageproc &&
		    (cnt.v_free_count <= cnt.v_pageout_free_min))
			VM_WAIT;

		rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}


/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(map, start, end)
	vm_map_t map;
	vm_offset_t start, end;
{

	register vm_offset_t va, pa;
	register pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */

	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa == (vm_offset_t) 0) {
			panic("unwire: page not in pmap");
		}
		pmap_change_wiring(pmap, va, FALSE);
		vm_page_unwire(PHYS_TO_VM_PAGE(pa));
	}

	/*
	 * Inform the physical mapping system that the range of addresses may
	 * fault, so that page tables and such may be unwired themselves.
	 */

	pmap_pageable(pmap, start, end, TRUE);

}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Copy all of the pages from a wired-down map entry to another.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */

void
vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
	vm_map_t dst_map;
	vm_map_t src_map;
	vm_map_entry_t dst_entry;
	vm_map_entry_t src_entry;
{
	vm_object_t dst_object;
	vm_object_t src_object;
	vm_offset_t dst_offset;
	vm_offset_t src_offset;
	vm_prot_t prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;

#ifdef lint
	src_map++;
#endif	/* lint */

	src_object = src_entry->object.vm_object;
	src_offset = src_entry->offset;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	dst_object = vm_object_allocate(OBJT_DEFAULT,
	    (vm_size_t) (dst_entry->end - dst_entry->start));

	dst_entry->object.vm_object = dst_object;
	dst_entry->offset = 0;

	prot = dst_entry->max_protection;

	/*
	 * Loop through all of the pages in the entry's range, copying each
	 * one from the source object (it should be there) to the destination
	 * object.
	 */
	for (vaddr = dst_entry->start, dst_offset = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_offset += PAGE_SIZE) {

		/*
		 * Allocate a page in the destination object
		 */
		do {
			dst_m = vm_page_alloc(dst_object, dst_offset, VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_WAIT;
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be in
		 * memory.)
		 */
		src_m = vm_page_lookup(src_object, dst_offset + src_offset);
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");

		vm_page_copy(src_m, dst_m);

		/*
		 * Enter it in the pmap...
		 */

		dst_m->flags |= PG_WRITEABLE|PG_MAPPED;
		dst_m->flags &= ~PG_ZERO;
		pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
		    prot, FALSE);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		vm_page_activate(dst_m);
		PAGE_WAKEUP(dst_m);
	}
}


/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  It brackets the requested page with
 * the run of viable pages that should be paged in along with it.
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *	marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *	number of pages in marray
 */
int
vm_fault_additional_pages(m, rbehind, rahead, marray, reqpage)
	vm_page_t m;
	int rbehind;
	int rahead;
	vm_page_t *marray;
	int *reqpage;
{
	int i;
	vm_object_t object;
	vm_offset_t offset, startoffset, endoffset, toffset, size;
	vm_page_t rtm;
	int treqpage;
	int cbehind, cahead;
	object = m->object;
	offset = m->offset;

	/*
	 * if the requested page is not available, then give up now
	 */

	if (!vm_pager_has_page(object,
	    object->paging_offset + offset, &cbehind, &cahead))
		return 0;

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	if (rahead > cahead) {
		rahead = cahead;
	}

	if (rbehind > cbehind) {
		rbehind = cbehind;
	}

	/*
	 * try to do any readahead that we might have free pages for.
	 */
	if ((rahead + rbehind) >
	    ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
		pagedaemon_wakeup();
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	/*
	 * scan backward for the read behind pages -- stop at the first
	 * page that is already resident or at the start of the object
	 */
	toffset = offset - PAGE_SIZE;
	if (toffset < offset) {
		if (rbehind * PAGE_SIZE > offset)
			rbehind = offset / PAGE_SIZE;
		startoffset = offset - rbehind * PAGE_SIZE;
		while (toffset >= startoffset) {
			if (vm_page_lookup(object, toffset)) {
				startoffset = toffset + PAGE_SIZE;
				break;
			}
			if (toffset == 0)
				break;
			toffset -= PAGE_SIZE;
		}
	} else {
		startoffset = offset;
	}

	/*
	 * scan forward for the read ahead pages -- stop at the first
	 * page that is already resident or at the end of the object
	 */
	toffset = offset + PAGE_SIZE;
	endoffset = offset + (rahead + 1) * PAGE_SIZE;
	if (endoffset > object->size)
		endoffset = object->size;
	while (toffset < endoffset) {
		if (vm_page_lookup(object, toffset)) {
			break;
		}
		toffset += PAGE_SIZE;
	}
	endoffset = toffset;
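	/*
	 * At this point [startoffset, endoffset) brackets the faulting
	 * page together with its non-resident neighbors that are worth
	 * reading in.
	 */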
	/* calculate number of pages to page in */
	size = (endoffset - startoffset) / PAGE_SIZE;

	/* calculate the page offset of the required page */
	treqpage = (offset - startoffset) / PAGE_SIZE;

	/* see if we have space (again) */
	if ((cnt.v_free_count + cnt.v_cache_count) >
	    (cnt.v_free_reserved + size)) {
		/*
		 * get our pages and don't block for them
		 */
		for (i = 0; i < size; i++) {
			if (i != treqpage) {
				rtm = vm_page_alloc(object,
				    startoffset + i * PAGE_SIZE,
				    VM_ALLOC_NORMAL);
				if (rtm == NULL) {
					if (i < treqpage) {
						int j;
						for (j = 0; j < i; j++) {
							FREE_PAGE(marray[j]);
						}
						*reqpage = 0;
						marray[0] = m;
						return 1;
					} else {
						size = i;
						*reqpage = treqpage;
						return size;
					}
				}
				marray[i] = rtm;
			} else {
				marray[i] = m;
			}
		}

		*reqpage = treqpage;
		return size;
	}
	*reqpage = 0;
	marray[0] = m;
	return 1;
}