/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/mount.h>	/* XXX Temporary for VFS_LOCK_GIANT() */

#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

static int vm_fault_additional_pages(vm_page_t, int, int, vm_page_t *, int *);
static void vm_fault_prefault(pmap_t, vm_offset_t, vm_map_entry_t);

#define VM_FAULT_READ_AHEAD 8
#define VM_FAULT_READ_BEHIND 7
#define VM_FAULT_READ (VM_FAULT_READ_AHEAD+VM_FAULT_READ_BEHIND+1)

struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int lookup_still_valid;
	struct vnode *vp;
	int vfslocked;
};

static inline void
release_page(struct faultstate *fs)
{

	vm_page_wakeup(fs->m);
	vm_page_lock_queues();
	vm_page_deactivate(fs->m);
	vm_page_unlock_queues();
	fs->m = NULL;
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = FALSE;
	}
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	vm_object_pip_wakeup(fs->object);
	VM_OBJECT_UNLOCK(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_LOCK(fs->first_object);
		vm_page_lock_queues();
		vm_page_free(fs->first_m);
		vm_page_unlock_queues();
		vm_object_pip_wakeup(fs->first_object);
		VM_OBJECT_UNLOCK(fs->first_object);
		fs->first_m = NULL;
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
	VFS_UNLOCK_GIANT(fs->vfslocked);
	fs->vfslocked = 0;
}
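
/*
 * Illustrative summary (not part of the original comments): the helpers
 * above encode the fault handler's cleanup discipline.  A typical retry
 * path looks like:
 *
 *	release_page(&fs);		wake waiters, deactivate fs.m
 *	unlock_and_deallocate(&fs);	drop pip counts, object refs,
 *					the map lookup, vnode, and Giant
 *	goto RetryFault;
 *
 * release_page() expects fs.m to be busied, and unlock_and_deallocate()
 * expects fs.object's lock to be held on entry.
 */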

/*
 * TRYPAGER - used by vm_fault to calculate whether the pager for the
 *	      current object *might* contain the page.
 *
 *	      default objects are zero-fill, there is no real pager.
 */
#define TRYPAGER	(fs.object->type != OBJT_DEFAULT && \
			((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 || wired))
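
/*
 * Illustrative expansion (assumes the vm_fault() locals referenced by the
 * macro): for an OBJT_SWAP or OBJT_VNODE object faulted without
 * VM_FAULT_CHANGE_WIRING, TRYPAGER is true and the pager is consulted;
 * for a plain anonymous OBJT_DEFAULT object it is false and the page is
 * zero-filled instead.
 */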

/*
 *	vm_fault:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags)
{
	vm_prot_t prot;
	int is_first_object_locked, result;
	boolean_t are_queues_locked, growstack, wired;
	int map_generation;
	vm_object_t next_object;
	vm_page_t marray[VM_FAULT_READ];
	int hardfault;
	int faultcount, ahead, behind;
	struct faultstate fs;
	struct vnode *vp;
	int locked, error;

	hardfault = 0;
	growstack = TRUE;
	PCPU_INC(cnt.v_vm_faults);
	fs.vp = NULL;
	fs.vfslocked = 0;
	faultcount = behind = 0;

RetryFault:;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type, &fs.entry,
	    &fs.first_object, &fs.first_pindex, &prot, &wired);
	if (result != KERN_SUCCESS) {
		if (growstack && result == KERN_INVALID_ADDRESS &&
		    map != kernel_map) {
			result = vm_map_growstack(curproc, vaddr);
			if (result != KERN_SUCCESS)
				return (KERN_FAILURE);
			growstack = FALSE;
			goto RetryFault;
		}
		return (result);
	}

	map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("vm_fault: fault on nofault entry, addr: %lx",
		    (u_long)vaddr);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.  This must be done after
	 * obtaining the vnode lock in order to avoid possible deadlocks.
	 */
	VM_OBJECT_LOCK(fs.first_object);
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.lookup_still_valid = TRUE;

	if (wired)
		fault_type = prot | (fault_type & VM_PROT_COPY);

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
	while (TRUE) {
		/*
		 * If the object is dead, we stop here
		 */
		if (fs.object->flags & OBJ_DEAD) {
			unlock_and_deallocate(&fs);
			return (KERN_PROTECTION_FAILURE);
		}

		/*
		 * See if page is resident
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			/*
			 * check for page-based copy on write.
			 * We check fs.object == fs.first_object so
			 * as to ensure the legacy COW mechanism is
			 * used when the page in question is part of
			 * a shadow object.  Otherwise, vm_page_cowfault()
			 * removes the page from the backing object,
			 * which is not what we want.
			 */
			vm_page_lock_queues();
			if ((fs.m->cow) &&
			    (fault_type & VM_PROT_WRITE) &&
			    (fs.object == fs.first_object)) {
				vm_page_cowfault(fs.m);
				vm_page_unlock_queues();
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is busy via either VPO_BUSY or
			 * vm_page_t->busy because the vm_pager may be using
			 * vm_page_t->busy for pageouts ( and even pageins if
			 * it is the vnode pager ), and we could end up trying
			 * to pagein and pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort then it is
			 * worth.  We cannot under any circumstances mess
			 * around with a vm_page_t->busy page except, perhaps,
			 * to pmap it.
			 */
			if ((fs.m->oflags & VPO_BUSY) || fs.m->busy) {
				vm_page_unlock_queues();
				VM_OBJECT_UNLOCK(fs.object);
				if (fs.object != fs.first_object) {
					VM_OBJECT_LOCK(fs.first_object);
					vm_page_lock_queues();
					vm_page_free(fs.first_m);
					vm_page_unlock_queues();
					vm_object_pip_wakeup(fs.first_object);
					VM_OBJECT_UNLOCK(fs.first_object);
					fs.first_m = NULL;
				}
				unlock_map(&fs);
				VM_OBJECT_LOCK(fs.object);
				if (fs.m == vm_page_lookup(fs.object,
				    fs.pindex)) {
					vm_page_sleep_if_busy(fs.m, TRUE,
					    "vmpfw");
				}
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_UNLOCK(fs.object);
				PCPU_INC(cnt.v_intrans);
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}
			vm_pageq_remove(fs.m);
			vm_page_unlock_queues();

			/*
			 * Mark page busy for other processes, and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break-out ( we
			 * found the page ).
			 */
			vm_page_busy(fs.m);
			if (fs.m->valid != VM_PAGE_BITS_ALL &&
			    fs.m->object != kernel_object &&
			    fs.m->object != kmem_object) {
				goto readrest;
			}

			break;
		}

		/*
		 * Page is not resident, If this is the search termination
		 * or the pager might contain the page, allocate a new page.
		 */
		if (TRYPAGER || fs.object == fs.first_object) {
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_PROTECTION_FAILURE);
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 */
			fs.m = NULL;
			if (!vm_page_count_severe()) {
#if VM_NRESERVLEVEL > 0
				if ((fs.object->flags & OBJ_COLORED) == 0) {
					fs.object->flags |= OBJ_COLORED;
					fs.object->pg_color = atop(vaddr) -
					    fs.pindex;
				}
#endif
				fs.m = vm_page_alloc(fs.object, fs.pindex,
				    (fs.object->type == OBJT_VNODE ||
				     fs.object->backing_object != NULL) ?
				    VM_ALLOC_NORMAL : VM_ALLOC_ZERO);
			}
			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				VM_WAITPFAULT;
				goto RetryFault;
			} else if (fs.m->valid == VM_PAGE_BITS_ALL)
				break;
		}
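
		/*
		 * Note on the allocation above: VM_ALLOC_ZERO is requested
		 * only when neither a vnode pager nor a backing object could
		 * supply the page's contents, since such a page will be
		 * zero-filled anyway and the allocator may already have a
		 * pre-zeroed (PG_ZERO) page available.
		 */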

readrest:
		/*
		 * We have found a valid page or we have allocated a new page.
		 * The page thus may not be valid or may not be entirely
		 * valid.
		 *
		 * Attempt to fault-in the page if there is a chance that the
		 * pager has it, and potentially fault in additional pages
		 * at the same time.
		 */
		if (TRYPAGER) {
			int rv;
			int reqpage = 0;
			u_char behavior = vm_map_entry_behavior(fs.entry);

			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				ahead = 0;
				behind = 0;
			} else {
				behind = (vaddr - fs.entry->start) >> PAGE_SHIFT;
				if (behind > VM_FAULT_READ_BEHIND)
					behind = VM_FAULT_READ_BEHIND;

				ahead = ((fs.entry->end - vaddr) >> PAGE_SHIFT) - 1;
				if (ahead > VM_FAULT_READ_AHEAD)
					ahead = VM_FAULT_READ_AHEAD;
			}
			is_first_object_locked = FALSE;
			if ((behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
			    (behavior != MAP_ENTRY_BEHAV_RANDOM &&
			    fs.pindex >= fs.entry->lastr &&
			    fs.pindex < fs.entry->lastr + VM_FAULT_READ)) &&
			    (fs.first_object == fs.object ||
			    (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object))) &&
			    fs.first_object->type != OBJT_DEVICE &&
			    fs.first_object->type != OBJT_PHYS &&
			    fs.first_object->type != OBJT_SG) {
				vm_pindex_t firstpindex, tmppindex;

				if (fs.first_pindex < 2 * VM_FAULT_READ)
					firstpindex = 0;
				else
					firstpindex = fs.first_pindex - 2 * VM_FAULT_READ;

				are_queues_locked = FALSE;
				/*
				 * note: partially valid pages cannot be
				 * included in the lookahead - NFS piecemeal
				 * writes will barf on it badly.
				 */
				for (tmppindex = fs.first_pindex - 1;
				    tmppindex >= firstpindex;
				    --tmppindex) {
					vm_page_t mt;

					mt = vm_page_lookup(fs.first_object, tmppindex);
					if (mt == NULL || (mt->valid != VM_PAGE_BITS_ALL))
						break;
					if (mt->busy ||
					    (mt->oflags & VPO_BUSY))
						continue;
					if (!are_queues_locked) {
						are_queues_locked = TRUE;
						vm_page_lock_queues();
					}
					if (mt->hold_count ||
					    mt->wire_count)
						continue;
					pmap_remove_all(mt);
					if (mt->dirty) {
						vm_page_deactivate(mt);
					} else {
						vm_page_cache(mt);
					}
				}
				if (are_queues_locked)
					vm_page_unlock_queues();
				ahead += behind;
				behind = 0;
			}
			if (is_first_object_locked)
				VM_OBJECT_UNLOCK(fs.first_object);
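
			/*
			 * Note on the block above: for sequential access,
			 * pages up to 2 * VM_FAULT_READ behind the faulting
			 * address are depressed (cached if clean, deactivated
			 * if dirty) on the assumption that a sequential
			 * reader will not revisit them, and the read window
			 * is shifted entirely ahead (ahead += behind).
			 */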

			/*
			 * Call the pager to retrieve the data, if any, after
			 * releasing the lock on the map.  We hold a ref on
			 * fs.object and the pages are VPO_BUSY'd.
			 */
			unlock_map(&fs);

vnode_lock:
			if (fs.object->type == OBJT_VNODE) {
				vp = fs.object->handle;
				if (vp == fs.vp)
					goto vnode_locked;
				else if (fs.vp != NULL) {
					vput(fs.vp);
					fs.vp = NULL;
				}
				locked = VOP_ISLOCKED(vp);

				if (VFS_NEEDSGIANT(vp->v_mount) && !fs.vfslocked) {
					fs.vfslocked = 1;
					if (!mtx_trylock(&Giant)) {
						VM_OBJECT_UNLOCK(fs.object);
						mtx_lock(&Giant);
						VM_OBJECT_LOCK(fs.object);
						goto vnode_lock;
					}
				}
				if (locked != LK_EXCLUSIVE)
					locked = LK_SHARED;
				/* Do not sleep for vnode lock while fs.m is busy */
				error = vget(vp, locked | LK_CANRECURSE |
				    LK_NOWAIT, curthread);
				if (error != 0) {
					int vfslocked;

					vfslocked = fs.vfslocked;
					fs.vfslocked = 0; /* Keep Giant */
					vhold(vp);
					release_page(&fs);
					unlock_and_deallocate(&fs);
					error = vget(vp, locked | LK_RETRY |
					    LK_CANRECURSE, curthread);
					vdrop(vp);
					fs.vp = vp;
					fs.vfslocked = vfslocked;
					KASSERT(error == 0,
					    ("vm_fault: vget failed"));
					goto RetryFault;
				}
				fs.vp = vp;
			}
vnode_locked:
			KASSERT(fs.vp == NULL || !fs.map->system_map,
			    ("vm_fault: vnode-backed object mapped by system map"));
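
			/*
			 * Note on the vnode locking above: vget() is first
			 * attempted with LK_NOWAIT because sleeping for the
			 * vnode lock while fs.m is busy could deadlock
			 * against an in-progress pageout.  If that fails,
			 * everything is unlocked, the vnode is held and
			 * locked with LK_RETRY, and the whole fault is
			 * restarted.
			 */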

			/*
			 * now we find out if any other pages should be paged
			 * in at this time this routine checks to see if the
			 * pages surrounding this fault reside in the same
			 * object as the page for this fault.  If they do,
			 * then they are faulted in also into the object.  The
			 * array "marray" returned contains an array of
			 * vm_page_t structs where one of them is the
			 * vm_page_t passed to the routine.  The reqpage
			 * return value is the index into the marray for the
			 * vm_page_t passed to the routine.
			 *
			 * fs.m plus the additional pages are VPO_BUSY'd.
			 */
			faultcount = vm_fault_additional_pages(
			    fs.m, behind, ahead, marray, &reqpage);

			rv = faultcount ?
			    vm_pager_get_pages(fs.object, marray, faultcount,
				reqpage) : VM_PAGER_FAIL;

			if (rv == VM_PAGER_OK) {
				/*
				 * Found the page. Leave it busy while we play
				 * with it.
				 */

				/*
				 * Relookup in case pager changed page. Pager
				 * is responsible for disposition of old page
				 * if moved.
				 */
				fs.m = vm_page_lookup(fs.object, fs.pindex);
				if (!fs.m) {
					unlock_and_deallocate(&fs);
					goto RetryFault;
				}

				hardfault++;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			/*
			 * Remove the bogus page (which does not exist at this
			 * object/offset); before doing so, we must get back
			 * our object lock to preserve our invariant.
			 *
			 * Also wake up any other process that may want to bring
			 * in this page.
			 *
			 * If this is the top-level object, we must leave the
			 * busy page to prevent another process from rushing
			 * past us, and inserting the page in that object at
			 * the same time that we are.
			 */
			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);
			/*
			 * Data outside the range of the pager or an I/O error
			 */
			/*
			 * XXX - the check for kernel_map is a kludge to work
			 * around having the machine panic on a kernel space
			 * fault w/ I/O error.
			 */
			if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
			    (rv == VM_PAGER_BAD)) {
				vm_page_lock_queues();
				vm_page_free(fs.m);
				vm_page_unlock_queues();
				fs.m = NULL;
				unlock_and_deallocate(&fs);
				return ((rv == VM_PAGER_ERROR) ? KERN_FAILURE : KERN_PROTECTION_FAILURE);
			}
			if (fs.object != fs.first_object) {
				vm_page_lock_queues();
				vm_page_free(fs.m);
				vm_page_unlock_queues();
				fs.m = NULL;
				/*
				 * XXX - we cannot just fall out at this
				 * point, m has been freed and is invalid!
				 */
			}
		}

		/*
		 * We get here if the object has default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		fs.pindex += OFF_TO_IDX(fs.object->backing_object_offset);
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_UNLOCK(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				fs.m = fs.first_m;
				VM_OBJECT_LOCK(fs.object);
			}
			fs.first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				pmap_zero_page(fs.m);
			} else {
				PCPU_INC(cnt.v_ozfod);
			}
			PCPU_INC(cnt.v_zfod);
			fs.m->valid = VM_PAGE_BITS_ALL;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			KASSERT(fs.object != next_object,
			    ("object loop %p", next_object));
			VM_OBJECT_LOCK(next_object);
			vm_object_pip_add(next_object, 1);
			if (fs.object != fs.first_object)
				vm_object_pip_wakeup(fs.object);
			VM_OBJECT_UNLOCK(fs.object);
			fs.object = next_object;
		}
	}

	KASSERT((fs.m->oflags & VPO_BUSY) != 0,
	    ("vm_fault: not busy after main loop"));
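
	/*
	 * Summary of the state at this point (illustrative): fs.m is busied
	 * (VPO_BUSY), fs.object is locked with a paging-in-progress
	 * reference held, and if fs.object is not fs.first_object then
	 * fs.first_m remains busied in fs.first_object as a placeholder.
	 */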

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			is_first_object_locked = FALSE;
			if (
				/*
				 * Only one shadow object
				 */
				(fs.object->shadow_count == 1) &&
				/*
				 * No COW refs, except us
				 */
				(fs.object->ref_count == 1) &&
				/*
				 * No one else can look this object up
				 */
				(fs.object->handle == NULL) &&
				/*
				 * No other ways to look the object up
				 */
				((fs.object->type == OBJT_DEFAULT) ||
				 (fs.object->type == OBJT_SWAP)) &&
			    (is_first_object_locked = VM_OBJECT_TRYLOCK(fs.first_object)) &&
				/*
				 * We don't chase down the shadow chain
				 */
			    fs.object == fs.first_object->backing_object) {
				vm_page_lock_queues();
				/*
				 * get rid of the unnecessary page
				 */
				vm_page_free(fs.first_m);
				/*
				 * grab the page and put it into the
				 * process'es object.  The page is
				 * automatically made dirty.
				 */
				vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
				vm_page_unlock_queues();
				vm_page_busy(fs.m);
				fs.first_m = fs.m;
				fs.m = NULL;
				PCPU_INC(cnt.v_cow_optim);
			} else {
				/*
				 * Oh, well, lets copy it.
				 */
				pmap_copy_page(fs.m, fs.first_m);
				fs.first_m->valid = VM_PAGE_BITS_ALL;
				if (wired && (fault_flags &
				    VM_FAULT_CHANGE_WIRING) == 0) {
					vm_page_wire(fs.first_m);
					vm_page_unwire(fs.m, FALSE);
				}
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
			}
			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs.object);
			VM_OBJECT_UNLOCK(fs.object);
			/*
			 * Only use the new page below...
			 */
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;
			fs.m = fs.first_m;
			if (!is_first_object_locked)
				VM_OBJECT_LOCK(fs.object);
			PCPU_INC(cnt.v_cow_faults);
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		vm_object_t retry_object;
		vm_pindex_t retry_pindex;
		vm_prot_t retry_prot;

		if (!vm_map_trylock_read(fs.map)) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		fs.lookup_still_valid = TRUE;
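
		/*
		 * Note: fs.map->timestamp advances whenever the map is
		 * modified, so comparing it against the map_generation
		 * snapshot taken after the initial vm_map_lookup() detects
		 * concurrent map changes without keeping the map locked
		 * across the page I/O done above.
		 */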
		if (fs.map->timestamp != map_generation) {
			result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
			    &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);

			/*
			 * If we don't need the page any longer, put it on the inactive
			 * list (the easiest thing to do here).  If no one needs it,
			 * pageout will grab it eventually.
			 */
			if (result != KERN_SUCCESS) {
				release_page(&fs);
				unlock_and_deallocate(&fs);

				/*
				 * If retry of map lookup would have blocked then
				 * retry fault from start.
				 */
				if (result == KERN_FAILURE)
					goto RetryFault;
				return (result);
			}
			if ((retry_object != fs.first_object) ||
			    (retry_pindex != fs.first_pindex)) {
				release_page(&fs);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Check whether the protection has changed or the object has
			 * been copied while we left the map unlocked. Changing from
			 * read to write permission is OK - we leave the page
			 * write-protected, and catch the write fault. Changing from
			 * write to read permission means that we can't mark the page
			 * write-enabled after all.
			 */
			prot &= retry_prot;
		}
	}
	/*
	 * If the page was filled by a pager, update the map entry's
	 * last read offset.  Since the pager does not return the
	 * actual set of pages that it read, this update is based on
	 * the requested set.  Typically, the requested and actual
	 * sets are the same.
	 *
	 * XXX The following assignment modifies the map
	 * without holding a write lock on it.
	 */
	if (hardfault)
		fs.entry->lastr = fs.pindex + faultcount - behind;

	if (prot & VM_PROT_WRITE) {
		vm_object_set_writeable_dirty(fs.object);

		/*
		 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
			if (fs.m->dirty == 0)
				fs.m->oflags |= VPO_NOSYNC;
		} else {
			fs.m->oflags &= ~VPO_NOSYNC;
		}

		/*
		 * If the fault is a write, we know that this page is being
		 * written NOW so dirty it explicitly to save on
		 * pmap_is_modified() calls later.
		 *
		 * Also tell the backing pager, if any, that it should remove
		 * any swap backing since the page is now dirty.
		 */
		if ((fault_type & VM_PROT_WRITE) != 0 &&
		    (fault_flags & VM_FAULT_CHANGE_WIRING) == 0) {
			vm_page_dirty(fs.m);
			vm_pager_page_unswapped(fs.m);
		}
	}

	/*
	 * Page had better still be busy
	 */
	KASSERT(fs.m->oflags & VPO_BUSY,
	    ("vm_fault: page %p not busy!", fs.m));
	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
	    ("vm_fault: page %p partially invalid", fs.m));
	VM_OBJECT_UNLOCK(fs.object);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
	if ((fault_flags & VM_FAULT_CHANGE_WIRING) == 0 && wired == 0)
		vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
	VM_OBJECT_LOCK(fs.object);
	vm_page_lock_queues();
	vm_page_flag_set(fs.m, PG_REFERENCED);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if (fault_flags & VM_FAULT_CHANGE_WIRING) {
		if (wired)
			vm_page_wire(fs.m);
		else
			vm_page_unwire(fs.m, 1);
	} else {
		vm_page_activate(fs.m);
	}
	vm_page_unlock_queues();
	vm_page_wakeup(fs.m);

	/*
	 * Unlock everything, and return
	 */
	unlock_and_deallocate(&fs);
	if (hardfault)
		curthread->td_ru.ru_majflt++;
	else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a processes address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{
	int i;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	vm_object_t object;

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		starta = 0;
	}

	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;

		addr = addra + prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL) {
			VM_OBJECT_UNLOCK(lobject);
			break;
		}
		if (m->valid == VM_PAGE_BITS_ALL &&
		    (m->flags & PG_FICTITIOUS) == 0) {
			vm_page_lock_queues();
			pmap_enter_quick(pmap, addr, m, entry->protection);
			vm_page_unlock_queues();
		}
		VM_OBJECT_UNLOCK(lobject);
	}
}
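
/*
 * Note on vm_fault_prefault() (illustrative): prefault_pageorder visits
 * pages alternately behind and ahead of the faulting address (-1, +1,
 * -2, +2, ... in units of PAGE_SIZE).  pmap_is_prefaultable() filters
 * out addresses the pmap cannot cheaply map, and only fully valid,
 * non-fictitious resident pages are entered via pmap_enter_quick(),
 * so no I/O is ever started here.
 */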

/*
 *	vm_fault_quick:
 *
 *	Ensure that the requested virtual address, which may be in userland,
 *	is valid.  Fault-in the page if necessary.  Return -1 on failure.
 */
int
vm_fault_quick(caddr_t v, int prot)
{
	int r;

	if (prot & VM_PROT_WRITE)
		r = subyte(v, fubyte(v));
	else
		r = fubyte(v);
	return(r);
}
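
/*
 * Illustrative (hypothetical) caller of vm_fault_quick(), probing a user
 * address before an access that must not fault:
 *
 *	if (vm_fault_quick((caddr_t)uaddr, VM_PROT_READ) < 0)
 *		return (EFAULT);
 *
 * The write probe above, subyte(v, fubyte(v)), faults the page in
 * writable by rewriting one byte with its current value.
 */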

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
int
vm_fault_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_offset_t va;
	int rv;

	/*
	 * We simulate a fault to get the page and enter it in the physical
	 * map.  For user wiring, we only ask for read access on currently
	 * read-only sections.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		rv = vm_fault(map, va, VM_PROT_NONE, VM_FAULT_CHANGE_WIRING);
		if (rv) {
			if (va != start)
				vm_fault_unwire(map, start, va, fictitious);
			return (rv);
		}
	}
	return (KERN_SUCCESS);
}

/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_paddr_t pa;
	vm_offset_t va;
	pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != 0) {
			pmap_change_wiring(pmap, va, FALSE);
			if (!fictitious) {
				vm_page_lock_queues();
				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
				vm_page_unlock_queues();
			}
		}
	}
}

/*
 *	Routine:
 *		vm_fault_copy_entry
 *	Function:
 *		Create new shadow object backing dst_entry with private copy of
 *		all underlying pages. When src_entry is equal to dst_entry,
 *		function implements COW for wired-down map entry. Otherwise,
 *		it forks wired entry into dst_map.
 *
 *	In/out conditions:
 *		The source and destination maps must be locked for write.
 *		The source map entry must be wired down (or be a sharing map
 *		entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t src_readonly, upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);
	src_readonly = (src_entry->protection & VM_PROT_WRITE) == 0;

	/*
	 * Create the top-level object for the destination entry. (Doesn't
	 * actually shadow anything - we copy the pages directly.)
	 */
	dst_object = vm_object_allocate(OBJT_DEFAULT,
	    OFF_TO_IDX(dst_entry->end - dst_entry->start));
#if VM_NRESERVLEVEL > 0
	dst_object->flags |= OBJ_COLORED;
	dst_object->pg_color = atop(dst_entry->start);
#endif

	VM_OBJECT_LOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	dst_entry->object.vm_object = dst_object;
	if (fork_charge != NULL) {
		KASSERT(dst_entry->uip == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->uip = curthread->td_ucred->cr_ruidinfo;
		uihold(dst_object->uip);
		*fork_charge += dst_object->charge;
	} else {
		dst_object->uip = dst_entry->uip;
		dst_entry->uip = NULL;
	}
	access = prot = dst_entry->protection;
	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the pages in the entry's range, copying each
	 * one from the source object (it should be there) to the destination
	 * object.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {

		/*
		 * Allocate a page in the destination object.
		 */
		do {
			dst_m = vm_page_alloc(dst_object, dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_UNLOCK(dst_object);
				VM_WAIT;
				VM_OBJECT_LOCK(dst_object);
			}
		} while (dst_m == NULL);

		/*
		 * Find the page in the source object, and copy it in.
		 * (Because the source is wired down, the page will be
		 * in memory.)
		 */
		VM_OBJECT_LOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
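		/*
		 * The source entry is wired, so its pages are resident
		 * somewhere in the shadow chain.  A read-only entry may
		 * not have copied pages into the top object yet, so walk
		 * down the backing objects to find them; a writable wired
		 * entry must already hold all of its pages in src_object.
		 */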
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    src_readonly &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		if (src_m == NULL)
			panic("vm_fault_copy_wired: page missing");
		pmap_copy_page(src_m, dst_m);
		VM_OBJECT_UNLOCK(object);
		dst_m->valid = VM_PAGE_BITS_ALL;
		VM_OBJECT_UNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 */
		pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade);

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_LOCK(dst_object);
		vm_page_lock_queues();
		if (upgrade) {
			vm_page_unwire(src_m, 0);
			vm_page_wire(dst_m);
		} else
			vm_page_activate(dst_m);
		vm_page_unlock_queues();
		vm_page_wakeup(dst_m);
	}
	VM_OBJECT_UNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}
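
/*
 * Example (illustrative sketch, not part of this file): the two modes of
 * vm_fault_copy_entry() correspond to its two kinds of callers.  The
 * variable names below are hypothetical.
 */
#if 0
	/*
	 * Forking a wired entry from a parent map into a child map
	 * (e.g., from the vmspace_fork() path); the swap charge for the
	 * copied pages accumulates into fork_charge.
	 */
	vm_fault_copy_entry(child_map, parent_map, child_entry,
	    parent_entry, &fork_charge);

	/*
	 * Upgrading a wired copy-on-write entry in place (src_entry ==
	 * dst_entry, e.g., when write access is granted to a wired
	 * read-only mapping); no fork charge is reported.
	 */
	vm_fault_copy_entry(map, map, entry, entry, NULL);
#endif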

/*
 * This routine checks around the requested page for other pages that
 * might be able to be faulted in.  It brackets the requested page with
 * the neighboring pages that can be paged in along with it.
 *
 * Inputs:
 *	m, rbehind, rahead
 *
 * Outputs:
 *	marray (array of vm_page_t), reqpage (index of requested page)
 *
 * Return value:
 *	number of pages in marray
 */
static int
vm_fault_additional_pages(vm_page_t m, int rbehind, int rahead,
    vm_page_t *marray, int *reqpage)
{
	int i, j;
	vm_object_t object;
	vm_pindex_t pindex, startpindex, endpindex, tpindex;
	vm_page_t rtm;
	int cbehind, cahead;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);

	object = m->object;
	pindex = m->pindex;
	cbehind = cahead = 0;

	/*
	 * If the requested page is not available, then give up now.
	 */
	if (!vm_pager_has_page(object, pindex, &cbehind, &cahead)) {
		return 0;
	}

	if ((cbehind == 0) && (cahead == 0)) {
		*reqpage = 0;
		marray[0] = m;
		return 1;
	}

	if (rahead > cahead) {
		rahead = cahead;
	}

	if (rbehind > cbehind) {
		rbehind = cbehind;
	}
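	/*
	 * Worked example: with pindex = 10 and, after the clipping
	 * above, rbehind = 4 and rahead = 3, the scans below try to
	 * build marray for pindexes 6..13 and set *reqpage = 4 for the
	 * requested page.  Either scan stops early at an already
	 * resident neighbor or at a failed allocation.
	 */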
	/*
	 * Scan backward for the read-behind pages -- in memory.
	 */
	if (pindex > 0) {
		if (rbehind > pindex) {
			rbehind = pindex;
			startpindex = 0;
		} else {
			startpindex = pindex - rbehind;
		}

		if ((rtm = TAILQ_PREV(m, pglist, listq)) != NULL &&
		    rtm->pindex >= startpindex)
			startpindex = rtm->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
		for (i = 0, tpindex = pindex - 1; tpindex >= startpindex &&
		    tpindex < pindex; i++, tpindex--) {

			rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
			    VM_ALLOC_IFNOTCACHED);
			if (rtm == NULL) {
				/*
				 * Shift the allocated pages to the
				 * beginning of the array.
				 */
				for (j = 0; j < i; j++) {
					marray[j] = marray[j + tpindex + 1 -
					    startpindex];
				}
				break;
			}

			marray[tpindex - startpindex] = rtm;
		}
	} else {
		startpindex = 0;
		i = 0;
	}

	marray[i] = m;
	/* page offset of the required page */
	*reqpage = i;

	tpindex = pindex + 1;
	i++;

	/*
	 * Scan forward for the read-ahead pages.
	 */
	endpindex = tpindex + rahead;
	if ((rtm = TAILQ_NEXT(m, listq)) != NULL && rtm->pindex < endpindex)
		endpindex = rtm->pindex;
	if (endpindex > object->size)
		endpindex = object->size;

	for (; tpindex < endpindex; i++, tpindex++) {

		rtm = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
		    VM_ALLOC_IFNOTCACHED);
		if (rtm == NULL) {
			break;
		}

		marray[i] = rtm;
	}

	/* return the number of pages */
	return i;
}
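
/*
 * Example (illustrative sketch, not part of this file): vm_fault() is the
 * expected consumer of vm_fault_additional_pages(); the fragment below
 * shows how the marray/reqpage outputs feed a single clustered pager
 * request.  The VM_FAULT_READ* window sizes and the fs.* names are
 * assumptions taken from the fault handler's conventions.
 */
#if 0
	vm_page_t marray[VM_FAULT_READ];
	int faultcount, reqpage, rv;

	faultcount = vm_fault_additional_pages(fs.m, VM_FAULT_READ_BEHIND,
	    VM_FAULT_READ_AHEAD, marray, &reqpage);
	/* Issue one pager request for the whole bracketed cluster. */
	rv = faultcount > 0 ?
	    vm_pager_get_pages(fs.object, marray, faultcount, reqpage) :
	    VM_PAGER_FAIL;
#endif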