/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/pctrie.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define PFBAK 4
#define PFFOR 4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	boolean_t	wired;

	/* Control state. */
	struct timeval	oom_start_time;
	bool		oom_started;
	int		nera;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

/*
 * Return codes for internal fault routines.
 */
enum fault_status {
	FAULT_SUCCESS = 1,	/* Return success to user. */
	FAULT_FAILURE,		/* Return failure to user. */
	FAULT_CONTINUE,		/* Continue faulting. */
	FAULT_RESTART,		/* Restart fault. */
	FAULT_OUT_OF_BOUNDS,	/* Invalid address for pager. */
	FAULT_HARD,		/* Performed I/O. */
	FAULT_SOFT,		/* Found valid page. */
	FAULT_PROTECTION_FAILURE, /* Invalid access. */
};
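
/*
 * A rough sketch of how these codes drive the helpers below:
 * vm_fault_soft_fast(), vm_fault_populate(), vm_fault_allocate(), and
 * vm_fault_lock_vnode() return FAULT_CONTINUE to keep the current fault
 * attempt going, FAULT_RESTART to drop state and redo the lookup, and a
 * terminal status (FAULT_SUCCESS, FAULT_FAILURE, FAULT_OUT_OF_BOUNDS)
 * once the fault is resolved or must be reported.  FAULT_HARD and
 * FAULT_SOFT record, per their comments, whether pager I/O was needed.
 */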

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

/*
 * Return true if a vm_pager_get_pages() call is needed in order to check
 * whether the pager might have a particular page, false if it can be determined
 * immediately that the pager can not have a copy.  For swap objects, this can
 * be checked quickly.
 */
static inline bool
fault_object_needs_getpages(vm_object_t object)
{
	VM_OBJECT_ASSERT_LOCKED(object);

	return ((object->flags & OBJ_SWAP) == 0 ||
	    !pctrie_is_empty(&object->un_pager.swp.swp_blks));
}
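
/*
 * For example, a swap-backed (OBJ_SWAP) object that has never had any
 * of its pages written to swap has an empty swp_blks trie, so a fault
 * on a non-resident page can skip the pager entirely instead of issuing
 * pointless pager I/O.
 */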

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static enum fault_status
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind;
	vm_offset_t vaddr;
	enum fault_status res;

	MPASS(fs->vp == NULL);

	res = FAULT_SUCCESS;
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		res = FAULT_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !fs->wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
	    KERN_SUCCESS) {
		res = FAULT_FAILURE;
		goto out;
	}
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (res);
}
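
/*
 * Note that the fast path above runs under the first object's read lock
 * (released via VM_OBJECT_RUNLOCK() on success) plus a busy reference:
 * it only succeeds when the page is already resident and fully valid,
 * so no pager I/O, shadow chain walk, or copy-on-write work is needed.
 */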
4327a432b84SKonstantin Belousov */ 4337a432b84SKonstantin Belousov MPASS(m != NULL); 4340012f373SJeff Roberson MPASS(vm_page_all_valid(m)); 4357a432b84SKonstantin Belousov MPASS(vm_page_xbusied(m)); 4367a432b84SKonstantin Belousov } 4377a432b84SKonstantin Belousov 4387a432b84SKonstantin Belousov static void 4397a432b84SKonstantin Belousov vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first, 4407a432b84SKonstantin Belousov vm_pindex_t last) 4417a432b84SKonstantin Belousov { 4427a432b84SKonstantin Belousov vm_page_t m; 4437a432b84SKonstantin Belousov vm_pindex_t pidx; 4447a432b84SKonstantin Belousov 4457a432b84SKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(object); 4467a432b84SKonstantin Belousov MPASS(first <= last); 4477a432b84SKonstantin Belousov for (pidx = first, m = vm_page_lookup(object, pidx); 4487a432b84SKonstantin Belousov pidx <= last; pidx++, m = vm_page_next(m)) { 4497a432b84SKonstantin Belousov vm_fault_populate_check_page(m); 4507a432b84SKonstantin Belousov vm_page_deactivate(m); 4517a432b84SKonstantin Belousov vm_page_xunbusy(m); 4527a432b84SKonstantin Belousov } 4537a432b84SKonstantin Belousov } 454c42b43a0SKonstantin Belousov 455f1b642c2SMark Johnston static enum fault_status 4562c2f4413SJeff Roberson vm_fault_populate(struct faultstate *fs) 457c42b43a0SKonstantin Belousov { 45870183daaSAlan Cox vm_offset_t vaddr; 459c42b43a0SKonstantin Belousov vm_page_t m; 4607a432b84SKonstantin Belousov vm_pindex_t map_first, map_last, pager_first, pager_last, pidx; 461d301b358SKonstantin Belousov int bdry_idx, i, npages, psind, rv; 462f1b642c2SMark Johnston enum fault_status res; 463c42b43a0SKonstantin Belousov 464c42b43a0SKonstantin Belousov MPASS(fs->object == fs->first_object); 465c42b43a0SKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 466c99d0c58SMark Johnston MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0); 467c42b43a0SKonstantin Belousov MPASS(fs->first_object->backing_object == NULL); 468c42b43a0SKonstantin Belousov MPASS(fs->lookup_still_valid); 469c42b43a0SKonstantin Belousov 4707a432b84SKonstantin Belousov pager_first = OFF_TO_IDX(fs->entry->offset); 47189564188SAlan Cox pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1; 472c42b43a0SKonstantin Belousov unlock_map(fs); 473c42b43a0SKonstantin Belousov unlock_vp(fs); 474c42b43a0SKonstantin Belousov 475f1b642c2SMark Johnston res = FAULT_SUCCESS; 476f1b642c2SMark Johnston 477c42b43a0SKonstantin Belousov /* 478c42b43a0SKonstantin Belousov * Call the pager (driver) populate() method. 479c42b43a0SKonstantin Belousov * 480c42b43a0SKonstantin Belousov * There is no guarantee that the method will be called again 481c42b43a0SKonstantin Belousov * if the current fault is for read, and a future fault is 482c42b43a0SKonstantin Belousov * for write. Report the entry's maximum allowed protection 483c42b43a0SKonstantin Belousov * to the driver. 484c42b43a0SKonstantin Belousov */ 485c42b43a0SKonstantin Belousov rv = vm_pager_populate(fs->first_object, fs->first_pindex, 486d301b358SKonstantin Belousov fs->fault_type, fs->entry->max_protection, &pager_first, 487d301b358SKonstantin Belousov &pager_last); 488c42b43a0SKonstantin Belousov 489c42b43a0SKonstantin Belousov VM_OBJECT_ASSERT_WLOCKED(fs->first_object); 490c42b43a0SKonstantin Belousov if (rv == VM_PAGER_BAD) { 491c42b43a0SKonstantin Belousov /* 492c42b43a0SKonstantin Belousov * VM_PAGER_BAD is the backdoor for a pager to request 493c42b43a0SKonstantin Belousov * normal fault handling. 
494c42b43a0SKonstantin Belousov */ 495c42b43a0SKonstantin Belousov vm_fault_restore_map_lock(fs); 496c42b43a0SKonstantin Belousov if (fs->map->timestamp != fs->map_generation) 497f1b642c2SMark Johnston return (FAULT_RESTART); 498f1b642c2SMark Johnston return (FAULT_CONTINUE); 499c42b43a0SKonstantin Belousov } 500c42b43a0SKonstantin Belousov if (rv != VM_PAGER_OK) 501f1b642c2SMark Johnston return (FAULT_FAILURE); /* AKA SIGSEGV */ 502c42b43a0SKonstantin Belousov 503c42b43a0SKonstantin Belousov /* Ensure that the driver is obeying the interface. */ 5047a432b84SKonstantin Belousov MPASS(pager_first <= pager_last); 5057a432b84SKonstantin Belousov MPASS(fs->first_pindex <= pager_last); 5067a432b84SKonstantin Belousov MPASS(fs->first_pindex >= pager_first); 5077a432b84SKonstantin Belousov MPASS(pager_last < fs->first_object->size); 508c42b43a0SKonstantin Belousov 509c42b43a0SKonstantin Belousov vm_fault_restore_map_lock(fs); 510d301b358SKonstantin Belousov bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >> 511d301b358SKonstantin Belousov MAP_ENTRY_SPLIT_BOUNDARY_SHIFT; 5127a432b84SKonstantin Belousov if (fs->map->timestamp != fs->map_generation) { 513d301b358SKonstantin Belousov if (bdry_idx == 0) { 5147a432b84SKonstantin Belousov vm_fault_populate_cleanup(fs->first_object, pager_first, 5157a432b84SKonstantin Belousov pager_last); 516d301b358SKonstantin Belousov } else { 517d301b358SKonstantin Belousov m = vm_page_lookup(fs->first_object, pager_first); 518d301b358SKonstantin Belousov if (m != fs->m) 519d301b358SKonstantin Belousov vm_page_xunbusy(m); 520d301b358SKonstantin Belousov } 521f1b642c2SMark Johnston return (FAULT_RESTART); 5227a432b84SKonstantin Belousov } 523c42b43a0SKonstantin Belousov 524c42b43a0SKonstantin Belousov /* 5257a432b84SKonstantin Belousov * The map is unchanged after our last unlock. Process the fault. 5267a432b84SKonstantin Belousov * 527d301b358SKonstantin Belousov * First, the special case of largepage mappings, where 528d301b358SKonstantin Belousov * populate only busies the first page in superpage run. 529d301b358SKonstantin Belousov */ 530d301b358SKonstantin Belousov if (bdry_idx != 0) { 53178257765SMark Johnston KASSERT(PMAP_HAS_LARGEPAGES, 53278257765SMark Johnston ("missing pmap support for large pages")); 533d301b358SKonstantin Belousov m = vm_page_lookup(fs->first_object, pager_first); 534d301b358SKonstantin Belousov vm_fault_populate_check_page(m); 535d301b358SKonstantin Belousov VM_OBJECT_WUNLOCK(fs->first_object); 536d301b358SKonstantin Belousov vaddr = fs->entry->start + IDX_TO_OFF(pager_first) - 537d301b358SKonstantin Belousov fs->entry->offset; 538d301b358SKonstantin Belousov /* assert alignment for entry */ 539d301b358SKonstantin Belousov KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0, 540d301b358SKonstantin Belousov ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx", 541d301b358SKonstantin Belousov (uintmax_t)fs->entry->start, (uintmax_t)pager_first, 542d301b358SKonstantin Belousov (uintmax_t)fs->entry->offset, (uintmax_t)vaddr)); 543d301b358SKonstantin Belousov KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0, 544d301b358SKonstantin Belousov ("unaligned superpage m %p %#jx", m, 545d301b358SKonstantin Belousov (uintmax_t)VM_PAGE_TO_PHYS(m))); 546d301b358SKonstantin Belousov rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, 547d301b358SKonstantin Belousov fs->fault_type | (fs->wired ? 
PMAP_ENTER_WIRED : 0) | 548d301b358SKonstantin Belousov PMAP_ENTER_LARGEPAGE, bdry_idx); 549d301b358SKonstantin Belousov VM_OBJECT_WLOCK(fs->first_object); 550d301b358SKonstantin Belousov vm_page_xunbusy(m); 551f1b642c2SMark Johnston if (rv != KERN_SUCCESS) { 552f1b642c2SMark Johnston res = FAULT_FAILURE; 553c7b913aaSKonstantin Belousov goto out; 554f1b642c2SMark Johnston } 555d301b358SKonstantin Belousov if ((fs->fault_flags & VM_FAULT_WIRE) != 0) { 556d301b358SKonstantin Belousov for (i = 0; i < atop(pagesizes[bdry_idx]); i++) 557d301b358SKonstantin Belousov vm_page_wire(m + i); 558d301b358SKonstantin Belousov } 559d301b358SKonstantin Belousov if (fs->m_hold != NULL) { 560d301b358SKonstantin Belousov *fs->m_hold = m + (fs->first_pindex - pager_first); 561d301b358SKonstantin Belousov vm_page_wire(*fs->m_hold); 562d301b358SKonstantin Belousov } 563d301b358SKonstantin Belousov goto out; 564d301b358SKonstantin Belousov } 565d301b358SKonstantin Belousov 566d301b358SKonstantin Belousov /* 5677a432b84SKonstantin Belousov * The range [pager_first, pager_last] that is given to the 5687a432b84SKonstantin Belousov * pager is only a hint. The pager may populate any range 5697a432b84SKonstantin Belousov * within the object that includes the requested page index. 5707a432b84SKonstantin Belousov * In case the pager expanded the range, clip it to fit into 5717a432b84SKonstantin Belousov * the map entry. 572c42b43a0SKonstantin Belousov */ 57389564188SAlan Cox map_first = OFF_TO_IDX(fs->entry->offset); 57489564188SAlan Cox if (map_first > pager_first) { 5757a432b84SKonstantin Belousov vm_fault_populate_cleanup(fs->first_object, pager_first, 5767a432b84SKonstantin Belousov map_first - 1); 57789564188SAlan Cox pager_first = map_first; 57889564188SAlan Cox } 57989564188SAlan Cox map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1; 58089564188SAlan Cox if (map_last < pager_last) { 5817a432b84SKonstantin Belousov vm_fault_populate_cleanup(fs->first_object, map_last + 1, 5827a432b84SKonstantin Belousov pager_last); 58389564188SAlan Cox pager_last = map_last; 58489564188SAlan Cox } 58589564188SAlan Cox for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx); 58670183daaSAlan Cox pidx <= pager_last; 58770183daaSAlan Cox pidx += npages, m = vm_page_next(&m[npages - 1])) { 58870183daaSAlan Cox vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset; 5898dc8feb5SJason A. Harmening 59070183daaSAlan Cox psind = m->psind; 59170183daaSAlan Cox if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 || 59270183daaSAlan Cox pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last || 5932c2f4413SJeff Roberson !pmap_ps_enabled(fs->map->pmap) || fs->wired)) 59470183daaSAlan Cox psind = 0; 5958dc8feb5SJason A. Harmening 59670183daaSAlan Cox npages = atop(pagesizes[psind]); 59770183daaSAlan Cox for (i = 0; i < npages; i++) { 59870183daaSAlan Cox vm_fault_populate_check_page(&m[i]); 5992c2f4413SJeff Roberson vm_fault_dirty(fs, &m[i]); 60070183daaSAlan Cox } 601c42b43a0SKonstantin Belousov VM_OBJECT_WUNLOCK(fs->first_object); 6022c2f4413SJeff Roberson rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type | 6032c2f4413SJeff Roberson (fs->wired ? PMAP_ENTER_WIRED : 0), psind); 6048dc8feb5SJason A. Harmening 6058dc8feb5SJason A. Harmening /* 6068dc8feb5SJason A. Harmening * pmap_enter() may fail for a superpage mapping if additional 6078dc8feb5SJason A. Harmening * protection policies prevent the full mapping. 6088dc8feb5SJason A. 
Harmening * For example, this will happen on amd64 if the entire 6098dc8feb5SJason A. Harmening * address range does not share the same userspace protection 6108dc8feb5SJason A. Harmening * key. Revert to single-page mappings if this happens. 6118dc8feb5SJason A. Harmening */ 6128dc8feb5SJason A. Harmening MPASS(rv == KERN_SUCCESS || 6138dc8feb5SJason A. Harmening (psind > 0 && rv == KERN_PROTECTION_FAILURE)); 6148dc8feb5SJason A. Harmening if (__predict_false(psind > 0 && 6158dc8feb5SJason A. Harmening rv == KERN_PROTECTION_FAILURE)) { 61688642d97SMark Johnston MPASS(!fs->wired); 617e7a9df16SKonstantin Belousov for (i = 0; i < npages; i++) { 618e7a9df16SKonstantin Belousov rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i), 61988642d97SMark Johnston &m[i], fs->prot, fs->fault_type, 0); 620e7a9df16SKonstantin Belousov MPASS(rv == KERN_SUCCESS); 621e7a9df16SKonstantin Belousov } 622e7a9df16SKonstantin Belousov } 6238dc8feb5SJason A. Harmening 624c42b43a0SKonstantin Belousov VM_OBJECT_WLOCK(fs->first_object); 62570183daaSAlan Cox for (i = 0; i < npages; i++) { 62688642d97SMark Johnston if ((fs->fault_flags & VM_FAULT_WIRE) != 0 && 62788642d97SMark Johnston m[i].pindex == fs->first_pindex) 62870183daaSAlan Cox vm_page_wire(&m[i]); 6299f5632e6SMark Johnston else 63070183daaSAlan Cox vm_page_activate(&m[i]); 63188642d97SMark Johnston if (fs->m_hold != NULL && 63288642d97SMark Johnston m[i].pindex == fs->first_pindex) { 6332c2f4413SJeff Roberson (*fs->m_hold) = &m[i]; 634eeacb3b0SMark Johnston vm_page_wire(&m[i]); 635c42b43a0SKonstantin Belousov } 6364cdea4a8SJeff Roberson vm_page_xunbusy(&m[i]); 63770183daaSAlan Cox } 638c42b43a0SKonstantin Belousov } 639d301b358SKonstantin Belousov out: 640c42b43a0SKonstantin Belousov curthread->td_ru.ru_majflt++; 641f1b642c2SMark Johnston return (res); 642c42b43a0SKonstantin Belousov } 643c42b43a0SKonstantin Belousov 644df08823dSKonstantin Belousov static int prot_fault_translation; 645df08823dSKonstantin Belousov SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN, 646df08823dSKonstantin Belousov &prot_fault_translation, 0, 647df08823dSKonstantin Belousov "Control signal to deliver on protection fault"); 648df08823dSKonstantin Belousov 649df08823dSKonstantin Belousov /* compat definition to keep common code for signal translation */ 650df08823dSKonstantin Belousov #define UCODE_PAGEFLT 12 651df08823dSKonstantin Belousov #ifdef T_PAGEFLT 652df08823dSKonstantin Belousov _Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT"); 653df08823dSKonstantin Belousov #endif 654df08823dSKonstantin Belousov 655df8bae1dSRodney W. Grimes /* 656df08823dSKonstantin Belousov * vm_fault_trap: 657df8bae1dSRodney W. Grimes * 658956f3135SPhilippe Charnier * Handle a page fault occurring at the given address, 659df8bae1dSRodney W. Grimes * requiring the given permissions, in the map specified. 660df8bae1dSRodney W. Grimes * If successful, the page is inserted into the 661df8bae1dSRodney W. Grimes * associated physical map. 662df8bae1dSRodney W. Grimes * 663df8bae1dSRodney W. Grimes * NOTE: the given address should be truncated to the 664df8bae1dSRodney W. Grimes * proper page address. 665df8bae1dSRodney W. Grimes * 666df8bae1dSRodney W. Grimes * KERN_SUCCESS is returned if the page fault is handled; otherwise, 667df8bae1dSRodney W. Grimes * a standard error specifying why the fault is fatal is returned. 668df8bae1dSRodney W. Grimes * 669df8bae1dSRodney W. Grimes * The map in question must be referenced, and remains so. 
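
/*
 * To summarize the contract implemented above: a pager's populate()
 * method may return any exclusively busied, fully valid run of pages
 * that covers the faulting index; vm_fault_populate() then clips that
 * run to the map entry, enters mappings (preferring superpages when
 * alignment and pmap policy allow), and unbusies every page it mapped.
 */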

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}
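
/*
 * In summary, the Mach-error-to-signal translation above is:
 * KERN_FAILURE / KERN_INVALID_ADDRESS -> SIGSEGV + SEGV_MAPERR,
 * KERN_RESOURCE_SHORTAGE -> SIGBUS + BUS_OOMERR,
 * KERN_OUT_OF_BOUNDS -> SIGBUS + BUS_OBJERR, and
 * KERN_PROTECTION_FAILURE -> SIGSEGV + SEGV_ACCERR or the historical
 * SIGBUS + UCODE_PAGEFLT, depending on machdep.prot_fault_translation
 * and the binary's osrel.
 */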

static enum fault_status
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (FAULT_CONTINUE);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (FAULT_CONTINUE);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (FAULT_CONTINUE);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (FAULT_RESTART);
}
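
/*
 * The protocol above deserves emphasis: the first vget() uses LK_NOWAIT
 * because, per the comment, sleeping on the vnode lock while holding a
 * busy page or a paging-in-progress reference could deadlock.  Only
 * after all fault state has been dropped does it sleep with LK_RETRY,
 * and it then returns FAULT_RESTART so that the entire lookup is redone
 * with fs->vp already held.
 */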

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is "# of sequential faults
		 * x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}
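
/*
 * Worked example, assuming VM_FAULT_READ_AHEAD_MIN is 7 (see vm_map.h
 * for the authoritative values): the first sequential fault requests a
 * window of 7 pages, the next one 7 + 7 + 1 = 15, then 23, 31, and so
 * on, growing by VM_FAULT_READ_AHEAD_MIN + 1 per sequential fault until
 * VM_FAULT_READ_AHEAD_MAX is reached, at which point drop-behind kicks
 * in via vm_fault_dontneed().  A single non-sequential fault resets the
 * window to zero.
 */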

static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}
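
/*
 * vm_fault_relookup() is the cheap revalidation performed after the map
 * was unlocked, e.g. for pager I/O: if the map timestamp is unchanged
 * the old lookup still stands; otherwise the entry is looked up again,
 * and KERN_RESTART tells the caller to redo the fault from scratch
 * whenever the object, pindex, or protection changed in a way that
 * cannot be patched up in place.
 */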

static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	KASSERT(fs->object != fs->first_object,
	    ("source and target COW objects are identical"));

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;

		/*
		 * Typically, the shadow object is either private to this
		 * address space (OBJ_ONEMAPPING) or its pages are read only.
		 * In the highly unusual case where the pages of a shadow object
		 * are read/write shared between this and other address spaces,
		 * we need to ensure that any pmap-level mappings to the
		 * original, copy-on-write page from the backing object are
		 * removed from those other address spaces.
		 *
		 * The flag check is racy, but this is tolerable: if
		 * OBJ_ONEMAPPING is cleared after the check, the busy state
		 * ensures that new mappings of m_cow can't be created.
		 * pmap_enter() will replace an existing mapping in the current
		 * address space.  If OBJ_ONEMAPPING is set after the check,
		 * removing mappings will at worst trigger some unnecessary page
		 * faults.
		 */
		vm_page_assert_xbusied(fs->m_cow);
		if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
			pmap_remove_all(fs->m_cow);
	}

	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}
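
/*
 * In other words, vm_fault_cow() has a fast and a slow path: when the
 * anonymous backing object holds its single reference through the top
 * object, the page is simply renamed into the top object with no copy;
 * otherwise the data is copied by pmap_copy_page() and the original
 * page is kept in fs->m_cow until pmap_enter() completes, per the
 * comments above.
 */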

static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}
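
/*
 * When vm_fault_next() returns false the shadow chain is exhausted;
 * the caller is then expected to fall back to vm_fault_zerofill()
 * below, which satisfies the fault from the top-level object with a
 * zeroed page.
 */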
109991eb2e90SJeff Roberson */ 110091eb2e90SJeff Roberson if (fs->object != fs->first_object) { 110191eb2e90SJeff Roberson vm_object_pip_wakeup(fs->object); 110291eb2e90SJeff Roberson fs->object = fs->first_object; 110391eb2e90SJeff Roberson fs->pindex = fs->first_pindex; 110491eb2e90SJeff Roberson } 110591eb2e90SJeff Roberson MPASS(fs->first_m != NULL); 110691eb2e90SJeff Roberson MPASS(fs->m == NULL); 110791eb2e90SJeff Roberson fs->m = fs->first_m; 110891eb2e90SJeff Roberson fs->first_m = NULL; 110991eb2e90SJeff Roberson 111091eb2e90SJeff Roberson /* 111191eb2e90SJeff Roberson * Zero the page if necessary and mark it valid. 111291eb2e90SJeff Roberson */ 111391eb2e90SJeff Roberson if ((fs->m->flags & PG_ZERO) == 0) { 111491eb2e90SJeff Roberson pmap_zero_page(fs->m); 111591eb2e90SJeff Roberson } else { 111691eb2e90SJeff Roberson VM_CNT_INC(v_ozfod); 111791eb2e90SJeff Roberson } 111891eb2e90SJeff Roberson VM_CNT_INC(v_zfod); 111991eb2e90SJeff Roberson vm_page_valid(fs->m); 112091eb2e90SJeff Roberson } 112191eb2e90SJeff Roberson 1122df794f5cSJeff Roberson /* 1123174aad04SKonstantin Belousov * Handle repeated page allocation failures, starting the OOM killer 1124174aad04SKonstantin Belousov * once a timeout has expired. Returns true if the caller should do vm_waitpfault() after the call. 1125174aad04SKonstantin Belousov */ 1126174aad04SKonstantin Belousov static bool 1127174aad04SKonstantin Belousov vm_fault_allocate_oom(struct faultstate *fs) 1128174aad04SKonstantin Belousov { 1129174aad04SKonstantin Belousov struct timeval now; 1130174aad04SKonstantin Belousov 1131174aad04SKonstantin Belousov unlock_and_deallocate(fs); 1132174aad04SKonstantin Belousov if (vm_pfault_oom_attempts < 0) 1133174aad04SKonstantin Belousov return (true); 1134174aad04SKonstantin Belousov if (!fs->oom_started) { 1135174aad04SKonstantin Belousov fs->oom_started = true; 1136174aad04SKonstantin Belousov getmicrotime(&fs->oom_start_time); 1137174aad04SKonstantin Belousov return (true); 1138174aad04SKonstantin Belousov } 1139174aad04SKonstantin Belousov 1140174aad04SKonstantin Belousov getmicrotime(&now); 1141174aad04SKonstantin Belousov timevalsub(&now, &fs->oom_start_time); 1142174aad04SKonstantin Belousov if (now.tv_sec < vm_pfault_oom_attempts * vm_pfault_oom_wait) 1143174aad04SKonstantin Belousov return (true); 1144174aad04SKonstantin Belousov 1145174aad04SKonstantin Belousov if (bootverbose) 1146174aad04SKonstantin Belousov printf( 1147174aad04SKonstantin Belousov "proc %d (%s) failed to alloc page on fault, starting OOM\n", 1148174aad04SKonstantin Belousov curproc->p_pid, curproc->p_comm); 1149174aad04SKonstantin Belousov vm_pageout_oom(VM_OOM_MEM_PF); 1150174aad04SKonstantin Belousov fs->oom_started = false; 1151174aad04SKonstantin Belousov return (false); 1152174aad04SKonstantin Belousov } 1153174aad04SKonstantin Belousov 1154174aad04SKonstantin Belousov /* 1155df794f5cSJeff Roberson * Allocate a page directly or via the object populate method.
1156df794f5cSJeff Roberson */ 1157f1b642c2SMark Johnston static enum fault_status 1158df794f5cSJeff Roberson vm_fault_allocate(struct faultstate *fs) 1159df794f5cSJeff Roberson { 1160df794f5cSJeff Roberson struct domainset *dset; 1161f1b642c2SMark Johnston enum fault_status res; 1162df794f5cSJeff Roberson 1163df794f5cSJeff Roberson if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) { 1164f1b642c2SMark Johnston res = vm_fault_lock_vnode(fs, true); 1165f1b642c2SMark Johnston MPASS(res == FAULT_CONTINUE || res == FAULT_RESTART); 1166f1b642c2SMark Johnston if (res == FAULT_RESTART) 1167f1b642c2SMark Johnston return (res); 1168df794f5cSJeff Roberson } 1169df794f5cSJeff Roberson 1170f1b642c2SMark Johnston if (fs->pindex >= fs->object->size) { 1171f1b642c2SMark Johnston unlock_and_deallocate(fs); 1172f1b642c2SMark Johnston return (FAULT_OUT_OF_BOUNDS); 1173f1b642c2SMark Johnston } 1174df794f5cSJeff Roberson 1175df794f5cSJeff Roberson if (fs->object == fs->first_object && 1176df794f5cSJeff Roberson (fs->first_object->flags & OBJ_POPULATE) != 0 && 1177df794f5cSJeff Roberson fs->first_object->shadow_count == 0) { 1178f1b642c2SMark Johnston res = vm_fault_populate(fs); 1179f1b642c2SMark Johnston switch (res) { 1180f1b642c2SMark Johnston case FAULT_SUCCESS: 1181f1b642c2SMark Johnston case FAULT_FAILURE: 1182f1b642c2SMark Johnston case FAULT_RESTART: 1183f1b642c2SMark Johnston unlock_and_deallocate(fs); 1184f1b642c2SMark Johnston return (res); 1185f1b642c2SMark Johnston case FAULT_CONTINUE: 1186df794f5cSJeff Roberson /* 1187df794f5cSJeff Roberson * Pager's populate() method 1188df794f5cSJeff Roberson * returned VM_PAGER_BAD. 1189df794f5cSJeff Roberson */ 1190df794f5cSJeff Roberson break; 1191df794f5cSJeff Roberson default: 1192df794f5cSJeff Roberson panic("inconsistent return codes"); 1193df794f5cSJeff Roberson } 1194df794f5cSJeff Roberson } 1195df794f5cSJeff Roberson 1196df794f5cSJeff Roberson /* 1197df794f5cSJeff Roberson * Allocate a new page for this object/offset pair. 1198df794f5cSJeff Roberson * 1199b801c79dSMark Johnston * If the process has a fatal signal pending, prioritize the allocation 1200b801c79dSMark Johnston * with the expectation that the process will exit shortly and free some 1201b801c79dSMark Johnston * pages. In particular, the signal may have been posted by the page 1202b801c79dSMark Johnston * daemon in an attempt to resolve an out-of-memory condition. 1203b801c79dSMark Johnston * 1204b801c79dSMark Johnston * The unlocked read of the p_flag is harmless. At worst, the P_KILLED 1205b801c79dSMark Johnston * flag might not be observed here, and the allocation fails, causing a 1206b801c79dSMark Johnston * restart and a fresh read of the p_flag. 1207df794f5cSJeff Roberson */ 1208df794f5cSJeff Roberson dset = fs->object->domain.dr_policy; 1209df794f5cSJeff Roberson if (dset == NULL) 1210df794f5cSJeff Roberson dset = curthread->td_domain.dr_policy; 1211df794f5cSJeff Roberson if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) { 1212df794f5cSJeff Roberson #if VM_NRESERVLEVEL > 0 1213df794f5cSJeff Roberson vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex); 1214df794f5cSJeff Roberson #endif 1215b801c79dSMark Johnston fs->m = vm_page_alloc(fs->object, fs->pindex, 1216b801c79dSMark Johnston P_KILLED(curproc) ?
VM_ALLOC_SYSTEM : 0); 1217df794f5cSJeff Roberson } 1218df794f5cSJeff Roberson if (fs->m == NULL) { 1219174aad04SKonstantin Belousov if (vm_fault_allocate_oom(fs)) 1220df794f5cSJeff Roberson vm_waitpfault(dset, vm_pfault_oom_wait * hz); 1221f1b642c2SMark Johnston return (FAULT_RESTART); 1222df794f5cSJeff Roberson } 1223174aad04SKonstantin Belousov fs->oom_started = false; 1224df794f5cSJeff Roberson 1225f1b642c2SMark Johnston return (FAULT_CONTINUE); 1226df794f5cSJeff Roberson } 12275909dafeSJeff Roberson 12285909dafeSJeff Roberson /* 12295909dafeSJeff Roberson * Call the pager to retrieve the page if there is a chance 12305909dafeSJeff Roberson * that the pager has it, and potentially retrieve additional 12315909dafeSJeff Roberson * pages at the same time. 12325909dafeSJeff Roberson */ 1233f1b642c2SMark Johnston static enum fault_status 123445c09a74SMark Johnston vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp) 12355909dafeSJeff Roberson { 12365909dafeSJeff Roberson vm_offset_t e_end, e_start; 12375909dafeSJeff Roberson int ahead, behind, cluster_offset, rv; 1238f1b642c2SMark Johnston enum fault_status status; 12395909dafeSJeff Roberson u_char behavior; 12405909dafeSJeff Roberson 12415909dafeSJeff Roberson /* 12425909dafeSJeff Roberson * Prepare for unlocking the map. Save the map 12435909dafeSJeff Roberson * entry's start and end addresses, which are used to 12445909dafeSJeff Roberson * optimize the size of the pager operation below. 12455909dafeSJeff Roberson * Even if the map entry's addresses change after 12465909dafeSJeff Roberson * unlocking the map, using the saved addresses is 12475909dafeSJeff Roberson * safe. 12485909dafeSJeff Roberson */ 12495909dafeSJeff Roberson e_start = fs->entry->start; 12505909dafeSJeff Roberson e_end = fs->entry->end; 12515909dafeSJeff Roberson behavior = vm_map_entry_behavior(fs->entry); 12525909dafeSJeff Roberson 12535909dafeSJeff Roberson /* 125445c09a74SMark Johnston * If the pager for the current object might have 125545c09a74SMark Johnston * the page, then determine the number of additional 125645c09a74SMark Johnston * pages to read and potentially reprioritize 125745c09a74SMark Johnston * previously read pages for earlier reclamation. 125845c09a74SMark Johnston * These operations should only be performed once per 125945c09a74SMark Johnston * page fault. Even if the current pager doesn't 126045c09a74SMark Johnston * have the page, the number of additional pages to 126145c09a74SMark Johnston * read will apply to subsequent objects in the 126245c09a74SMark Johnston * shadow chain. 126345c09a74SMark Johnston */ 126445c09a74SMark Johnston if (fs->nera == -1 && !P_KILLED(curproc)) 126545c09a74SMark Johnston fs->nera = vm_fault_readahead(fs); 126645c09a74SMark Johnston 126745c09a74SMark Johnston /* 12685909dafeSJeff Roberson * Release the map lock before locking the vnode or 12695909dafeSJeff Roberson * sleeping in the pager. (If the current object has 12705909dafeSJeff Roberson * a shadow, then an earlier iteration of this loop 12715909dafeSJeff Roberson * may have already unlocked the map.) 
12725909dafeSJeff Roberson */ 12735909dafeSJeff Roberson unlock_map(fs); 12745909dafeSJeff Roberson 1275f1b642c2SMark Johnston status = vm_fault_lock_vnode(fs, false); 1276f1b642c2SMark Johnston MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART); 1277f1b642c2SMark Johnston if (status == FAULT_RESTART) 1278f1b642c2SMark Johnston return (status); 12795909dafeSJeff Roberson KASSERT(fs->vp == NULL || !fs->map->system_map, 12805909dafeSJeff Roberson ("vm_fault: vnode-backed object mapped by system map")); 12815909dafeSJeff Roberson 12825909dafeSJeff Roberson /* 12835909dafeSJeff Roberson * Page in the requested page and hint the pager 12845909dafeSJeff Roberson * that it may also bring in surrounding pages. 12855909dafeSJeff Roberson */ 128645c09a74SMark Johnston if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM || 12875909dafeSJeff Roberson P_KILLED(curproc)) { 12885909dafeSJeff Roberson behind = 0; 12895909dafeSJeff Roberson ahead = 0; 12905909dafeSJeff Roberson } else { 12915909dafeSJeff Roberson /* Is this a sequential fault? */ 129245c09a74SMark Johnston if (fs->nera > 0) { 12935909dafeSJeff Roberson behind = 0; 129445c09a74SMark Johnston ahead = fs->nera; 12955909dafeSJeff Roberson } else { 12965909dafeSJeff Roberson /* 12975909dafeSJeff Roberson * Request a cluster of pages that is 12985909dafeSJeff Roberson * aligned to a VM_FAULT_READ_DEFAULT 12995909dafeSJeff Roberson * page offset boundary within the 13005909dafeSJeff Roberson * object. Alignment to a page offset 13015909dafeSJeff Roberson * boundary is more likely to coincide 13025909dafeSJeff Roberson * with the underlying file system 13035909dafeSJeff Roberson * block than alignment to a virtual 13045909dafeSJeff Roberson * address boundary. 13055909dafeSJeff Roberson */ 13065909dafeSJeff Roberson cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT; 13075909dafeSJeff Roberson behind = ulmin(cluster_offset, 13085909dafeSJeff Roberson atop(fs->vaddr - e_start)); 13095909dafeSJeff Roberson ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset; 13105909dafeSJeff Roberson } 13115909dafeSJeff Roberson ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1); 13125909dafeSJeff Roberson } 13135909dafeSJeff Roberson *behindp = behind; 13145909dafeSJeff Roberson *aheadp = ahead; 13155909dafeSJeff Roberson rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp); 13165909dafeSJeff Roberson if (rv == VM_PAGER_OK) 1317f1b642c2SMark Johnston return (FAULT_HARD); 13185909dafeSJeff Roberson if (rv == VM_PAGER_ERROR) 13195909dafeSJeff Roberson printf("vm_fault: pager read error, pid %d (%s)\n", 13205909dafeSJeff Roberson curproc->p_pid, curproc->p_comm); 13215909dafeSJeff Roberson /* 13225909dafeSJeff Roberson * If an I/O error occurred or the requested page was 13235909dafeSJeff Roberson * outside the range of the pager, clean up and return 13245909dafeSJeff Roberson * an error.
13255909dafeSJeff Roberson */ 132645c09a74SMark Johnston if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) { 132745c09a74SMark Johnston VM_OBJECT_WLOCK(fs->object); 132845c09a74SMark Johnston fault_page_free(&fs->m); 132945c09a74SMark Johnston unlock_and_deallocate(fs); 1330f1b642c2SMark Johnston return (FAULT_OUT_OF_BOUNDS); 133145c09a74SMark Johnston } 133245c09a74SMark Johnston KASSERT(rv == VM_PAGER_FAIL, 13339a89977bSPeter Jeremy ("%s: unexpected pager error %d", __func__, rv)); 1334f1b642c2SMark Johnston return (FAULT_CONTINUE); 13355909dafeSJeff Roberson } 13365909dafeSJeff Roberson 13375949b1caSJeff Roberson /* 1338bef91632SJeff Roberson * Wait/Retry if the page is busy. We have to do this if the page is 1339bef91632SJeff Roberson * either exclusive or shared busy because the vm_pager may be using 1340bef91632SJeff Roberson * read busy for pageouts (and even pageins if it is the vnode pager), 1341bef91632SJeff Roberson * and we could end up trying to pagein and pageout the same page 1342bef91632SJeff Roberson * simultaneously. 1343bef91632SJeff Roberson * 1344bef91632SJeff Roberson * We can theoretically allow the busy case on a read fault if the page 1345bef91632SJeff Roberson * is marked valid, but since such pages are typically already pmap'd, 1346bef91632SJeff Roberson * putting that special case in might be more effort than it is worth. 1347bef91632SJeff Roberson * We cannot under any circumstances mess around with a shared busied 1348bef91632SJeff Roberson * page except, perhaps, to pmap it. 1349bef91632SJeff Roberson */ 1350bef91632SJeff Roberson static void 1351bef91632SJeff Roberson vm_fault_busy_sleep(struct faultstate *fs) 1352bef91632SJeff Roberson { 1353bef91632SJeff Roberson /* 1354bef91632SJeff Roberson * Reference the page before unlocking and 1355bef91632SJeff Roberson * sleeping so that the page daemon is less 1356bef91632SJeff Roberson * likely to reclaim it. 1357bef91632SJeff Roberson */ 1358bef91632SJeff Roberson vm_page_aflag_set(fs->m, PGA_REFERENCED); 1359bef91632SJeff Roberson if (fs->object != fs->first_object) { 1360bef91632SJeff Roberson fault_page_release(&fs->first_m); 1361bef91632SJeff Roberson vm_object_pip_wakeup(fs->first_object); 1362bef91632SJeff Roberson } 1363bef91632SJeff Roberson vm_object_pip_wakeup(fs->object); 1364bef91632SJeff Roberson unlock_map(fs); 136587b64663SMark Johnston if (fs->m != vm_page_lookup(fs->object, fs->pindex) || 136687b64663SMark Johnston !vm_page_busy_sleep(fs->m, "vmpfw", 0)) 1367bef91632SJeff Roberson VM_OBJECT_WUNLOCK(fs->object); 1368bef91632SJeff Roberson VM_CNT_INC(v_intrans); 1369bef91632SJeff Roberson vm_object_deallocate(fs->first_object); 1370bef91632SJeff Roberson } 1371bef91632SJeff Roberson 1372d47d3a94SMark Johnston /* 1373d47d3a94SMark Johnston * Handle page lookup, populate, allocate, page-in for the current 1374d47d3a94SMark Johnston * object. 1375d47d3a94SMark Johnston * 1376d47d3a94SMark Johnston * The object is locked on entry and will remain locked with a return 1377d47d3a94SMark Johnston * code of FAULT_CONTINUE so that fault may follow the shadow chain. 1378d47d3a94SMark Johnston * Otherwise, the object will be unlocked upon return.
1379d47d3a94SMark Johnston */ 1380d47d3a94SMark Johnston static enum fault_status 1381d47d3a94SMark Johnston vm_fault_object(struct faultstate *fs, int *behindp, int *aheadp) 1382d47d3a94SMark Johnston { 1383d47d3a94SMark Johnston enum fault_status res; 1384d47d3a94SMark Johnston bool dead; 1385d47d3a94SMark Johnston 1386d47d3a94SMark Johnston /* 1387d47d3a94SMark Johnston * If the object is marked for imminent termination, we retry 1388d47d3a94SMark Johnston * here, since the collapse pass has raced with us. Otherwise, 1389d47d3a94SMark Johnston * if we see a terminally dead object, return failure. 1390d47d3a94SMark Johnston */ 1391d47d3a94SMark Johnston if ((fs->object->flags & OBJ_DEAD) != 0) { 1392d47d3a94SMark Johnston dead = fs->object->type == OBJT_DEAD; 1393d47d3a94SMark Johnston unlock_and_deallocate(fs); 1394d47d3a94SMark Johnston if (dead) 1395d47d3a94SMark Johnston return (FAULT_PROTECTION_FAILURE); 1396d47d3a94SMark Johnston pause("vmf_de", 1); 1397d47d3a94SMark Johnston return (FAULT_RESTART); 1398d47d3a94SMark Johnston } 1399d47d3a94SMark Johnston 1400d47d3a94SMark Johnston /* 1401d47d3a94SMark Johnston * See if the page is resident. 1402d47d3a94SMark Johnston */ 1403d47d3a94SMark Johnston fs->m = vm_page_lookup(fs->object, fs->pindex); 1404d47d3a94SMark Johnston if (fs->m != NULL) { 1405d47d3a94SMark Johnston if (!vm_page_tryxbusy(fs->m)) { 1406d47d3a94SMark Johnston vm_fault_busy_sleep(fs); 1407d47d3a94SMark Johnston return (FAULT_RESTART); 1408d47d3a94SMark Johnston } 1409d47d3a94SMark Johnston 1410d47d3a94SMark Johnston /* 1411d47d3a94SMark Johnston * The page is marked busy for other processes and the 1412d47d3a94SMark Johnston * pagedaemon. If it is still completely valid we are 1413d47d3a94SMark Johnston * done. 1414d47d3a94SMark Johnston */ 1415d47d3a94SMark Johnston if (vm_page_all_valid(fs->m)) { 1416d47d3a94SMark Johnston VM_OBJECT_WUNLOCK(fs->object); 1417d47d3a94SMark Johnston return (FAULT_SOFT); 1418d47d3a94SMark Johnston } 1419d47d3a94SMark Johnston } 1420d47d3a94SMark Johnston VM_OBJECT_ASSERT_WLOCKED(fs->object); 1421d47d3a94SMark Johnston 1422d47d3a94SMark Johnston /* 1423d47d3a94SMark Johnston * Page is not resident. If the pager might contain the page 1424d47d3a94SMark Johnston * or this is the beginning of the search, allocate a new 14255d32157dSMark Johnston * page. 1426d47d3a94SMark Johnston */ 14275d32157dSMark Johnston if (fs->m == NULL && (fault_object_needs_getpages(fs->object) || 1428d47d3a94SMark Johnston fs->object == fs->first_object)) { 1429d47d3a94SMark Johnston res = vm_fault_allocate(fs); 1430d47d3a94SMark Johnston if (res != FAULT_CONTINUE) 1431d47d3a94SMark Johnston return (res); 1432d47d3a94SMark Johnston } 1433d47d3a94SMark Johnston 1434d47d3a94SMark Johnston /* 1435d47d3a94SMark Johnston * Default objects have no pager so no exclusive busy exists 1436d47d3a94SMark Johnston * to protect this page in the chain. Skip to the next 1437d47d3a94SMark Johnston * object without dropping the lock to preserve atomicity of 1438d47d3a94SMark Johnston * shadow faults. 1439d47d3a94SMark Johnston */ 14405d32157dSMark Johnston if (fault_object_needs_getpages(fs->object)) { 1441d47d3a94SMark Johnston /* 1442d47d3a94SMark Johnston * At this point, we have either allocated a new page 1443d47d3a94SMark Johnston * or found an existing page that is only partially 1444d47d3a94SMark Johnston * valid. 1445d47d3a94SMark Johnston * 1446d47d3a94SMark Johnston * We hold a reference on the current object and the 1447d47d3a94SMark Johnston * page is exclusive busied.
The exclusive busy 1448d47d3a94SMark Johnston * prevents simultaneous faults and collapses while 1449d47d3a94SMark Johnston * the object lock is dropped. 1450d47d3a94SMark Johnston */ 1451d47d3a94SMark Johnston VM_OBJECT_WUNLOCK(fs->object); 1452d47d3a94SMark Johnston res = vm_fault_getpages(fs, behindp, aheadp); 1453d47d3a94SMark Johnston if (res == FAULT_CONTINUE) 1454d47d3a94SMark Johnston VM_OBJECT_WLOCK(fs->object); 1455d47d3a94SMark Johnston } else { 1456d47d3a94SMark Johnston res = FAULT_CONTINUE; 1457d47d3a94SMark Johnston } 1458d47d3a94SMark Johnston return (res); 1459d47d3a94SMark Johnston } 1460d47d3a94SMark Johnston 1461acd11c74SAlan Cox int 1462df08823dSKonstantin Belousov vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 1463acd11c74SAlan Cox int fault_flags, vm_page_t *m_hold) 1464acd11c74SAlan Cox { 14654866e085SJohn Dyson struct faultstate fs; 1466f1b642c2SMark Johnston int ahead, behind, faultcount, rv; 1467f1b642c2SMark Johnston enum fault_status res; 1468d47d3a94SMark Johnston bool hardfault; 1469df8bae1dSRodney W. Grimes 147083c9dea1SGleb Smirnoff VM_CNT_INC(v_vm_faults); 1471c31cec45SKonstantin Belousov 1472c31cec45SKonstantin Belousov if ((curthread->td_pflags & TDP_NOFAULTING) != 0) 1473c31cec45SKonstantin Belousov return (KERN_PROTECTION_FAILURE); 1474c31cec45SKonstantin Belousov 1475d2bf64c3SKonstantin Belousov fs.vp = NULL; 14765949b1caSJeff Roberson fs.vaddr = vaddr; 14772c2f4413SJeff Roberson fs.m_hold = m_hold; 14782c2f4413SJeff Roberson fs.fault_flags = fault_flags; 1479c308a3a6SJeff Roberson fs.map = map; 1480c308a3a6SJeff Roberson fs.lookup_still_valid = false; 1481174aad04SKonstantin Belousov fs.oom_started = false; 148245c09a74SMark Johnston fs.nera = -1; 1483b0cd2017SGleb Smirnoff faultcount = 0; 1484320023e2SAlan Cox hardfault = false; 1485df8bae1dSRodney W. Grimes 1486245139c6SKonstantin Belousov RetryFault: 14872c2f4413SJeff Roberson fs.fault_type = fault_type; 1488df8bae1dSRodney W. Grimes 1489df8bae1dSRodney W. Grimes /* 14900d94caffSDavid Greenman * Find the backing store object and offset into it to begin the 14910d94caffSDavid Greenman * search. 1492df8bae1dSRodney W. Grimes */ 1493f1b642c2SMark Johnston rv = vm_fault_lookup(&fs); 1494f1b642c2SMark Johnston if (rv != KERN_SUCCESS) { 1495f1b642c2SMark Johnston if (rv == KERN_RESOURCE_SHORTAGE) 1496c308a3a6SJeff Roberson goto RetryFault; 1497f1b642c2SMark Johnston return (rv); 149809e0c6ccSJohn Dyson } 149909e0c6ccSJohn Dyson 15008d67b8c8SAlan Cox /* 15018d67b8c8SAlan Cox * Try to avoid lock contention on the top-level object through 15028d67b8c8SAlan Cox * special-case handling of some types of page faults, specifically, 150367d0e293SJeff Roberson * those that are mapping an existing page from the top-level object. 150467d0e293SJeff Roberson * Under this condition, a read lock on the object suffices, allowing 150567d0e293SJeff Roberson * multiple page faults of a similar type to run in parallel. 
15068d67b8c8SAlan Cox */ 1507afe55ca3SKonstantin Belousov if (fs.vp == NULL /* avoid locked vnode leak */ && 1508d301b358SKonstantin Belousov (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 && 15092c2f4413SJeff Roberson (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) { 1510afe55ca3SKonstantin Belousov VM_OBJECT_RLOCK(fs.first_object); 1511f1b642c2SMark Johnston res = vm_fault_soft_fast(&fs); 1512f1b642c2SMark Johnston if (res == FAULT_SUCCESS) 1513f1b642c2SMark Johnston return (KERN_SUCCESS); 1514afe55ca3SKonstantin Belousov if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) { 1515afe55ca3SKonstantin Belousov VM_OBJECT_RUNLOCK(fs.first_object); 1516afe55ca3SKonstantin Belousov VM_OBJECT_WLOCK(fs.first_object); 1517afe55ca3SKonstantin Belousov } 1518afe55ca3SKonstantin Belousov } else { 1519afe55ca3SKonstantin Belousov VM_OBJECT_WLOCK(fs.first_object); 1520afe55ca3SKonstantin Belousov } 1521afe55ca3SKonstantin Belousov 152295e5e988SJohn Dyson /* 152395e5e988SJohn Dyson * Make a reference to this object to prevent its disposal while we 152495e5e988SJohn Dyson * are messing with it. Once we have the reference, the map is free 152595e5e988SJohn Dyson * to be diddled. Since objects reference their shadows (and copies), 152695e5e988SJohn Dyson * they will stay around as well. 1527fe8e0238SMatthew Dillon * 1528fe8e0238SMatthew Dillon * Bump the paging-in-progress count to prevent size changes (e.g. 1529dda4d369SAlan Cox * truncation operations) during I/O. 153095e5e988SJohn Dyson */ 1531a976eb5eSAlan Cox vm_object_reference_locked(fs.first_object); 1532d474eaaaSDoug Rabson vm_object_pip_add(fs.first_object, 1); 153395e5e988SJohn Dyson 153458447749SJeff Roberson fs.m_cow = fs.m = fs.first_m = NULL; 1535df8bae1dSRodney W. Grimes 1536df8bae1dSRodney W. Grimes /* 1537df8bae1dSRodney W. Grimes * Search for the page at object/offset. 1538df8bae1dSRodney W. Grimes */ 15394866e085SJohn Dyson fs.object = fs.first_object; 15404866e085SJohn Dyson fs.pindex = fs.first_pindex; 1541d301b358SKonstantin Belousov 1542d301b358SKonstantin Belousov if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) { 1543f1b642c2SMark Johnston res = vm_fault_allocate(&fs); 1544f1b642c2SMark Johnston switch (res) { 1545f1b642c2SMark Johnston case FAULT_RESTART: 1546d301b358SKonstantin Belousov goto RetryFault; 1547f1b642c2SMark Johnston case FAULT_SUCCESS: 1548f1b642c2SMark Johnston return (KERN_SUCCESS); 1549f1b642c2SMark Johnston case FAULT_FAILURE: 1550f1b642c2SMark Johnston return (KERN_FAILURE); 1551f1b642c2SMark Johnston case FAULT_OUT_OF_BOUNDS: 1552f1b642c2SMark Johnston return (KERN_OUT_OF_BOUNDS); 1553f1b642c2SMark Johnston case FAULT_CONTINUE: 1554d301b358SKonstantin Belousov break; 1555d301b358SKonstantin Belousov default: 1556f1b642c2SMark Johnston panic("vm_fault: Unhandled status %d", res); 1557d301b358SKonstantin Belousov } 1558d301b358SKonstantin Belousov } 1559d301b358SKonstantin Belousov 1560df8bae1dSRodney W. 
Grimes while (TRUE) { 15614bf95d00SJeff Roberson KASSERT(fs.m == NULL, 15624bf95d00SJeff Roberson ("page still set %p at loop start", fs.m)); 156347221757SJohn Dyson 1564d47d3a94SMark Johnston res = vm_fault_object(&fs, &behind, &ahead); 1565f1b642c2SMark Johnston switch (res) { 1566d47d3a94SMark Johnston case FAULT_SOFT: 1567d47d3a94SMark Johnston goto found; 1568d47d3a94SMark Johnston case FAULT_HARD: 1569d47d3a94SMark Johnston faultcount = behind + 1 + ahead; 1570d47d3a94SMark Johnston hardfault = true; 1571d47d3a94SMark Johnston goto found; 1572f1b642c2SMark Johnston case FAULT_RESTART: 1573df794f5cSJeff Roberson goto RetryFault; 1574f1b642c2SMark Johnston case FAULT_SUCCESS: 1575f1b642c2SMark Johnston return (KERN_SUCCESS); 1576f1b642c2SMark Johnston case FAULT_FAILURE: 1577f1b642c2SMark Johnston return (KERN_FAILURE); 1578f1b642c2SMark Johnston case FAULT_OUT_OF_BOUNDS: 1579f1b642c2SMark Johnston return (KERN_OUT_OF_BOUNDS); 1580d47d3a94SMark Johnston case FAULT_PROTECTION_FAILURE: 1581d47d3a94SMark Johnston return (KERN_PROTECTION_FAILURE); 1582f1b642c2SMark Johnston case FAULT_CONTINUE: 1583c42b43a0SKonstantin Belousov break; 1584c42b43a0SKonstantin Belousov default: 1585f1b642c2SMark Johnston panic("vm_fault: Unhandled status %d", res); 1586c42b43a0SKonstantin Belousov } 15874bf95d00SJeff Roberson 1588521ddf39SAlan Cox /* 15895909dafeSJeff Roberson * The page was not found in the current object. Try to 15905909dafeSJeff Roberson * traverse into a backing object or zero fill if none is 15915909dafeSJeff Roberson * found. 1592521ddf39SAlan Cox */ 1593fb4d37eaSJeff Roberson if (vm_fault_next(&fs)) 1594fb4d37eaSJeff Roberson continue; 1595f31695ccSMark Johnston if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) { 1596f31695ccSMark Johnston if (fs.first_object == fs.object) 1597f31695ccSMark Johnston fault_page_free(&fs.first_m); 1598f31695ccSMark Johnston unlock_and_deallocate(&fs); 1599f31695ccSMark Johnston return (KERN_OUT_OF_BOUNDS); 1600f31695ccSMark Johnston } 1601fb4d37eaSJeff Roberson VM_OBJECT_WUNLOCK(fs.object); 1602fb4d37eaSJeff Roberson vm_fault_zerofill(&fs); 16037b9b301cSAlan Cox /* Don't try to prefault neighboring pages. */ 16047b9b301cSAlan Cox faultcount = 1; 1605d47d3a94SMark Johnston break; 1606df8bae1dSRodney W. Grimes } 16071c7c3c6aSMatthew Dillon 1608d47d3a94SMark Johnston found: 1609df8bae1dSRodney W. Grimes /* 1610d47d3a94SMark Johnston * A valid page has been found and exclusively busied. The 1611d47d3a94SMark Johnston * object lock must no longer be held. 1612df8bae1dSRodney W. Grimes */ 16131e40fe41SJeff Roberson vm_page_assert_xbusied(fs.m); 16141e40fe41SJeff Roberson VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1615df8bae1dSRodney W. Grimes 1616df8bae1dSRodney W. Grimes /* 16170d94caffSDavid Greenman * If the page is being written, but isn't already owned by the 16180d94caffSDavid Greenman * top-level object, we have to copy it into a new page owned by the 16190d94caffSDavid Greenman * top-level object. 1620df8bae1dSRodney W. Grimes */ 16214866e085SJohn Dyson if (fs.object != fs.first_object) { 1622df8bae1dSRodney W. Grimes /* 16230d94caffSDavid Greenman * We only really need to copy if we want to write it. 1624df8bae1dSRodney W. 
Grimes */ 16252c2f4413SJeff Roberson if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 16265936b6a8SJeff Roberson vm_fault_cow(&fs); 16279f1abe3dSAlan Cox /* 16289f1abe3dSAlan Cox * We only try to prefault read-only mappings to the 16299f1abe3dSAlan Cox * neighboring pages when this copy-on-write fault is 16309f1abe3dSAlan Cox * a hard fault. In other cases, trying to prefault 16319f1abe3dSAlan Cox * is typically wasted effort. 16329f1abe3dSAlan Cox */ 16339f1abe3dSAlan Cox if (faultcount == 0) 16349f1abe3dSAlan Cox faultcount = 1; 16359f1abe3dSAlan Cox 16360d94caffSDavid Greenman } else { 16372c2f4413SJeff Roberson fs.prot &= ~VM_PROT_WRITE; 1638df8bae1dSRodney W. Grimes } 1639df8bae1dSRodney W. Grimes } 1640df8bae1dSRodney W. Grimes 1641df8bae1dSRodney W. Grimes /* 16420d94caffSDavid Greenman * We must verify that the maps have not changed since our last 16430d94caffSDavid Greenman * lookup. 1644df8bae1dSRodney W. Grimes */ 164519dc5607STor Egge if (!fs.lookup_still_valid) { 1646f1b642c2SMark Johnston rv = vm_fault_relookup(&fs); 1647f1b642c2SMark Johnston if (rv != KERN_SUCCESS) { 16481e40fe41SJeff Roberson fault_deallocate(&fs); 1649f1b642c2SMark Johnston if (rv == KERN_RESTART) 165019dc5607STor Egge goto RetryFault; 1651f1b642c2SMark Johnston return (rv); 1652df8bae1dSRodney W. Grimes } 165319dc5607STor Egge } 16541e40fe41SJeff Roberson VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1655381b7242SAlan Cox 1656d2bf64c3SKonstantin Belousov /* 1657381b7242SAlan Cox * If the page was filled by a pager, save the virtual address that 1658381b7242SAlan Cox * should be faulted on next under a sequential access pattern to the 1659381b7242SAlan Cox * map entry. A read lock on the map suffices to update this address 1660381b7242SAlan Cox * safely. 1661d2bf64c3SKonstantin Belousov */ 16625758fe71SAlan Cox if (hardfault) 1663381b7242SAlan Cox fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE; 1664d2bf64c3SKonstantin Belousov 16654221e284SAlan Cox /* 166678cfe1f7SAlan Cox * Page must be completely valid or it is not fit to 16674221e284SAlan Cox * map into user space. vm_pager_get_pages() ensures this. 16684221e284SAlan Cox */ 16691e40fe41SJeff Roberson vm_page_assert_xbusied(fs.m); 16700012f373SJeff Roberson KASSERT(vm_page_all_valid(fs.m), 167178cfe1f7SAlan Cox ("vm_fault: page %p partially invalid", fs.m)); 16721e40fe41SJeff Roberson 16732c2f4413SJeff Roberson vm_fault_dirty(&fs, fs.m); 1674cbfbaad8SAlan Cox 167586735996SAlan Cox /* 167686735996SAlan Cox * Put this page into the physical map. We had to do the unlock above 167786735996SAlan Cox * because pmap_enter() may sleep. We don't put the page 167886735996SAlan Cox * back on the active queue until later so that the pageout daemon 167986735996SAlan Cox * won't find it (yet). 168086735996SAlan Cox */ 16812c2f4413SJeff Roberson pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, 16822c2f4413SJeff Roberson fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0); 16832c2f4413SJeff Roberson if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 && 16842c2f4413SJeff Roberson fs.wired == 0) 1685b0cd2017SGleb Smirnoff vm_fault_prefault(&fs, vaddr, 1686b0cd2017SGleb Smirnoff faultcount > 0 ? behind : PFBAK, 1687a7163bb9SKonstantin Belousov faultcount > 0 ? ahead : PFFOR, false); 1688ff97964aSJohn Dyson 1689df8bae1dSRodney W. Grimes /* 16900d94caffSDavid Greenman * If the page is not wired down, then put it where the pageout daemon 16910d94caffSDavid Greenman * can find it. 1692df8bae1dSRodney W. 
Grimes */ 16932c2f4413SJeff Roberson if ((fs.fault_flags & VM_FAULT_WIRE) != 0) 16944866e085SJohn Dyson vm_page_wire(fs.m); 16959f5632e6SMark Johnston else 16964866e085SJohn Dyson vm_page_activate(fs.m); 16972c2f4413SJeff Roberson if (fs.m_hold != NULL) { 16982c2f4413SJeff Roberson (*fs.m_hold) = fs.m; 1699eeacb3b0SMark Johnston vm_page_wire(fs.m); 1700acd11c74SAlan Cox } 1701c7aebda8SAttilio Rao vm_page_xunbusy(fs.m); 17024bf95d00SJeff Roberson fs.m = NULL; 1703eeec6babSJohn Baldwin 1704eebf3286SAlan Cox /* 1705eebf3286SAlan Cox * Unlock everything, and return 1706eebf3286SAlan Cox */ 17074b3e0665SJeff Roberson fault_deallocate(&fs); 1708b3a01bdfSAndrey Zonov if (hardfault) { 170983c9dea1SGleb Smirnoff VM_CNT_INC(v_io_faults); 17101c4bcd05SJeff Roberson curthread->td_ru.ru_majflt++; 1711ae34b6ffSEdward Tomasz Napierala #ifdef RACCT 1712ae34b6ffSEdward Tomasz Napierala if (racct_enable && fs.object->type == OBJT_VNODE) { 1713ae34b6ffSEdward Tomasz Napierala PROC_LOCK(curproc); 17142c2f4413SJeff Roberson if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1715ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_WRITEBPS, 1716ae34b6ffSEdward Tomasz Napierala PAGE_SIZE + behind * PAGE_SIZE); 1717ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_WRITEIOPS, 1); 1718ae34b6ffSEdward Tomasz Napierala } else { 1719ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_READBPS, 1720ae34b6ffSEdward Tomasz Napierala PAGE_SIZE + ahead * PAGE_SIZE); 1721ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_READIOPS, 1); 1722ae34b6ffSEdward Tomasz Napierala } 1723ae34b6ffSEdward Tomasz Napierala PROC_UNLOCK(curproc); 1724ae34b6ffSEdward Tomasz Napierala } 1725ae34b6ffSEdward Tomasz Napierala #endif 1726b3a01bdfSAndrey Zonov } else 17271c4bcd05SJeff Roberson curthread->td_ru.ru_minflt++; 1728df8bae1dSRodney W. Grimes 1729df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1730df8bae1dSRodney W. Grimes } 1731df8bae1dSRodney W. Grimes 1732df8bae1dSRodney W. Grimes /* 1733a8b0f100SAlan Cox * Speed up the reclamation of pages that precede the faulting pindex within 1734a8b0f100SAlan Cox * the first object of the shadow chain. Essentially, perform the equivalent 1735a8b0f100SAlan Cox * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes 1736a8b0f100SAlan Cox * the faulting pindex by the cluster size when the pages read by vm_fault() 1737a8b0f100SAlan Cox * cross a cluster-size boundary. The cluster size is the greater of the 1738a8b0f100SAlan Cox * smallest superpage size and VM_FAULT_DONTNEED_MIN. 1739a8b0f100SAlan Cox * 1740a8b0f100SAlan Cox * When "fs->first_object" is a shadow object, the pages in the backing object 1741a8b0f100SAlan Cox * that precede the faulting pindex are deactivated by vm_fault(). So, this 1742a8b0f100SAlan Cox * function must only be concerned with pages in the first object. 
174313458803SAlan Cox */ 174413458803SAlan Cox static void 1745a8b0f100SAlan Cox vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead) 174613458803SAlan Cox { 1747a8b0f100SAlan Cox vm_map_entry_t entry; 174840cbcb99SJohn Baldwin vm_object_t first_object; 1749a8b0f100SAlan Cox vm_offset_t end, start; 1750a8b0f100SAlan Cox vm_page_t m, m_next; 1751a8b0f100SAlan Cox vm_pindex_t pend, pstart; 1752a8b0f100SAlan Cox vm_size_t size; 175313458803SAlan Cox 175440cbcb99SJohn Baldwin VM_OBJECT_ASSERT_UNLOCKED(fs->object); 175513458803SAlan Cox first_object = fs->first_object; 1756a8b0f100SAlan Cox /* Neither fictitious nor unmanaged pages can be reclaimed. */ 175728634820SAlan Cox if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) { 17581e40fe41SJeff Roberson VM_OBJECT_RLOCK(first_object); 1759a8b0f100SAlan Cox size = VM_FAULT_DONTNEED_MIN; 1760a8b0f100SAlan Cox if (MAXPAGESIZES > 1 && size < pagesizes[1]) 1761a8b0f100SAlan Cox size = pagesizes[1]; 1762a8b0f100SAlan Cox end = rounddown2(vaddr, size); 1763a8b0f100SAlan Cox if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) && 1764a8b0f100SAlan Cox (entry = fs->entry)->start < end) { 1765a8b0f100SAlan Cox if (end - entry->start < size) 1766a8b0f100SAlan Cox start = entry->start; 176713458803SAlan Cox else 1768a8b0f100SAlan Cox start = end - size; 1769a8b0f100SAlan Cox pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED); 1770a8b0f100SAlan Cox pstart = OFF_TO_IDX(entry->offset) + atop(start - 1771a8b0f100SAlan Cox entry->start); 1772a8b0f100SAlan Cox m_next = vm_page_find_least(first_object, pstart); 1773a8b0f100SAlan Cox pend = OFF_TO_IDX(entry->offset) + atop(end - 1774a8b0f100SAlan Cox entry->start); 1775a8b0f100SAlan Cox while ((m = m_next) != NULL && m->pindex < pend) { 1776a8b0f100SAlan Cox m_next = TAILQ_NEXT(m, listq); 17770012f373SJeff Roberson if (!vm_page_all_valid(m) || 1778a8b0f100SAlan Cox vm_page_busied(m)) 177913458803SAlan Cox continue; 1780d8015db3SAlan Cox 1781d8015db3SAlan Cox /* 1782d8015db3SAlan Cox * Don't clear PGA_REFERENCED, since it would 1783d8015db3SAlan Cox * likely represent a reference by a different 1784d8015db3SAlan Cox * process. 1785d8015db3SAlan Cox * 1786d8015db3SAlan Cox * Typically, at this point, prefetched pages 1787d8015db3SAlan Cox * are still in the inactive queue. Only 1788d8015db3SAlan Cox * pages that triggered page faults are in the 17899f5632e6SMark Johnston * active queue. The test for whether the page 17909f5632e6SMark Johnston * is in the inactive queue is racy; in the 17919f5632e6SMark Johnston * worst case we will requeue the page 17929f5632e6SMark Johnston * unnecessarily. 1793d8015db3SAlan Cox */ 17940eb50f9cSMark Johnston if (!vm_page_inactive(m)) 1795d8015db3SAlan Cox vm_page_deactivate(m); 179613458803SAlan Cox } 179713458803SAlan Cox } 17981e40fe41SJeff Roberson VM_OBJECT_RUNLOCK(first_object); 1799a8b0f100SAlan Cox } 180013458803SAlan Cox } 180113458803SAlan Cox 180213458803SAlan Cox /* 1803566526a9SAlan Cox * vm_fault_prefault provides a quick way of clustering 1804566526a9SAlan Cox * page faults into a process's address space. It is a "cousin" 1805566526a9SAlan Cox * of vm_map_pmap_enter, except it runs at page fault time instead 1806566526a9SAlan Cox * of mmap time.
1807566526a9SAlan Cox */ 1808566526a9SAlan Cox static void 180963281952SAlan Cox vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra, 1810a7163bb9SKonstantin Belousov int backward, int forward, bool obj_locked) 1811566526a9SAlan Cox { 181263281952SAlan Cox pmap_t pmap; 181363281952SAlan Cox vm_map_entry_t entry; 181463281952SAlan Cox vm_object_t backing_object, lobject; 1815566526a9SAlan Cox vm_offset_t addr, starta; 1816566526a9SAlan Cox vm_pindex_t pindex; 18172053c127SStephan Uphoff vm_page_t m; 1818b0cd2017SGleb Smirnoff int i; 1819566526a9SAlan Cox 182063281952SAlan Cox pmap = fs->map->pmap; 1821950d5f7aSAlan Cox if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) 1822566526a9SAlan Cox return; 1823566526a9SAlan Cox 182463281952SAlan Cox entry = fs->entry; 1825566526a9SAlan Cox 182663cdcaaeSKonstantin Belousov if (addra < backward * PAGE_SIZE) { 1827566526a9SAlan Cox starta = entry->start; 182863cdcaaeSKonstantin Belousov } else { 182963cdcaaeSKonstantin Belousov starta = addra - backward * PAGE_SIZE; 183063cdcaaeSKonstantin Belousov if (starta < entry->start) 183163cdcaaeSKonstantin Belousov starta = entry->start; 1832566526a9SAlan Cox } 1833566526a9SAlan Cox 183463281952SAlan Cox /* 183563281952SAlan Cox * Generate the sequence of virtual addresses that are candidates for 183663281952SAlan Cox * prefaulting in an outward spiral from the faulting virtual address, 183763281952SAlan Cox * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra 183863281952SAlan Cox * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ... 183963281952SAlan Cox * If the candidate address doesn't have a backing physical page, then 184063281952SAlan Cox * the loop immediately terminates. 184163281952SAlan Cox */ 184263281952SAlan Cox for (i = 0; i < 2 * imax(backward, forward); i++) { 184363281952SAlan Cox addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? 
-PAGE_SIZE : 184463281952SAlan Cox PAGE_SIZE); 184563281952SAlan Cox if (addr > addra + forward * PAGE_SIZE) 1846566526a9SAlan Cox addr = 0; 1847566526a9SAlan Cox 1848566526a9SAlan Cox if (addr < starta || addr >= entry->end) 1849566526a9SAlan Cox continue; 1850566526a9SAlan Cox 1851566526a9SAlan Cox if (!pmap_is_prefaultable(pmap, addr)) 1852566526a9SAlan Cox continue; 1853566526a9SAlan Cox 1854566526a9SAlan Cox pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 185563281952SAlan Cox lobject = entry->object.vm_object; 1856a7163bb9SKonstantin Belousov if (!obj_locked) 1857c141ae7fSAlan Cox VM_OBJECT_RLOCK(lobject); 1858566526a9SAlan Cox while ((m = vm_page_lookup(lobject, pindex)) == NULL && 18595d32157dSMark Johnston !fault_object_needs_getpages(lobject) && 1860566526a9SAlan Cox (backing_object = lobject->backing_object) != NULL) { 186136930fc9SAlan Cox KASSERT((lobject->backing_object_offset & PAGE_MASK) == 186236930fc9SAlan Cox 0, ("vm_fault_prefault: unaligned object offset")); 1863566526a9SAlan Cox pindex += lobject->backing_object_offset >> PAGE_SHIFT; 1864c141ae7fSAlan Cox VM_OBJECT_RLOCK(backing_object); 1865a7163bb9SKonstantin Belousov if (!obj_locked || lobject != entry->object.vm_object) 1866c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1867566526a9SAlan Cox lobject = backing_object; 1868566526a9SAlan Cox } 1869cbfbaad8SAlan Cox if (m == NULL) { 1870a7163bb9SKonstantin Belousov if (!obj_locked || lobject != entry->object.vm_object) 1871c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1872566526a9SAlan Cox break; 1873cbfbaad8SAlan Cox } 18740012f373SJeff Roberson if (vm_page_all_valid(m) && 18753c4a2440SAlan Cox (m->flags & PG_FICTITIOUS) == 0) 18767bfda801SAlan Cox pmap_enter_quick(pmap, addr, m, entry->protection); 1877a7163bb9SKonstantin Belousov if (!obj_locked || lobject != entry->object.vm_object) 1878c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1879566526a9SAlan Cox } 1880566526a9SAlan Cox } 1881566526a9SAlan Cox 1882566526a9SAlan Cox /* 188382de724fSAlan Cox * Hold each of the physical pages that are mapped by the specified range of 188482de724fSAlan Cox * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid 188582de724fSAlan Cox * and allow the specified types of access, "prot". If all of the implied 188682de724fSAlan Cox * pages are successfully held, then the number of held pages is returned 188782de724fSAlan Cox * together with pointers to those pages in the array "ma". However, if any 188882de724fSAlan Cox * of the pages cannot be held, -1 is returned. 
188982de724fSAlan Cox */ 189082de724fSAlan Cox int 189182de724fSAlan Cox vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, 189282de724fSAlan Cox vm_prot_t prot, vm_page_t *ma, int max_count) 189382de724fSAlan Cox { 189482de724fSAlan Cox vm_offset_t end, va; 189582de724fSAlan Cox vm_page_t *mp; 18967e14088dSKonstantin Belousov int count; 189782de724fSAlan Cox boolean_t pmap_failed; 189882de724fSAlan Cox 1899af32c419SKonstantin Belousov if (len == 0) 1900af32c419SKonstantin Belousov return (0); 190182de724fSAlan Cox end = round_page(addr + len); 190282de724fSAlan Cox addr = trunc_page(addr); 190382de724fSAlan Cox 19040f1e6ec5SMark Johnston if (!vm_map_range_valid(map, addr, end)) 190582de724fSAlan Cox return (-1); 190682de724fSAlan Cox 19077e14088dSKonstantin Belousov if (atop(end - addr) > max_count) 190882de724fSAlan Cox panic("vm_fault_quick_hold_pages: count > max_count"); 19097e14088dSKonstantin Belousov count = atop(end - addr); 191082de724fSAlan Cox 191182de724fSAlan Cox /* 191282de724fSAlan Cox * Most likely, the physical pages are resident in the pmap, so it is 191382de724fSAlan Cox * faster to try pmap_extract_and_hold() first. 191482de724fSAlan Cox */ 191582de724fSAlan Cox pmap_failed = FALSE; 191682de724fSAlan Cox for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) { 191782de724fSAlan Cox *mp = pmap_extract_and_hold(map->pmap, va, prot); 191882de724fSAlan Cox if (*mp == NULL) 191982de724fSAlan Cox pmap_failed = TRUE; 192082de724fSAlan Cox else if ((prot & VM_PROT_WRITE) != 0 && 1921a5dbab54SAlan Cox (*mp)->dirty != VM_PAGE_BITS_ALL) { 192282de724fSAlan Cox /* 192382de724fSAlan Cox * Explicitly dirty the physical page. Otherwise, the 192482de724fSAlan Cox * caller's changes may go unnoticed because they are 192582de724fSAlan Cox * performed through an unmanaged mapping or by a DMA 192682de724fSAlan Cox * operation. 19273c76db4cSAlan Cox * 1928abb9b935SKonstantin Belousov * The object lock is not held here. 1929abb9b935SKonstantin Belousov * See vm_page_clear_dirty_mask(). 193082de724fSAlan Cox */ 19313c76db4cSAlan Cox vm_page_dirty(*mp); 193282de724fSAlan Cox } 193382de724fSAlan Cox } 193482de724fSAlan Cox if (pmap_failed) { 193582de724fSAlan Cox /* 193682de724fSAlan Cox * One or more pages could not be held by the pmap. Either no 193782de724fSAlan Cox * page was mapped at the specified virtual address or that 193882de724fSAlan Cox * mapping had insufficient permissions. Attempt to fault in 193982de724fSAlan Cox * and hold these pages. 19408ec533d3SKonstantin Belousov * 19418ec533d3SKonstantin Belousov * If vm_fault_disable_pagefaults() was called, 19428ec533d3SKonstantin Belousov * i.e., TDP_NOFAULTING is set, we must not sleep nor 19438ec533d3SKonstantin Belousov * acquire MD VM locks, which means we must not call 1944df08823dSKonstantin Belousov * vm_fault(). Some (out of tree) callers mark 19458ec533d3SKonstantin Belousov * too wide a code area with vm_fault_disable_pagefaults() 19468ec533d3SKonstantin Belousov * already, use the VM_PROT_QUICK_NOFAULT flag to request 19478ec533d3SKonstantin Belousov * the proper behaviour explicitly. 
194882de724fSAlan Cox */ 19498ec533d3SKonstantin Belousov if ((prot & VM_PROT_QUICK_NOFAULT) != 0 && 19508ec533d3SKonstantin Belousov (curthread->td_pflags & TDP_NOFAULTING) != 0) 19518ec533d3SKonstantin Belousov goto error; 195282de724fSAlan Cox for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) 1953df08823dSKonstantin Belousov if (*mp == NULL && vm_fault(map, va, prot, 195482de724fSAlan Cox VM_FAULT_NORMAL, mp) != KERN_SUCCESS) 195582de724fSAlan Cox goto error; 195682de724fSAlan Cox } 195782de724fSAlan Cox return (count); 195882de724fSAlan Cox error: 195982de724fSAlan Cox for (mp = ma; mp < ma + count; mp++) 1960fee2a2faSMark Johnston if (*mp != NULL) 1961fee2a2faSMark Johnston vm_page_unwire(*mp, PQ_INACTIVE); 196282de724fSAlan Cox return (-1); 196382de724fSAlan Cox } 196482de724fSAlan Cox 196582de724fSAlan Cox /* 1966df8bae1dSRodney W. Grimes * Routine: 1967df8bae1dSRodney W. Grimes * vm_fault_copy_entry 1968df8bae1dSRodney W. Grimes * Function: 1969b57be759SMark Johnston * Create new object backing dst_entry with private copy of all 1970b57be759SMark Johnston * underlying pages. When src_entry is equal to dst_entry, function 1971b57be759SMark Johnston * implements COW for wired-down map entry. Otherwise, it forks 1972b57be759SMark Johnston * wired entry into dst_map. 1973df8bae1dSRodney W. Grimes * 1974df8bae1dSRodney W. Grimes * In/out conditions: 1975df8bae1dSRodney W. Grimes * The source and destination maps must be locked for write. 1976df8bae1dSRodney W. Grimes * The source map entry must be wired down (or be a sharing map 1977df8bae1dSRodney W. Grimes * entry corresponding to a main map entry that is wired down). 1978df8bae1dSRodney W. Grimes */ 197926f9a767SRodney W. Grimes void 1980b57be759SMark Johnston vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused, 1981121fd461SKonstantin Belousov vm_map_entry_t dst_entry, vm_map_entry_t src_entry, 1982121fd461SKonstantin Belousov vm_ooffset_t *fork_charge) 1983df8bae1dSRodney W. Grimes { 1984210a6886SKonstantin Belousov vm_object_t backing_object, dst_object, object, src_object; 19857afab86cSAlan Cox vm_pindex_t dst_pindex, pindex, src_pindex; 1986210a6886SKonstantin Belousov vm_prot_t access, prot; 1987df8bae1dSRodney W. Grimes vm_offset_t vaddr; 1988df8bae1dSRodney W. Grimes vm_page_t dst_m; 1989df8bae1dSRodney W. Grimes vm_page_t src_m; 1990b57be759SMark Johnston bool upgrade; 1991df8bae1dSRodney W. Grimes 1992210a6886SKonstantin Belousov upgrade = src_entry == dst_entry; 1993b57be759SMark Johnston KASSERT(upgrade || dst_entry->object.vm_object == NULL, 1994b57be759SMark Johnston ("vm_fault_copy_entry: vm_object not NULL")); 1995b57be759SMark Johnston 1996b57be759SMark Johnston /* 1997b57be759SMark Johnston * If not an upgrade, then enter the mappings in the pmap as 1998b57be759SMark Johnston * read and/or execute accesses. Otherwise, enter them as 1999b57be759SMark Johnston * write accesses. 2000b57be759SMark Johnston * 2001b57be759SMark Johnston * A writeable large page mapping is only created if all of 2002b57be759SMark Johnston * the constituent small page mappings are modified. Marking 2003b57be759SMark Johnston * PTEs as modified on inception allows promotion to happen 2004b57be759SMark Johnston * without taking potentially large number of soft faults. 2005b57be759SMark Johnston */ 20060973283dSKonstantin Belousov access = prot = dst_entry->protection; 2007b57be759SMark Johnston if (!upgrade) 2008b57be759SMark Johnston access &= ~VM_PROT_WRITE; 2009210a6886SKonstantin Belousov 2010df8bae1dSRodney W. 
Grimes src_object = src_entry->object.vm_object; 20117afab86cSAlan Cox src_pindex = OFF_TO_IDX(src_entry->offset); 2012df8bae1dSRodney W. Grimes 20130973283dSKonstantin Belousov if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 20140973283dSKonstantin Belousov dst_object = src_object; 20150973283dSKonstantin Belousov vm_object_reference(dst_object); 20160973283dSKonstantin Belousov } else { 2017df8bae1dSRodney W. Grimes /* 201867388836SKonstantin Belousov * Create the top-level object for the destination entry. 201967388836SKonstantin Belousov * Doesn't actually shadow anything - we copy the pages 202067388836SKonstantin Belousov * directly. 2021df8bae1dSRodney W. Grimes */ 202267388836SKonstantin Belousov dst_object = vm_object_allocate_anon(atop(dst_entry->end - 202367388836SKonstantin Belousov dst_entry->start), NULL, NULL, 0); 2024f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 2025f8a47341SAlan Cox dst_object->flags |= OBJ_COLORED; 2026f8a47341SAlan Cox dst_object->pg_color = atop(dst_entry->start); 2027f8a47341SAlan Cox #endif 2028a60d3db1SKonstantin Belousov dst_object->domain = src_object->domain; 2029a60d3db1SKonstantin Belousov dst_object->charge = dst_entry->end - dst_entry->start; 2030df8bae1dSRodney W. Grimes 2031df8bae1dSRodney W. Grimes dst_entry->object.vm_object = dst_object; 2032df8bae1dSRodney W. Grimes dst_entry->offset = 0; 203378022527SKonstantin Belousov dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC; 20340973283dSKonstantin Belousov } 2035b57be759SMark Johnston 2036b57be759SMark Johnston VM_OBJECT_WLOCK(dst_object); 2037210a6886SKonstantin Belousov if (fork_charge != NULL) { 2038ef694c1aSEdward Tomasz Napierala KASSERT(dst_entry->cred == NULL, 2039121fd461SKonstantin Belousov ("vm_fault_copy_entry: leaked swp charge")); 2040ef694c1aSEdward Tomasz Napierala dst_object->cred = curthread->td_ucred; 2041ef694c1aSEdward Tomasz Napierala crhold(dst_object->cred); 2042121fd461SKonstantin Belousov *fork_charge += dst_object->charge; 20430cb2610eSMark Johnston } else if ((dst_object->flags & OBJ_SWAP) != 0 && 20449f25ab83SKonstantin Belousov dst_object->cred == NULL) { 20450973283dSKonstantin Belousov KASSERT(dst_entry->cred != NULL, ("no cred for entry %p", 20460973283dSKonstantin Belousov dst_entry)); 2047ef694c1aSEdward Tomasz Napierala dst_object->cred = dst_entry->cred; 2048ef694c1aSEdward Tomasz Napierala dst_entry->cred = NULL; 2049210a6886SKonstantin Belousov } 20500973283dSKonstantin Belousov 2051210a6886SKonstantin Belousov /* 2052ef45823eSKonstantin Belousov * Loop through all of the virtual pages within the entry's 2053ef45823eSKonstantin Belousov * range, copying each page from the source object to the 2054ef45823eSKonstantin Belousov * destination object. Since the source is wired, those pages 2055ef45823eSKonstantin Belousov * must exist. In contrast, the destination is pageable. 20566939b4d3SMark Johnston * Since the destination object doesn't share any backing storage 2057ef45823eSKonstantin Belousov * with the source object, all of its pages must be dirtied, 2058ef45823eSKonstantin Belousov * regardless of whether they can be written. 2059df8bae1dSRodney W. Grimes */ 20607afab86cSAlan Cox for (vaddr = dst_entry->start, dst_pindex = 0; 2061df8bae1dSRodney W. Grimes vaddr < dst_entry->end; 20627afab86cSAlan Cox vaddr += PAGE_SIZE, dst_pindex++) { 20630973283dSKonstantin Belousov again: 2064df8bae1dSRodney W. Grimes /* 2065df8bae1dSRodney W. Grimes * Find the page in the source object, and copy it in. 
20664c74acf7SKonstantin Belousov * Because the source is wired down, the page will be 20674c74acf7SKonstantin Belousov * in memory. 2068df8bae1dSRodney W. Grimes */ 20690973283dSKonstantin Belousov if (src_object != dst_object) 207083b375eaSAttilio Rao VM_OBJECT_RLOCK(src_object); 2071c5b65a67SAlan Cox object = src_object; 20727afab86cSAlan Cox pindex = src_pindex + dst_pindex; 20737afab86cSAlan Cox while ((src_m = vm_page_lookup(object, pindex)) == NULL && 2074c5b65a67SAlan Cox (backing_object = object->backing_object) != NULL) { 2075c5b65a67SAlan Cox /* 20764c74acf7SKonstantin Belousov * Unless the source mapping is read-only or 20774c74acf7SKonstantin Belousov * it is presently being upgraded from 20784c74acf7SKonstantin Belousov * read-only, the first object in the shadow 20794c74acf7SKonstantin Belousov * chain should provide all of the pages. In 20804c74acf7SKonstantin Belousov * other words, this loop body should never be 20814c74acf7SKonstantin Belousov * executed when the source mapping is already 20824c74acf7SKonstantin Belousov * read/write. 2083c5b65a67SAlan Cox */ 20844c74acf7SKonstantin Belousov KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 || 20854c74acf7SKonstantin Belousov upgrade, 20864c74acf7SKonstantin Belousov ("vm_fault_copy_entry: main object missing page")); 20874c74acf7SKonstantin Belousov 208883b375eaSAttilio Rao VM_OBJECT_RLOCK(backing_object); 2089c5b65a67SAlan Cox pindex += OFF_TO_IDX(object->backing_object_offset); 20900973283dSKonstantin Belousov if (object != dst_object) 209183b375eaSAttilio Rao VM_OBJECT_RUNLOCK(object); 2092c5b65a67SAlan Cox object = backing_object; 2093c5b65a67SAlan Cox } 20944c74acf7SKonstantin Belousov KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing")); 20950973283dSKonstantin Belousov 20960973283dSKonstantin Belousov if (object != dst_object) { 20970973283dSKonstantin Belousov /* 20980973283dSKonstantin Belousov * Allocate a page in the destination object. 20990973283dSKonstantin Belousov */ 21002602a2eaSKonstantin Belousov dst_m = vm_page_alloc(dst_object, (src_object == 21012602a2eaSKonstantin Belousov dst_object ? src_pindex : 0) + dst_pindex, 21022602a2eaSKonstantin Belousov VM_ALLOC_NORMAL); 21030973283dSKonstantin Belousov if (dst_m == NULL) { 21040973283dSKonstantin Belousov VM_OBJECT_WUNLOCK(dst_object); 21050973283dSKonstantin Belousov VM_OBJECT_RUNLOCK(object); 21062c0f13aaSKonstantin Belousov vm_wait(dst_object); 2107c8f780e3SKonstantin Belousov VM_OBJECT_WLOCK(dst_object); 21080973283dSKonstantin Belousov goto again; 21090973283dSKonstantin Belousov } 2110*5c50e900SMark Johnston 2111*5c50e900SMark Johnston /* 2112*5c50e900SMark Johnston * See the comment in vm_fault_cow(). 2113*5c50e900SMark Johnston */ 2114*5c50e900SMark Johnston if (src_object == dst_object && 2115*5c50e900SMark Johnston (object->flags & OBJ_ONEMAPPING) == 0) 2116*5c50e900SMark Johnston pmap_remove_all(src_m); 2117669890eaSAlan Cox pmap_copy_page(src_m, dst_m); 2118d0443e2bSMark Johnston 2119d0443e2bSMark Johnston /* 2120d0443e2bSMark Johnston * The object lock does not guarantee that "src_m" will 2121d0443e2bSMark Johnston * transition from invalid to valid, but it does ensure 2122d0443e2bSMark Johnston * that "src_m" will not transition from valid to 2123d0443e2bSMark Johnston * invalid. 
2124d0443e2bSMark Johnston */ 212545d72c7dSKonstantin Belousov dst_m->dirty = dst_m->valid = src_m->valid; 2126d0443e2bSMark Johnston VM_OBJECT_RUNLOCK(object); 21270973283dSKonstantin Belousov } else { 21280973283dSKonstantin Belousov dst_m = src_m; 212963e97555SJeff Roberson if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0) 21300973283dSKonstantin Belousov goto again; 213163e97555SJeff Roberson if (dst_m->pindex >= dst_object->size) { 2132c62637d6SKonstantin Belousov /* 2133c62637d6SKonstantin Belousov * We are upgrading. Index can occur 2134c62637d6SKonstantin Belousov * out of bounds if the object type is 2135c62637d6SKonstantin Belousov * vnode and the file was truncated. 2136c62637d6SKonstantin Belousov */ 213763e97555SJeff Roberson vm_page_xunbusy(dst_m); 2138c62637d6SKonstantin Belousov break; 213963e97555SJeff Roberson } 21400973283dSKonstantin Belousov } 2141df8bae1dSRodney W. Grimes 2142df8bae1dSRodney W. Grimes /* 2143210a6886SKonstantin Belousov * Enter it in the pmap. If a wired, copy-on-write 2144210a6886SKonstantin Belousov * mapping is being replaced by a write-enabled 2145210a6886SKonstantin Belousov * mapping, then wire that new mapping. 214645d72c7dSKonstantin Belousov * 214745d72c7dSKonstantin Belousov * The page can be invalid if the user called 214845d72c7dSKonstantin Belousov * msync(MS_INVALIDATE) or truncated the backing vnode 214945d72c7dSKonstantin Belousov * or shared memory object. In this case, do not 215045d72c7dSKonstantin Belousov * insert it into pmap, but still do the copy so that 215145d72c7dSKonstantin Belousov * all copies of the wired map entry have similar 215245d72c7dSKonstantin Belousov * backing pages. 2153df8bae1dSRodney W. Grimes */ 21540012f373SJeff Roberson if (vm_page_all_valid(dst_m)) { 21551f88394bSMark Johnston VM_OBJECT_WUNLOCK(dst_object); 215639ffa8c1SKonstantin Belousov pmap_enter(dst_map->pmap, vaddr, dst_m, prot, 215739ffa8c1SKonstantin Belousov access | (upgrade ? PMAP_ENTER_WIRED : 0), 0); 21581f88394bSMark Johnston VM_OBJECT_WLOCK(dst_object); 215945d72c7dSKonstantin Belousov } 2160df8bae1dSRodney W. Grimes 2161df8bae1dSRodney W. Grimes /* 2162df8bae1dSRodney W. Grimes * Mark it no longer busy, and put it on the active list. 2163df8bae1dSRodney W. Grimes */ 2164210a6886SKonstantin Belousov if (upgrade) { 21650973283dSKonstantin Belousov if (src_m != dst_m) { 21663ae10f74SAttilio Rao vm_page_unwire(src_m, PQ_INACTIVE); 2167210a6886SKonstantin Belousov vm_page_wire(dst_m); 21682965a453SKip Macy } else { 2169d842aa51SMark Johnston KASSERT(vm_page_wired(dst_m), 21700973283dSKonstantin Belousov ("dst_m %p is not wired", dst_m)); 21710973283dSKonstantin Belousov } 21720973283dSKonstantin Belousov } else { 2173df8bae1dSRodney W. Grimes vm_page_activate(dst_m); 21742965a453SKip Macy } 2175c7aebda8SAttilio Rao vm_page_xunbusy(dst_m); 2176df8bae1dSRodney W. Grimes } 217789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(dst_object); 2178210a6886SKonstantin Belousov if (upgrade) { 2179210a6886SKonstantin Belousov dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY); 2180210a6886SKonstantin Belousov vm_object_deallocate(src_object); 2181210a6886SKonstantin Belousov } 2182df8bae1dSRodney W. Grimes } 218326f9a767SRodney W. Grimes 21845730afc9SAlan Cox /* 21855730afc9SAlan Cox * Block entry into the machine-independent layer's page fault handler by 21865730afc9SAlan Cox * the calling thread. Subsequent calls to vm_fault() by that thread will 21875730afc9SAlan Cox * return KERN_PROTECTION_FAILURE. 
Enable machine-dependent handling of 21885730afc9SAlan Cox * spurious page faults. 21895730afc9SAlan Cox */ 21902801687dSKonstantin Belousov int 21912801687dSKonstantin Belousov vm_fault_disable_pagefaults(void) 21922801687dSKonstantin Belousov { 21932801687dSKonstantin Belousov 21945730afc9SAlan Cox return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR)); 21952801687dSKonstantin Belousov } 21962801687dSKonstantin Belousov 21972801687dSKonstantin Belousov void 21982801687dSKonstantin Belousov vm_fault_enable_pagefaults(int save) 21992801687dSKonstantin Belousov { 22002801687dSKonstantin Belousov 22012801687dSKonstantin Belousov curthread_pflags_restore(save); 22022801687dSKonstantin Belousov } 2203