/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define PFBAK 4
#define PFFOR 4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	int		oom;
	boolean_t	wired;

	/* Page reference for cow. */
	vm_page_t m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");
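
/*
 * Example (illustrative only, values are arbitrary): both knobs are
 * CTLFLAG_RWTUN, so they can be set as loader tunables or adjusted at
 * runtime, e.g.:
 *
 *	sysctl vm.pfault_oom_attempts=5
 *	sysctl vm.pfault_oom_wait=20
 *
 * With these settings a faulting thread sleeps up to 20 seconds per
 * failed allocation and triggers OOM handling after 5 attempts; setting
 * vm.pfault_oom_attempts to -1 retries forever without declaring OOM.
 */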
static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}
}
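
/*
 * For context, an illustrative userlevel view of the NOSYNC handling
 * above: a mapping created with
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED | MAP_NOSYNC, fd, 0);
 *
 * gets MAP_ENTRY_NOSYNC set on its map entry, so pages dirtied through
 * it are flagged PGA_NOSYNC and skipped by the periodic syncer; they
 * are written back only by an explicit msync(2) or by the pageout
 * daemon.
 */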
/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;
	vm_offset_t vaddr;

	MPASS(fs->vp == NULL);
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		rv = KERN_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !fs->wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
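	/*
	 * Illustrative example of the superpage checks above, assuming
	 * a platform where pagesizes[1] == 2MB (e.g. amd64): a fault at
	 * vaddr 0x40021000 whose page sits at physical address
	 * 0x80021000 inside a fully populated reservation passes the
	 * alignment check because both addresses have the same offset,
	 * 0x21000, relative to a 2MB boundary; the mapping below is
	 * then made with psind == 1, covering all 512 pages at once.
	 */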
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		goto out;
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (rv);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
}

static int
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int bdry_idx, i, npages, psind, rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);
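
	/*
	 * Informative sketch of the contract: the pager's populate
	 * method (e.g. the cdev_pg_populate callback of a device pager)
	 * is handed the faulting pindex and the
	 * [*pager_first, *pager_last] range as a hint, must leave every
	 * page it instantiates valid and exclusively busied, and
	 * reports back the range it actually populated, which need not
	 * match the hint.
	 */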
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESTART);
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
	    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
	if (fs->map->timestamp != fs->map_generation) {
		if (bdry_idx == 0) {
			vm_fault_populate_cleanup(fs->first_object, pager_first,
			    pager_last);
		} else {
			m = vm_page_lookup(fs->first_object, pager_first);
			if (m != fs->m)
				vm_page_xunbusy(m);
		}
		return (KERN_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * First, the special case of largepage mappings, where
	 * populate only busies the first page in superpage run.
	 */
	if (bdry_idx != 0) {
		KASSERT(PMAP_HAS_LARGEPAGES,
		    ("missing pmap support for large pages"));
		m = vm_page_lookup(fs->first_object, pager_first);
		vm_fault_populate_check_page(m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
		    fs->entry->offset;
		/* assert alignment for entry */
		KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
		    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
		    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
		KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage m %p %#jx", m,
		    (uintmax_t)VM_PAGE_TO_PHYS(m)));
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_LARGEPAGE, bdry_idx);
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_xunbusy(m);
		if (rv != KERN_SUCCESS)
			goto out;
		if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
			for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
				vm_page_wire(m + i);
		}
		if (fs->m_hold != NULL) {
			*fs->m_hold = m + (fs->first_pindex - pager_first);
			vm_page_wire(*fs->m_hold);
		}
		goto out;
	}

	/*
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;

		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
			psind = 0;

		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0),
		    psind);

		/*
		 * pmap_enter() may fail for a superpage mapping if additional
		 * protection policies prevent the full mapping.
		 * For example, this will happen on amd64 if the entire
		 * address range does not share the same userspace protection
		 * key.  Revert to single-page mappings if this happens.
		 */
		MPASS(rv == KERN_SUCCESS ||
		    (psind > 0 && rv == KERN_PROTECTION_FAILURE));
		if (__predict_false(psind > 0 &&
		    rv == KERN_PROTECTION_FAILURE)) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type |
				    (fs->wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}

		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL &&
			    m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (rv);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif
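
/*
 * Summary of the modes implemented by the switch in vm_fault_trap()
 * below:
 *
 *	machdep.prot_fault_translation == 0:	autodetect from the
 *		process ABI and osrel (SIGSEGV/SEGV_ACCERR for new
 *		FreeBSD binaries, SIGBUS otherwise)
 *	machdep.prot_fault_translation == 1:	always SIGBUS (historic
 *		compat behavior)
 *	any other value:			always SIGSEGV/SEGV_ACCERR
 */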
/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

static int
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (KERN_SUCCESS);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (KERN_SUCCESS);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (KERN_SUCCESS);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (KERN_RESOURCE_SHORTAGE);
}
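
/*
 * Note: KERN_RESOURCE_SHORTAGE from vm_fault_lock_vnode() does not
 * indicate memory pressure.  It tells the caller that all fault state
 * had to be torn down so that the vnode lock could be acquired with a
 * sleeping vget(), and that the fault must be retried from the top
 * with fs->vp now held.
 */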
/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is "# of sequential faults
		 * x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}
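
/*
 * Worked example of the growth rule above, assuming the historical
 * value VM_FAULT_READ_AHEAD_MIN == 7: successive sequential faults
 * request 7, 15, 23, ... pages, i.e. the window grows by
 * VM_FAULT_READ_AHEAD_MIN + 1 pages per fault until it is clamped at
 * VM_FAULT_READ_AHEAD_MAX, at which point vm_fault_dontneed() starts
 * applying drop-behind to pages far behind the faulting address.
 */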
static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	    ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}
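
/*
 * Note: "fs->prot &= retry_prot" above can only narrow the originally
 * granted permissions.  For example, if another thread used
 * mprotect(2) to revoke all access to the range while the map was
 * unlocked, the result is fs->prot == 0 and the fault restarts,
 * letting the fresh top-level lookup report the protection failure.
 */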
static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	KASSERT(fs->object != fs->first_object,
	    ("source and target COW objects are identical"));

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up.
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;

		/*
		 * Typically, the shadow object is either private to this
		 * address space (OBJ_ONEMAPPING) or its pages are read only.
		 * In the highly unusual case where the pages of a shadow object
		 * are read/write shared between this and other address spaces,
		 * we need to ensure that any pmap-level mappings to the
		 * original, copy-on-write page from the backing object are
		 * removed from those other address spaces.
		 *
		 * The flag check is racy, but this is tolerable: if
		 * OBJ_ONEMAPPING is cleared after the check, the busy state
		 * ensures that new mappings of m_cow can't be created.
		 * pmap_enter() will replace an existing mapping in the current
		 * address space.  If OBJ_ONEMAPPING is set after the check,
		 * removing mappings will at worst trigger some unnecessary
		 * page faults.
		 */
		vm_page_assert_xbusied(fs->m_cow);
		if ((fs->first_object->flags & OBJ_ONEMAPPING) == 0)
			pmap_remove_all(fs->m_cow);
	}

	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}
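
/*
 * Illustrative picture of the two paths above, for a write fault on a
 * privately mapped page m found in the backing object:
 *
 *	first_object (anon shadow)             object (backing)
 *	    first_m  <--- vm_page_replace ---      m	(move path)
 *	    first_m  <--- pmap_copy_page  ---      m	(copy path)
 *
 * The move path avoids the physical copy entirely when this mapping
 * holds the only reference to the backing object, so nothing else can
 * reach m.
 */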
static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}
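
/*
 * Illustrative view of a full traversal: for a shadow chain
 *
 *	first_object -> backing object -> ... -> bottom (e.g. vnode)
 *
 * the fault loop calls vm_fault_next() once per link, carrying a
 * paging-in-progress reference on each visited object, until a page
 * is found or the chain is exhausted, in which case the caller
 * zero-fills via vm_fault_zerofill() below.
 */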

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (fs->object != fs->first_object) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
	 */
	if ((fs->m->flags & PG_ZERO) == 0) {
		pmap_zero_page(fs->m);
	} else {
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}
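
/*
 * A note on the return protocol of the helpers below: KERN_NOT_RECEIVER
 * is not an error, it means "no terminal result was produced; the fault
 * loop should keep going".  A caller therefore follows this pattern
 * (sketch only, not additional logic):
 *
 *	rv = vm_fault_allocate(&fs);
 *	if (rv != KERN_NOT_RECEIVER)
 *		return or retry according to rv;
 *	fall through to the pager lookup;
 */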

/*
 * Allocate a page directly or via the object populate method.
 */
static int
vm_fault_allocate(struct faultstate *fs)
{
	struct domainset *dset;
	int alloc_req;
	int rv;

	if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
		rv = vm_fault_lock_vnode(fs, true);
		MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
		if (rv == KERN_RESOURCE_SHORTAGE)
			return (rv);
	}

	if (fs->pindex >= fs->object->size)
		return (KERN_OUT_OF_BOUNDS);

	if (fs->object == fs->first_object &&
	    (fs->first_object->flags & OBJ_POPULATE) != 0 &&
	    fs->first_object->shadow_count == 0) {
		rv = vm_fault_populate(fs);
		switch (rv) {
		case KERN_SUCCESS:
		case KERN_FAILURE:
		case KERN_PROTECTION_FAILURE:
		case KERN_RESTART:
			return (rv);
		case KERN_NOT_RECEIVER:
			/*
			 * Pager's populate() method
			 * returned VM_PAGER_BAD.
			 */
			break;
		default:
			panic("inconsistent return codes");
		}
	}

	/*
	 * Allocate a new page for this object/offset pair.
	 *
	 * The unlocked read of the p_flag is harmless.  At worst, P_KILLED
	 * might not be observed there, and the allocation can fail, causing
	 * a restart and a new reading of the p_flag.
	 */
	dset = fs->object->domain.dr_policy;
	if (dset == NULL)
		dset = curthread->td_domain.dr_policy;
	if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
		vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex);
#endif
		alloc_req = P_KILLED(curproc) ?
		    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
		if (fs->object->type != OBJT_VNODE &&
		    fs->object->backing_object == NULL)
			alloc_req |= VM_ALLOC_ZERO;
		fs->m = vm_page_alloc(fs->object, fs->pindex, alloc_req);
	}
	if (fs->m == NULL) {
		unlock_and_deallocate(fs);
		if (vm_pfault_oom_attempts < 0 ||
		    fs->oom < vm_pfault_oom_attempts) {
			fs->oom++;
			vm_waitpfault(dset, vm_pfault_oom_wait * hz);
		} else {
			if (bootverbose)
				printf(
		    "proc %d (%s) failed to alloc page on fault, starting OOM\n",
				    curproc->p_pid, curproc->p_comm);
			vm_pageout_oom(VM_OOM_MEM_PF);
			fs->oom = 0;
		}
		return (KERN_RESOURCE_SHORTAGE);
	}
	fs->oom = 0;

	return (KERN_NOT_RECEIVER);
}
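
/*
 * Worked example of the OOM back-off above, assuming the stock sysctl
 * settings of vm.pfault_oom_attempts = 3 and vm.pfault_oom_wait = 10
 * seconds (values assumed here, not taken from this file): a thread
 * whose allocations keep failing sleeps in vm_waitpfault() three times,
 * roughly 30 seconds in total, before vm_pageout_oom(VM_OOM_MEM_PF) is
 * triggered.  Setting vm.pfault_oom_attempts to -1 makes the thread
 * wait indefinitely and never start the page-fault-driven OOM kill.
 */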

/*
 * Call the pager to retrieve the page if there is a chance
 * that the pager has it, and potentially retrieve additional
 * pages at the same time.
 */
static int
vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
{
	vm_offset_t e_end, e_start;
	int ahead, behind, cluster_offset, rv;
	u_char behavior;

	/*
	 * Prepare for unlocking the map.  Save the map
	 * entry's start and end addresses, which are used to
	 * optimize the size of the pager operation below.
	 * Even if the map entry's addresses change after
	 * unlocking the map, using the saved addresses is
	 * safe.
	 */
	e_start = fs->entry->start;
	e_end = fs->entry->end;
	behavior = vm_map_entry_behavior(fs->entry);

	/*
	 * Release the map lock before locking the vnode or
	 * sleeping in the pager.  (If the current object has
	 * a shadow, then an earlier iteration of this loop
	 * may have already unlocked the map.)
	 */
	unlock_map(fs);

	rv = vm_fault_lock_vnode(fs, false);
	MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
	if (rv == KERN_RESOURCE_SHORTAGE)
		return (rv);
	KASSERT(fs->vp == NULL || !fs->map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));

	/*
	 * Page in the requested page and hint the pager that it may
	 * bring up surrounding pages.
	 */
	if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
	    P_KILLED(curproc)) {
		behind = 0;
		ahead = 0;
	} else {
		/* Is this a sequential fault? */
		if (nera > 0) {
			behind = 0;
			ahead = nera;
		} else {
			/*
			 * Request a cluster of pages that is
			 * aligned to a VM_FAULT_READ_DEFAULT
			 * page offset boundary within the
			 * object.  Alignment to a page offset
			 * boundary is more likely to coincide
			 * with the underlying file system
			 * block than alignment to a virtual
			 * address boundary.
			 */
			cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT;
			behind = ulmin(cluster_offset,
			    atop(fs->vaddr - e_start));
			ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
		}
		ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1);
	}
	*behindp = behind;
	*aheadp = ahead;
	rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
	if (rv == VM_PAGER_OK)
		return (KERN_SUCCESS);
	if (rv == VM_PAGER_ERROR)
		printf("vm_fault: pager read error, pid %d (%s)\n",
		    curproc->p_pid, curproc->p_comm);
	/*
	 * If an I/O error occurred or the requested page was
	 * outside the range of the pager, clean up and return
	 * an error.
	 */
	if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD)
		return (KERN_OUT_OF_BOUNDS);
	return (KERN_NOT_RECEIVER);
}
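
/*
 * Worked example of the clustering above, assuming a hypothetical
 * VM_FAULT_READ_DEFAULT of 16 pages: a first (nera == 0) fault at
 * pindex 13, thirteen pages into the map entry, gives cluster_offset =
 * 13 % 16 = 13, behind = min(13, 13) = 13 and ahead = 16 - 1 - 13 = 2,
 * so the pager is asked for object pages 0 through 15, i.e. one full
 * cluster aligned to a 16-page boundary within the object.
 */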

/*
 * Wait/Retry if the page is busy.  We have to do this if the page is
 * either exclusive or shared busy because the vm_pager may be using
 * read busy for pageouts (and even pageins if it is the vnode pager),
 * and we could end up trying to pagein and pageout the same page
 * simultaneously.
 *
 * We can theoretically allow the busy case on a read fault if the page
 * is marked valid, but since such pages are typically already pmap'd,
 * putting that special case in might be more effort than it is worth.
 * We cannot under any circumstances mess around with a shared busied
 * page except, perhaps, to pmap it.
 */
static void
vm_fault_busy_sleep(struct faultstate *fs)
{
	/*
	 * Reference the page before unlocking and
	 * sleeping so that the page daemon is less
	 * likely to reclaim it.
	 */
	vm_page_aflag_set(fs->m, PGA_REFERENCED);
	if (fs->object != fs->first_object) {
		fault_page_release(&fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_pip_wakeup(fs->object);
	unlock_map(fs);
	if (fs->m == vm_page_lookup(fs->object, fs->pindex))
		vm_page_busy_sleep(fs->m, "vmpfw", false);
	else
		VM_OBJECT_WUNLOCK(fs->object);
	VM_CNT_INC(v_intrans);
	vm_object_deallocate(fs->first_object);
}
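
/*
 * Sketch of a typical call into vm_fault(), as made indirectly by the
 * machine-dependent trap handlers when a user access faults; details
 * vary by architecture and this is illustrative only:
 *
 *	map = &td->td_proc->p_vmspace->vm_map;
 *	rv = vm_fault(map, trunc_page(faulting_va), VM_PROT_READ,
 *	    VM_FAULT_NORMAL, NULL);
 *	if (rv != KERN_SUCCESS)
 *		deliver SIGSEGV or SIGBUS according to rv;
 */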

int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	int ahead, behind, faultcount;
	int nera, result, rv;
	bool dead, hardfault;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	fs.vaddr = vaddr;
	fs.m_hold = m_hold;
	fs.fault_flags = fault_flags;
	fs.map = map;
	fs.lookup_still_valid = false;
	fs.oom = 0;
	faultcount = 0;
	nera = -1;
	hardfault = false;

RetryFault:
	fs.fault_type = fault_type;

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	result = vm_fault_lookup(&fs);
	if (result != KERN_SUCCESS) {
		if (result == KERN_RESOURCE_SHORTAGE)
			goto RetryFault;
		return (result);
	}

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are mapping an existing page from the top-level object.
	 * Under this condition, a read lock on the object suffices, allowing
	 * multiple page faults of a similar type to run in parallel.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
	    (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
		VM_OBJECT_RLOCK(fs.first_object);
		rv = vm_fault_soft_fast(&fs);
		if (rv == KERN_SUCCESS)
			return (rv);
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.m_cow = fs.m = fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;

	if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
		rv = vm_fault_allocate(&fs);
		switch (rv) {
		case KERN_RESTART:
			unlock_and_deallocate(&fs);
			/* FALLTHROUGH */
		case KERN_RESOURCE_SHORTAGE:
			goto RetryFault;
		case KERN_SUCCESS:
		case KERN_FAILURE:
		case KERN_PROTECTION_FAILURE:
		case KERN_OUT_OF_BOUNDS:
			unlock_and_deallocate(&fs);
			return (rv);
		case KERN_NOT_RECEIVER:
			break;
		default:
			panic("vm_fault: Unhandled rv %d", rv);
		}
	}

	while (TRUE) {
		KASSERT(fs.m == NULL,
		    ("page still set %p at loop start", fs.m));
		/*
		 * If the object is marked for imminent termination,
		 * we retry here, since the collapse pass has raced
		 * with us.  Otherwise, if we see a terminally dead
		 * object, return failure.
		 */
		if ((fs.object->flags & OBJ_DEAD) != 0) {
			dead = fs.object->type == OBJT_DEAD;
			unlock_and_deallocate(&fs);
			if (dead)
				return (KERN_PROTECTION_FAILURE);
			pause("vmf_de", 1);
			goto RetryFault;
		}

		/*
		 * See if the page is resident.
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			if (vm_page_tryxbusy(fs.m) == 0) {
				vm_fault_busy_sleep(&fs);
				goto RetryFault;
			}

			/*
			 * The page is marked busy for other processes and the
			 * pagedaemon.  If it is still completely valid we
			 * are done.
			 */
			if (vm_page_all_valid(fs.m)) {
				VM_OBJECT_WUNLOCK(fs.object);
				break; /* break to PAGE HAS BEEN FOUND. */
			}
		}
		VM_OBJECT_ASSERT_WLOCKED(fs.object);

		/*
		 * Page is not resident.  If the pager might contain the page
		 * or this is the beginning of the search, allocate a new
		 * page.  (Default objects are zero-fill, so there is no real
		 * pager for them.)
		 */
		if (fs.m == NULL && (fs.object->type != OBJT_DEFAULT ||
		    fs.object == fs.first_object)) {
			rv = vm_fault_allocate(&fs);
			switch (rv) {
			case KERN_RESTART:
				unlock_and_deallocate(&fs);
				/* FALLTHROUGH */
			case KERN_RESOURCE_SHORTAGE:
				goto RetryFault;
			case KERN_SUCCESS:
			case KERN_FAILURE:
			case KERN_PROTECTION_FAILURE:
			case KERN_OUT_OF_BOUNDS:
				unlock_and_deallocate(&fs);
				return (rv);
			case KERN_NOT_RECEIVER:
				break;
			default:
				panic("vm_fault: Unhandled rv %d", rv);
			}
		}

		/*
		 * Default objects have no pager so no exclusive busy exists
		 * to protect this page in the chain.  Skip to the next
		 * object without dropping the lock to preserve atomicity of
		 * shadow faults.
		 */
		if (fs.object->type != OBJT_DEFAULT) {
			/*
			 * At this point, we have either allocated a new page
			 * or found an existing page that is only partially
			 * valid.
			 *
			 * We hold a reference on the current object and the
			 * page is exclusive busied.  The exclusive busy
			 * prevents simultaneous faults and collapses while
			 * the object lock is dropped.
			 */
			VM_OBJECT_WUNLOCK(fs.object);

			/*
			 * If the pager for the current object might have
			 * the page, then determine the number of additional
			 * pages to read and potentially reprioritize
			 * previously read pages for earlier reclamation.
			 * These operations should only be performed once per
			 * page fault.  Even if the current pager doesn't
			 * have the page, the number of additional pages to
			 * read will apply to subsequent objects in the
			 * shadow chain.
			 */
			if (nera == -1 && !P_KILLED(curproc))
				nera = vm_fault_readahead(&fs);

			rv = vm_fault_getpages(&fs, nera, &behind, &ahead);
			if (rv == KERN_SUCCESS) {
				faultcount = behind + 1 + ahead;
				hardfault = true;
				break; /* break to PAGE HAS BEEN FOUND. */
			}
			if (rv == KERN_RESOURCE_SHORTAGE)
				goto RetryFault;
			VM_OBJECT_WLOCK(fs.object);
			if (rv == KERN_OUT_OF_BOUNDS) {
				fault_page_free(&fs.m);
				unlock_and_deallocate(&fs);
				return (rv);
			}
		}

		/*
		 * The page was not found in the current object.  Try to
		 * traverse into a backing object or zero fill if none is
		 * found.
		 */
		if (vm_fault_next(&fs))
			continue;
		if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) {
			if (fs.first_object == fs.object)
				fault_page_free(&fs.first_m);
			unlock_and_deallocate(&fs);
			return (KERN_OUT_OF_BOUNDS);
		}
		VM_OBJECT_WUNLOCK(fs.object);
		vm_fault_zerofill(&fs);
		/* Don't try to prefault neighboring pages. */
		faultcount = 1;
		break; /* break to PAGE HAS BEEN FOUND. */
	}

	/*
	 * PAGE HAS BEEN FOUND.  A valid page has been found and exclusively
	 * busied.  The object lock must no longer be held.
	 */
	vm_page_assert_xbusied(fs.m);
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			vm_fault_cow(&fs);
			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;
		} else {
			fs.prot &= ~VM_PROT_WRITE;
		}
	}
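
	/*
	 * Note that on the read-fault path above the backing object's page
	 * is mapped directly, only with VM_PROT_WRITE removed from fs.prot;
	 * a later write to the same address faults again and only then pays
	 * for the copy performed by vm_fault_cow().
	 */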

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		result = vm_fault_relookup(&fs);
		if (result != KERN_SUCCESS) {
			fault_deallocate(&fs);
			if (result == KERN_RESTART)
				goto RetryFault;
			return (result);
		}
	}
	VM_OBJECT_ASSERT_UNLOCKED(fs.object);

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	vm_page_assert_xbusied(fs.m);
	KASSERT(vm_page_all_valid(fs.m),
	    ("vm_fault: page %p partially invalid", fs.m));

	vm_fault_dirty(&fs, fs.m);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot,
	    fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 &&
	    fs.wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fs.fault_flags & VM_FAULT_WIRE) != 0)
		vm_page_wire(fs.m);
	else
		vm_page_activate(fs.m);
	if (fs.m_hold != NULL) {
		(*fs.m_hold) = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);
	fs.m = NULL;

	/*
	 * Unlock everything, and return.
	 */
	fault_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}

/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_UNLOCKED(object);
	first_object = fs->first_object;
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		VM_OBJECT_RLOCK(first_object);
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (!vm_page_all_valid(m) ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.  The test for whether the page
				 * is in the inactive queue is racy; in the
				 * worst case we will requeue the page
				 * unnecessarily.
				 */
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
			}
		}
		VM_OBJECT_RUNLOCK(first_object);
	}
}
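
/*
 * Worked example for vm_fault_dontneed(), assuming 4 KB base pages, a
 * 2 MB first superpage size, and a VM_FAULT_DONTNEED_MIN no larger than
 * that: the cluster size is then 2 MB (512 pages), so a sequential
 * reader whose readahead crosses a 2 MB boundary gets the preceding
 * 2 MB advised with pmap_advise(..., MADV_DONTNEED) and its resident,
 * idle pages pushed to the inactive queue for earlier reclamation.
 */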

/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
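	/*
	 * For instance, with backward = 2 and forward = 3 the candidates
	 * are addra - 1, + 1, - 2, + 2, - 3, + 3 (in units of PAGE_SIZE);
	 * out-of-range candidates on either side are rejected by the
	 * checks below.
	 */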
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (vm_page_all_valid(m) &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	if (!vm_map_range_valid(map, addr, end))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out of tree) callers mark
		 * too wide a code area with vm_fault_disable_pagefaults()
		 * already; such callers should use the VM_PROT_QUICK_NOFAULT
		 * flag to request the proper behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}
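
/*
 * Minimal usage sketch for vm_fault_quick_hold_pages(); "uaddr" and
 * "len" are hypothetical caller-supplied values:
 *
 *	vm_page_t ma[16];
 *	int n;
 *
 *	n = vm_fault_quick_hold_pages(map, uaddr, len, VM_PROT_READ,
 *	    ma, nitems(ma));
 *	if (n == -1)
 *		return (EFAULT);
 *	access the data through ma[0 .. n - 1], e.g. by DMA or
 *	uiomove_fromphys(), then release the wirings:
 *	vm_page_unhold_pages(ma, n);
 */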

/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create new shadow object backing dst_entry with private copy of
 *	all underlying pages.  When src_entry is equal to dst_entry,
 *	function implements COW for wired-down map entry.  Otherwise,
 *	it forks wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * Doesn't actually shadow anything - we copy the pages
		 * directly.
		 */
		dst_object = vm_object_allocate_anon(atop(dst_entry->end -
		    dst_entry->start), NULL, NULL, 0);
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified.  Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking a potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  Index can occur
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);

		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (vm_page_all_valid(dst_m)) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_activate(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
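
/*
 * Minimal usage sketch for the pair above; the copyin_nofault() call
 * stands in for any code that must not take page faults, and "uaddr",
 * "kbuf", and "len" are hypothetical caller-supplied values:
 *
 *	int error, save;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = copyin_nofault(uaddr, kbuf, len);
 *	vm_fault_enable_pagefaults(save);
 *
 * The cookie returned by the disable call must be passed back
 * unmodified so that nested disable/enable pairs restore the thread's
 * original flags.
 */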