/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

#define	PFBAK 4
#define	PFFOR 4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)
#define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	/* Fault parameters. */
	vm_offset_t	vaddr;
	vm_page_t	*m_hold;
	vm_prot_t	fault_type;
	vm_prot_t	prot;
	int		fault_flags;
	int		oom;
	boolean_t	wired;

	/* Page reference for cow. */
	vm_page_t	m_cow;

	/* Current object. */
	vm_object_t	object;
	vm_pindex_t	pindex;
	vm_page_t	m;

	/* Top-level map object. */
	vm_object_t	first_object;
	vm_pindex_t	first_pindex;
	vm_page_t	first_m;

	/* Map state. */
	vm_map_t	map;
	vm_map_entry_t	entry;
	int		map_generation;
	bool		lookup_still_valid;

	/* Vnode if locked. */
	struct vnode	*vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");
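/*
 * Example (illustrative): with the defaults above, a thread that cannot
 * allocate a page in the fault handler sleeps for up to 10 seconds per
 * attempt and retries 3 times before vm_pageout_oom() is invoked.
 * Setting a negative value, e.g. "sysctl vm.pfault_oom_attempts=-1",
 * makes the handler keep waiting for free pages instead of ever
 * starting OOM processing from the fault path.
 */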

static inline void
fault_page_release(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		/*
		 * We are likely to loop around again and attempt to busy
		 * this page.  Deactivating it leaves it available for
		 * pageout while optimizing fault restarts.
		 */
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
fault_page_free(vm_page_t *mp)
{
	vm_page_t m;

	m = *mp;
	if (m != NULL) {
		VM_OBJECT_ASSERT_WLOCKED(m->object);
		if (!vm_page_wired(m))
			vm_page_free(m);
		else
			vm_page_xunbusy(m);
		*mp = NULL;
	}
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
fault_deallocate(struct faultstate *fs)
{

	fault_page_release(&fs->m_cow);
	fault_page_release(&fs->m);
	vm_object_pip_wakeup(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		fault_page_free(&fs->first_m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_object_pip_wakeup(fs->first_object);
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	VM_OBJECT_WUNLOCK(fs->object);
	fault_deallocate(fs);
}

static void
vm_fault_dirty(struct faultstate *fs, vm_page_t m)
{
	bool need_dirty;

	if (((fs->prot & VM_PROT_WRITE) == 0 &&
	    (fs->fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_PAGE_OBJECT_BUSY_ASSERT(m);

	need_dirty = ((fs->fault_type & VM_PROT_WRITE) != 0 &&
	    (fs->fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fs->fault_flags & VM_FAULT_DIRTY) != 0;

	vm_object_set_writeable_dirty(m->object);

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.
	 */
	if (need_dirty && vm_page_set_dirty(m) == 0) {
		/*
		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
		 * if the page is already dirty to prevent data written with
		 * the expectation of being synced from not being synced.
		 * Likewise if this entry does not request NOSYNC then make
		 * sure the page isn't marked NOSYNC.  Applications sharing
		 * data should use the same flags to avoid ping ponging.
		 */
		if ((fs->entry->eflags & MAP_ENTRY_NOSYNC) != 0)
			vm_page_aflag_set(m, PGA_NOSYNC);
		else
			vm_page_aflag_clear(m, PGA_NOSYNC);
	}

}
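/*
 * In other words (an illustrative summary of the policy above): an
 * ordinary write fault (VM_PROT_WRITE set, VM_FAULT_WIRE clear) dirties
 * the page immediately; a pure wiring fault does not, since wiring by
 * itself implies no store; and VM_FAULT_DIRTY forces the page dirty even
 * on a read fault, for callers that will modify the page through some
 * other channel.
 */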

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs)
{
	vm_page_t m, m_map;
#if VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;
	vm_offset_t vaddr;

	MPASS(fs->vp == NULL);
	vaddr = fs->vaddr;
	vm_object_busy(fs->first_object);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || !vm_page_all_valid(m)) {
		rv = KERN_FAILURE;
		goto out;
	}
	m_map = m;
	psind = 0;
#if VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !fs->wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((fs->prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fs->fault_type |= VM_PROT_WRITE;
		}
	}
#endif
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
	    PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		goto out;
	if (fs->m_hold != NULL) {
		(*fs->m_hold) = m;
		vm_page_wire(m);
	}
	if (psind == 0 && !fs->wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_fault_dirty(fs, m);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;

out:
	vm_object_unbusy(fs->first_object);
	return (rv);
}
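/*
 * A concrete reading of the superpage test in vm_fault_soft_fast() above,
 * using amd64 as an example, where pagesizes[1] is 2MB: the faulting
 * virtual address and the page's physical address must be congruent
 * modulo 2MB, the whole 2MB run must lie inside the map entry, and every
 * constituent page must be valid (and, for write access, unbusied and
 * already dirty) before a single 2MB mapping is entered in place of 512
 * individual 4KB mappings.
 */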

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(vm_page_all_valid(m));
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_deactivate(m);
		vm_page_xunbusy(m);
	}
}

static int
vm_fault_populate(struct faultstate *fs)
{
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int bdry_idx, i, npages, psind, rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(blockcount_read(&fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fs->fault_type, fs->entry->max_protection, &pager_first,
	    &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESTART);
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	bdry_idx = (fs->entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
	    MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
	if (fs->map->timestamp != fs->map_generation) {
		if (bdry_idx == 0) {
			vm_fault_populate_cleanup(fs->first_object, pager_first,
			    pager_last);
		} else {
			m = vm_page_lookup(fs->first_object, pager_first);
			if (m != fs->m)
				vm_page_xunbusy(m);
		}
		return (KERN_RESTART);
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * First, the special case of largepage mappings, where populate
	 * only busies the first page in the superpage run.
	 */
	if (bdry_idx != 0) {
		KASSERT(PMAP_HAS_LARGEPAGES,
		    ("missing pmap support for large pages"));
		m = vm_page_lookup(fs->first_object, pager_first);
		vm_fault_populate_check_page(m);
		VM_OBJECT_WUNLOCK(fs->first_object);
		vaddr = fs->entry->start + IDX_TO_OFF(pager_first) -
		    fs->entry->offset;
		/* assert alignment for entry */
		KASSERT((vaddr & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage start %#jx pager_first %#jx offset %#jx vaddr %#jx",
		    (uintmax_t)fs->entry->start, (uintmax_t)pager_first,
		    (uintmax_t)fs->entry->offset, (uintmax_t)vaddr));
		KASSERT((VM_PAGE_TO_PHYS(m) & (pagesizes[bdry_idx] - 1)) == 0,
		    ("unaligned superpage m %p %#jx", m,
		    (uintmax_t)VM_PAGE_TO_PHYS(m)));
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot,
		    fs->fault_type | (fs->wired ? PMAP_ENTER_WIRED : 0) |
		    PMAP_ENTER_LARGEPAGE, bdry_idx);
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_xunbusy(m);
		if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
			for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
				vm_page_wire(m + i);
		}
		if (fs->m_hold != NULL) {
			*fs->m_hold = m + (fs->first_pindex - pager_first);
			vm_page_wire(*fs->m_hold);
		}
		goto out;
	}

	/*
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
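	/*
	 * Worked example of the clipping above: if the entry maps object
	 * pages [16, 47] (an offset of 16 pages, 32 pages long) and the
	 * pager populated [0, 63], then pages [0, 15] and [48, 63] cannot
	 * be mapped through this entry; they are deactivated and unbusied
	 * by vm_fault_populate_cleanup(), and the loop below maps only
	 * [16, 47].
	 */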
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)
		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || fs->wired))
			psind = 0;
#else
		psind = 0;
#endif
		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs, &m[i]);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, fs->prot, fs->fault_type |
		    (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
#if defined(__amd64__)
		if (psind > 0 && rv == KERN_FAILURE) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], fs->prot, fs->fault_type |
				    (fs->wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		for (i = 0; i < npages; i++) {
			if ((fs->fault_flags & VM_FAULT_WIRE) != 0)
				vm_page_wire(&m[i]);
			else
				vm_page_activate(&m[i]);
			if (fs->m_hold != NULL && m[i].pindex == fs->first_pindex) {
				(*fs->m_hold) = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
	}
out:
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}
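/*
 * Summary of machdep.prot_fault_translation as implemented above:
 *   0 - autodetect: SIGSEGV/SEGV_ACCERR for FreeBSD-ABI binaries built
 *       recently enough (p_osrel >= P_OSREL_SIGSEGV), the historical
 *       SIGBUS otherwise;
 *   1 - always the historical SIGBUS;
 *   2 (or any other value) - always SIGSEGV/SEGV_ACCERR.
 * E.g. "sysctl machdep.prot_fault_translation=2" forces SIGSEGV for
 * protection faults regardless of the binary's age.
 */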

static int
vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
{
	struct vnode *vp;
	int error, locked;

	if (fs->object->type != OBJT_VNODE)
		return (KERN_SUCCESS);
	vp = fs->object->handle;
	if (vp == fs->vp) {
		ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
		return (KERN_SUCCESS);
	}

	/*
	 * Perform an unlock in case the desired vnode changed while
	 * the map was unlocked during a retry.
	 */
	unlock_vp(fs);

	locked = VOP_ISLOCKED(vp);
	if (locked != LK_EXCLUSIVE)
		locked = LK_SHARED;

	/*
	 * We must not sleep acquiring the vnode lock while we have
	 * the page exclusive busied or the object's
	 * paging-in-progress count incremented.  Otherwise, we could
	 * deadlock.
	 */
	error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
	if (error == 0) {
		fs->vp = vp;
		return (KERN_SUCCESS);
	}

	vhold(vp);
	if (objlocked)
		unlock_and_deallocate(fs);
	else
		fault_deallocate(fs);
	error = vget(vp, locked | LK_RETRY | LK_CANRECURSE);
	vdrop(vp);
	fs->vp = vp;
	KASSERT(error == 0, ("vm_fault: vget failed %d", error));
	return (KERN_RESOURCE_SHORTAGE);
}

/*
 * Calculate the desired readahead.  Handle drop-behind.
 *
 * Returns the number of readahead blocks to pass to the pager.
 */
static int
vm_fault_readahead(struct faultstate *fs)
{
	int era, nera;
	u_char behavior;

	KASSERT(fs->lookup_still_valid, ("map unlocked"));
	era = fs->entry->read_ahead;
	behavior = vm_map_entry_behavior(fs->entry);
	if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
		nera = 0;
	} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
		nera = VM_FAULT_READ_AHEAD_MAX;
		if (fs->vaddr == fs->entry->next_read)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else if (fs->vaddr == fs->entry->next_read) {
		/*
		 * This is a sequential fault.  Arithmetically
		 * increase the requested number of pages in
		 * the read-ahead window.  The requested
		 * number of pages is
		 * "# of sequential faults x (read ahead min + 1) + read ahead min"
		 */
		nera = VM_FAULT_READ_AHEAD_MIN;
		if (era > 0) {
			nera += era + 1;
			if (nera > VM_FAULT_READ_AHEAD_MAX)
				nera = VM_FAULT_READ_AHEAD_MAX;
		}
		if (era == VM_FAULT_READ_AHEAD_MAX)
			vm_fault_dontneed(fs, fs->vaddr, nera);
	} else {
		/*
		 * This is a non-sequential fault.
		 */
		nera = 0;
	}
	if (era != nera) {
		/*
		 * A read lock on the map suffices to update
		 * the read ahead count safely.
		 */
		fs->entry->read_ahead = nera;
	}

	return (nera);
}
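/*
 * Worked example of the growth arithmetic above, assuming
 * VM_FAULT_READ_AHEAD_MIN is 7: the first fault of a detected sequence
 * requests 7 read-ahead pages; the next requests 7 + 7 + 1 = 15; then
 * 15 + 7 + 1 = 23, and so on, until the window saturates at
 * VM_FAULT_READ_AHEAD_MAX, at which point vm_fault_dontneed() starts
 * dropping pages behind the sequential sweep.
 */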

static int
vm_fault_lookup(struct faultstate *fs)
{
	int result;

	KASSERT(!fs->lookup_still_valid,
	   ("vm_fault_lookup: Map already locked."));
	result = vm_map_lookup(&fs->map, fs->vaddr, fs->fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs->entry, &fs->first_object,
	    &fs->first_pindex, &fs->prot, &fs->wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(fs);
		return (result);
	}

	fs->map_generation = fs->map->timestamp;

	if (fs->entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)fs->vaddr);
	}

	if (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs->entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs->map);
		vm_map_lock(fs->map);
		if (vm_map_lookup_entry(fs->map, fs->vaddr, &fs->entry) &&
		    (fs->entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(fs);
			fs->entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs->map, 0);
		} else
			vm_map_unlock(fs->map);
		return (KERN_RESOURCE_SHORTAGE);
	}

	MPASS((fs->entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (fs->wired)
		fs->fault_type = fs->prot | (fs->fault_type & VM_PROT_COPY);
	else
		KASSERT((fs->fault_flags & VM_FAULT_WIRE) == 0,
		    ("!fs->wired && VM_FAULT_WIRE"));
	fs->lookup_still_valid = true;

	return (KERN_SUCCESS);
}

static int
vm_fault_relookup(struct faultstate *fs)
{
	vm_object_t retry_object;
	vm_pindex_t retry_pindex;
	vm_prot_t retry_prot;
	int result;

	if (!vm_map_trylock_read(fs->map))
		return (KERN_RESTART);

	fs->lookup_still_valid = true;
	if (fs->map->timestamp == fs->map_generation)
		return (KERN_SUCCESS);

	result = vm_map_lookup_locked(&fs->map, fs->vaddr, fs->fault_type,
	    &fs->entry, &retry_object, &retry_pindex, &retry_prot,
	    &fs->wired);
	if (result != KERN_SUCCESS) {
		/*
		 * If retry of map lookup would have blocked then
		 * retry fault from start.
		 */
		if (result == KERN_FAILURE)
			return (KERN_RESTART);
		return (result);
	}
	if (retry_object != fs->first_object ||
	    retry_pindex != fs->first_pindex)
		return (KERN_RESTART);

	/*
	 * Check whether the protection has changed or the object has
	 * been copied while we left the map unlocked.  Changing from
	 * read to write permission is OK - we leave the page
	 * write-protected, and catch the write fault.  Changing from
	 * write to read permission means that we can't mark the page
	 * write-enabled after all.
	 */
	fs->prot &= retry_prot;
	fs->fault_type &= retry_prot;
	if (fs->prot == 0)
		return (KERN_RESTART);

	/* Reassert because wired may have changed. */
	KASSERT(fs->wired || (fs->fault_flags & VM_FAULT_WIRE) == 0,
	    ("!wired && VM_FAULT_WIRE"));

	return (KERN_SUCCESS);
}
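/*
 * A concrete instance of the narrowing above: take a read fault on a
 * read/write entry, so fs->prot starts as read/write, and suppose an
 * mprotect(PROT_READ) downgrades the entry while the map is unlocked.
 * retry_prot comes back read-only, fs->prot is narrowed to read-only,
 * and the page is simply not mapped writeable; a later store faults
 * again and is judged against the new, stricter entry.  Only when no
 * permissions remain at all (fs->prot == 0) is the whole fault
 * restarted from scratch.
 */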

static void
vm_fault_cow(struct faultstate *fs)
{
	bool is_first_object_locked;

	/*
	 * This allows pages to be virtually copied from a backing_object
	 * into the first_object, where the backing object has no other
	 * refs to it, and cannot gain any more refs.  Instead of a bcopy,
	 * we just move the page from the backing object to the first
	 * object.  Note that we must mark the page dirty in the first
	 * object so that it will go out to swap when needed.
	 */
	is_first_object_locked = false;
	if (
	    /*
	     * Only one shadow object and no other refs.
	     */
	    fs->object->shadow_count == 1 && fs->object->ref_count == 1 &&
	    /*
	     * No other ways to look the object up
	     */
	    fs->object->handle == NULL && (fs->object->flags & OBJ_ANON) != 0 &&
	    /*
	     * We don't chase down the shadow chain and we can acquire locks.
	     */
	    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs->first_object)) &&
	    fs->object == fs->first_object->backing_object &&
	    VM_OBJECT_TRYWLOCK(fs->object)) {
		/*
		 * Remove but keep xbusy for replace.  fs->m is moved into
		 * fs->first_object and left busy while fs->first_m is
		 * conditionally freed.
		 */
		vm_page_remove_xbusy(fs->m);
		vm_page_replace(fs->m, fs->first_object, fs->first_pindex,
		    fs->first_m);
		vm_page_dirty(fs->m);
#if VM_NRESERVLEVEL > 0
		/*
		 * Rename the reservation.
		 */
		vm_reserv_rename(fs->m, fs->first_object, fs->object,
		    OFF_TO_IDX(fs->first_object->backing_object_offset));
#endif
		VM_OBJECT_WUNLOCK(fs->object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = fs->m;
		fs->m = NULL;
		VM_CNT_INC(v_cow_optim);
	} else {
		if (is_first_object_locked)
			VM_OBJECT_WUNLOCK(fs->first_object);
		/*
		 * Oh, well, let's copy it.
		 */
		pmap_copy_page(fs->m, fs->first_m);
		vm_page_valid(fs->first_m);
		if (fs->wired && (fs->fault_flags & VM_FAULT_WIRE) == 0) {
			vm_page_wire(fs->first_m);
			vm_page_unwire(fs->m, PQ_INACTIVE);
		}
		/*
		 * Save the cow page to be released after
		 * pmap_enter is complete.
		 */
		fs->m_cow = fs->m;
		fs->m = NULL;
	}
	/*
	 * fs->object != fs->first_object due to above
	 * conditional
	 */
	vm_object_pip_wakeup(fs->object);

	/*
	 * Only use the new page below...
	 */
	fs->object = fs->first_object;
	fs->pindex = fs->first_pindex;
	fs->m = fs->first_m;
	VM_CNT_INC(v_cow_faults);
	curthread->td_cow++;
}
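/*
 * The two vm_fault_cow() outcomes above, in short: when the backing
 * object is anonymous, referenced by nothing else, and both locks can be
 * acquired without sleeping, the page is simply renamed into
 * first_object (the optimized path counted by v_cow_optim); otherwise
 * its contents are copied into first_m with pmap_copy_page() and the
 * original page is stashed in fs->m_cow so that it is released only
 * after pmap_enter() has switched the mapping over to the copy.
 */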

static bool
vm_fault_next(struct faultstate *fs)
{
	vm_object_t next_object;

	/*
	 * The requested page does not exist at this object/
	 * offset.  Remove the invalid page from the object,
	 * waking up anyone waiting for it, and continue on to
	 * the next object.  However, if this is the top-level
	 * object, we must leave the busy page in place to
	 * prevent another process from rushing past us, and
	 * inserting the page in that object at the same time
	 * that we are.
	 */
	if (fs->object == fs->first_object) {
		fs->first_m = fs->m;
		fs->m = NULL;
	} else
		fault_page_free(&fs->m);

	/*
	 * Move on to the next object.  Lock the next object before
	 * unlocking the current one.
	 */
	VM_OBJECT_ASSERT_WLOCKED(fs->object);
	next_object = fs->object->backing_object;
	if (next_object == NULL)
		return (false);
	MPASS(fs->first_m != NULL);
	KASSERT(fs->object != next_object, ("object loop %p", next_object));
	VM_OBJECT_WLOCK(next_object);
	vm_object_pip_add(next_object, 1);
	if (fs->object != fs->first_object)
		vm_object_pip_wakeup(fs->object);
	fs->pindex += OFF_TO_IDX(fs->object->backing_object_offset);
	VM_OBJECT_WUNLOCK(fs->object);
	fs->object = next_object;

	return (true);
}

static void
vm_fault_zerofill(struct faultstate *fs)
{

	/*
	 * If there's no object left, fill the page in the top
	 * object with zeros.
	 */
	if (fs->object != fs->first_object) {
		vm_object_pip_wakeup(fs->object);
		fs->object = fs->first_object;
		fs->pindex = fs->first_pindex;
	}
	MPASS(fs->first_m != NULL);
	MPASS(fs->m == NULL);
	fs->m = fs->first_m;
	fs->first_m = NULL;

	/*
	 * Zero the page if necessary and mark it valid.
	 */
	if ((fs->m->flags & PG_ZERO) == 0) {
		pmap_zero_page(fs->m);
	} else {
		VM_CNT_INC(v_ozfod);
	}
	VM_CNT_INC(v_zfod);
	vm_page_valid(fs->m);
}
1051df794f5cSJeff Roberson */ 1052df794f5cSJeff Roberson static int 1053df794f5cSJeff Roberson vm_fault_allocate(struct faultstate *fs) 1054df794f5cSJeff Roberson { 1055df794f5cSJeff Roberson struct domainset *dset; 1056df794f5cSJeff Roberson int alloc_req; 1057df794f5cSJeff Roberson int rv; 1058df794f5cSJeff Roberson 1059df794f5cSJeff Roberson if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) { 1060df794f5cSJeff Roberson rv = vm_fault_lock_vnode(fs, true); 1061df794f5cSJeff Roberson MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE); 1062df794f5cSJeff Roberson if (rv == KERN_RESOURCE_SHORTAGE) 1063df794f5cSJeff Roberson return (rv); 1064df794f5cSJeff Roberson } 1065df794f5cSJeff Roberson 1066df794f5cSJeff Roberson if (fs->pindex >= fs->object->size) 1067df794f5cSJeff Roberson return (KERN_OUT_OF_BOUNDS); 1068df794f5cSJeff Roberson 1069df794f5cSJeff Roberson if (fs->object == fs->first_object && 1070df794f5cSJeff Roberson (fs->first_object->flags & OBJ_POPULATE) != 0 && 1071df794f5cSJeff Roberson fs->first_object->shadow_count == 0) { 1072df794f5cSJeff Roberson rv = vm_fault_populate(fs); 1073df794f5cSJeff Roberson switch (rv) { 1074df794f5cSJeff Roberson case KERN_SUCCESS: 1075df794f5cSJeff Roberson case KERN_FAILURE: 1076df794f5cSJeff Roberson case KERN_RESTART: 1077df794f5cSJeff Roberson return (rv); 1078df794f5cSJeff Roberson case KERN_NOT_RECEIVER: 1079df794f5cSJeff Roberson /* 1080df794f5cSJeff Roberson * Pager's populate() method 1081df794f5cSJeff Roberson * returned VM_PAGER_BAD. 1082df794f5cSJeff Roberson */ 1083df794f5cSJeff Roberson break; 1084df794f5cSJeff Roberson default: 1085df794f5cSJeff Roberson panic("inconsistent return codes"); 1086df794f5cSJeff Roberson } 1087df794f5cSJeff Roberson } 1088df794f5cSJeff Roberson 1089df794f5cSJeff Roberson /* 1090df794f5cSJeff Roberson * Allocate a new page for this object/offset pair. 1091df794f5cSJeff Roberson * 1092df794f5cSJeff Roberson * Unlocked read of the p_flag is harmless. At worst, the P_KILLED 1093df794f5cSJeff Roberson * might be not observed there, and allocation can fail, causing 1094df794f5cSJeff Roberson * restart and new reading of the p_flag. 1095df794f5cSJeff Roberson */ 1096df794f5cSJeff Roberson dset = fs->object->domain.dr_policy; 1097df794f5cSJeff Roberson if (dset == NULL) 1098df794f5cSJeff Roberson dset = curthread->td_domain.dr_policy; 1099df794f5cSJeff Roberson if (!vm_page_count_severe_set(&dset->ds_mask) || P_KILLED(curproc)) { 1100df794f5cSJeff Roberson #if VM_NRESERVLEVEL > 0 1101df794f5cSJeff Roberson vm_object_color(fs->object, atop(fs->vaddr) - fs->pindex); 1102df794f5cSJeff Roberson #endif 1103df794f5cSJeff Roberson alloc_req = P_KILLED(curproc) ? 
1104df794f5cSJeff Roberson VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL; 1105df794f5cSJeff Roberson if (fs->object->type != OBJT_VNODE && 1106df794f5cSJeff Roberson fs->object->backing_object == NULL) 1107df794f5cSJeff Roberson alloc_req |= VM_ALLOC_ZERO; 1108df794f5cSJeff Roberson fs->m = vm_page_alloc(fs->object, fs->pindex, alloc_req); 1109df794f5cSJeff Roberson } 1110df794f5cSJeff Roberson if (fs->m == NULL) { 1111df794f5cSJeff Roberson unlock_and_deallocate(fs); 1112df794f5cSJeff Roberson if (vm_pfault_oom_attempts < 0 || 1113df794f5cSJeff Roberson fs->oom < vm_pfault_oom_attempts) { 1114df794f5cSJeff Roberson fs->oom++; 1115df794f5cSJeff Roberson vm_waitpfault(dset, vm_pfault_oom_wait * hz); 1116b70f6e15SKonstantin Belousov } else { 1117df794f5cSJeff Roberson if (bootverbose) 1118df794f5cSJeff Roberson printf( 1119df794f5cSJeff Roberson "proc %d (%s) failed to alloc page on fault, starting OOM\n", 1120df794f5cSJeff Roberson curproc->p_pid, curproc->p_comm); 1121df794f5cSJeff Roberson vm_pageout_oom(VM_OOM_MEM_PF); 1122b70f6e15SKonstantin Belousov fs->oom = 0; 1123b70f6e15SKonstantin Belousov } 1124df794f5cSJeff Roberson return (KERN_RESOURCE_SHORTAGE); 1125df794f5cSJeff Roberson } 1126df794f5cSJeff Roberson fs->oom = 0; 1127df794f5cSJeff Roberson 1128df794f5cSJeff Roberson return (KERN_NOT_RECEIVER); 1129df794f5cSJeff Roberson } 11305909dafeSJeff Roberson 11315909dafeSJeff Roberson /* 11325909dafeSJeff Roberson * Call the pager to retrieve the page if there is a chance 11335909dafeSJeff Roberson * that the pager has it, and potentially retrieve additional 11345909dafeSJeff Roberson * pages at the same time. 11355909dafeSJeff Roberson */ 11365909dafeSJeff Roberson static int 11375909dafeSJeff Roberson vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp) 11385909dafeSJeff Roberson { 11395909dafeSJeff Roberson vm_offset_t e_end, e_start; 11405909dafeSJeff Roberson int ahead, behind, cluster_offset, rv; 11415909dafeSJeff Roberson u_char behavior; 11425909dafeSJeff Roberson 11435909dafeSJeff Roberson /* 11445909dafeSJeff Roberson * Prepare for unlocking the map. Save the map 11455909dafeSJeff Roberson * entry's start and end addresses, which are used to 11465909dafeSJeff Roberson * optimize the size of the pager operation below. 11475909dafeSJeff Roberson * Even if the map entry's addresses change after 11485909dafeSJeff Roberson * unlocking the map, using the saved addresses is 11495909dafeSJeff Roberson * safe. 11505909dafeSJeff Roberson */ 11515909dafeSJeff Roberson e_start = fs->entry->start; 11525909dafeSJeff Roberson e_end = fs->entry->end; 11535909dafeSJeff Roberson behavior = vm_map_entry_behavior(fs->entry); 11545909dafeSJeff Roberson 11555909dafeSJeff Roberson /* 11565909dafeSJeff Roberson * Release the map lock before locking the vnode or 11575909dafeSJeff Roberson * sleeping in the pager. (If the current object has 11585909dafeSJeff Roberson * a shadow, then an earlier iteration of this loop 11595909dafeSJeff Roberson * may have already unlocked the map.) 
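 *
 * Dropping the map lock here is tracked by fs->lookup_still_valid;
 * vm_fault() revalidates the lookup via vm_fault_relookup() before
 * the page is finally mapped.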
11605909dafeSJeff Roberson */ 11615909dafeSJeff Roberson unlock_map(fs); 11625909dafeSJeff Roberson 11635909dafeSJeff Roberson rv = vm_fault_lock_vnode(fs, false); 11645909dafeSJeff Roberson MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE); 11655909dafeSJeff Roberson if (rv == KERN_RESOURCE_SHORTAGE) 11665909dafeSJeff Roberson return (rv); 11675909dafeSJeff Roberson KASSERT(fs->vp == NULL || !fs->map->system_map, 11685909dafeSJeff Roberson ("vm_fault: vnode-backed object mapped by system map")); 11695909dafeSJeff Roberson 11705909dafeSJeff Roberson /* 11715909dafeSJeff Roberson * Page in the requested page and hint to the pager 11725909dafeSJeff Roberson * that it may bring in surrounding pages. 11735909dafeSJeff Roberson */ 11745909dafeSJeff Roberson if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM || 11755909dafeSJeff Roberson P_KILLED(curproc)) { 11765909dafeSJeff Roberson behind = 0; 11775909dafeSJeff Roberson ahead = 0; 11785909dafeSJeff Roberson } else { 11795909dafeSJeff Roberson /* Is this a sequential fault? */ 11805909dafeSJeff Roberson if (nera > 0) { 11815909dafeSJeff Roberson behind = 0; 11825909dafeSJeff Roberson ahead = nera; 11835909dafeSJeff Roberson } else { 11845909dafeSJeff Roberson /* 11855909dafeSJeff Roberson * Request a cluster of pages that is 11865909dafeSJeff Roberson * aligned to a VM_FAULT_READ_DEFAULT 11875909dafeSJeff Roberson * page offset boundary within the 11885909dafeSJeff Roberson * object. Alignment to a page offset 11895909dafeSJeff Roberson * boundary is more likely to coincide 11905909dafeSJeff Roberson * with the underlying file system 11915909dafeSJeff Roberson * block than alignment to a virtual 11925909dafeSJeff Roberson * address boundary. (For example, with a cluster size of 8 pages, a fault at pindex 13 would request up to 5 pages behind and 2 ahead, covering pindexes 8-15.) 11935909dafeSJeff Roberson */ 11945909dafeSJeff Roberson cluster_offset = fs->pindex % VM_FAULT_READ_DEFAULT; 11955909dafeSJeff Roberson behind = ulmin(cluster_offset, 11965909dafeSJeff Roberson atop(fs->vaddr - e_start)); 11975909dafeSJeff Roberson ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset; 11985909dafeSJeff Roberson } 11995909dafeSJeff Roberson ahead = ulmin(ahead, atop(e_end - fs->vaddr) - 1); 12005909dafeSJeff Roberson } 12015909dafeSJeff Roberson *behindp = behind; 12025909dafeSJeff Roberson *aheadp = ahead; 12035909dafeSJeff Roberson rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp); 12045909dafeSJeff Roberson if (rv == VM_PAGER_OK) 12055909dafeSJeff Roberson return (KERN_SUCCESS); 12065909dafeSJeff Roberson if (rv == VM_PAGER_ERROR) 12075909dafeSJeff Roberson printf("vm_fault: pager read error, pid %d (%s)\n", 12085909dafeSJeff Roberson curproc->p_pid, curproc->p_comm); 12095909dafeSJeff Roberson /* 12105909dafeSJeff Roberson * If an I/O error occurred or the requested page was 12115909dafeSJeff Roberson * outside the range of the pager, clean up and return 12125909dafeSJeff Roberson * an error. 12135909dafeSJeff Roberson */ 12145909dafeSJeff Roberson if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) 12155909dafeSJeff Roberson return (KERN_OUT_OF_BOUNDS); 12165909dafeSJeff Roberson return (KERN_NOT_RECEIVER); 12175909dafeSJeff Roberson } 12185909dafeSJeff Roberson 12195949b1caSJeff Roberson /* 1220bef91632SJeff Roberson * Wait/Retry if the page is busy. 
We have to do this if the page is 1221bef91632SJeff Roberson * either exclusive or shared busy because the vm_pager may be using 1222bef91632SJeff Roberson * read busy for pageouts (and even pageins if it is the vnode pager), 1223bef91632SJeff Roberson * and we could end up trying to pagein and pageout the same page 1224bef91632SJeff Roberson * simultaneously. 1225bef91632SJeff Roberson * 1226bef91632SJeff Roberson * We can theoretically allow the busy case on a read fault if the page 1227bef91632SJeff Roberson * is marked valid, but since such pages are typically already pmap'd, 1228bef91632SJeff Roberson * putting that special case in might be more effort than it is worth. 1229bef91632SJeff Roberson * We cannot under any circumstances mess around with a shared busied 1230bef91632SJeff Roberson * page except, perhaps, to pmap it. 1231bef91632SJeff Roberson */ 1232bef91632SJeff Roberson static void 1233bef91632SJeff Roberson vm_fault_busy_sleep(struct faultstate *fs) 1234bef91632SJeff Roberson { 1235bef91632SJeff Roberson /* 1236bef91632SJeff Roberson * Reference the page before unlocking and 1237bef91632SJeff Roberson * sleeping so that the page daemon is less 1238bef91632SJeff Roberson * likely to reclaim it. 1239bef91632SJeff Roberson */ 1240bef91632SJeff Roberson vm_page_aflag_set(fs->m, PGA_REFERENCED); 1241bef91632SJeff Roberson if (fs->object != fs->first_object) { 1242bef91632SJeff Roberson fault_page_release(&fs->first_m); 1243bef91632SJeff Roberson vm_object_pip_wakeup(fs->first_object); 1244bef91632SJeff Roberson } 1245bef91632SJeff Roberson vm_object_pip_wakeup(fs->object); 1246bef91632SJeff Roberson unlock_map(fs); 1247bef91632SJeff Roberson if (fs->m == vm_page_lookup(fs->object, fs->pindex)) 12487e1b379eSJeff Roberson vm_page_busy_sleep(fs->m, "vmpfw", false); 12497e1b379eSJeff Roberson else 1250bef91632SJeff Roberson VM_OBJECT_WUNLOCK(fs->object); 1251bef91632SJeff Roberson VM_CNT_INC(v_intrans); 1252bef91632SJeff Roberson vm_object_deallocate(fs->first_object); 1253bef91632SJeff Roberson } 1254bef91632SJeff Roberson 1255acd11c74SAlan Cox int 1256df08823dSKonstantin Belousov vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, 1257acd11c74SAlan Cox int fault_flags, vm_page_t *m_hold) 1258acd11c74SAlan Cox { 12594866e085SJohn Dyson struct faultstate fs; 1260df794f5cSJeff Roberson int ahead, behind, faultcount; 1261df794f5cSJeff Roberson int nera, result, rv; 12625936b6a8SJeff Roberson bool dead, hardfault; 1263df8bae1dSRodney W. Grimes 126483c9dea1SGleb Smirnoff VM_CNT_INC(v_vm_faults); 1265c31cec45SKonstantin Belousov 1266c31cec45SKonstantin Belousov if ((curthread->td_pflags & TDP_NOFAULTING) != 0) 1267c31cec45SKonstantin Belousov return (KERN_PROTECTION_FAILURE); 1268c31cec45SKonstantin Belousov 1269d2bf64c3SKonstantin Belousov fs.vp = NULL; 12705949b1caSJeff Roberson fs.vaddr = vaddr; 12712c2f4413SJeff Roberson fs.m_hold = m_hold; 12722c2f4413SJeff Roberson fs.fault_flags = fault_flags; 1273c308a3a6SJeff Roberson fs.map = map; 1274c308a3a6SJeff Roberson fs.lookup_still_valid = false; 1275df794f5cSJeff Roberson fs.oom = 0; 1276b0cd2017SGleb Smirnoff faultcount = 0; 12770c3a4893SAlan Cox nera = -1; 1278320023e2SAlan Cox hardfault = false; 1279df8bae1dSRodney W. Grimes 1280245139c6SKonstantin Belousov RetryFault: 12812c2f4413SJeff Roberson fs.fault_type = fault_type; 1282df8bae1dSRodney W. Grimes 1283df8bae1dSRodney W. Grimes /* 12840d94caffSDavid Greenman * Find the backing store object and offset into it to begin the 12850d94caffSDavid Greenman * search. 
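 *
 * A KERN_RESOURCE_SHORTAGE result from the lookup is not an error;
 * it only asks for the fault to be restarted from the top.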
1286df8bae1dSRodney W. Grimes */ 1287c308a3a6SJeff Roberson result = vm_fault_lookup(&fs); 128892de35b0SAlan Cox if (result != KERN_SUCCESS) { 1289c308a3a6SJeff Roberson if (result == KERN_RESOURCE_SHORTAGE) 1290c308a3a6SJeff Roberson goto RetryFault; 129192de35b0SAlan Cox return (result); 129209e0c6ccSJohn Dyson } 129309e0c6ccSJohn Dyson 12948d67b8c8SAlan Cox /* 12958d67b8c8SAlan Cox * Try to avoid lock contention on the top-level object through 12968d67b8c8SAlan Cox * special-case handling of some types of page faults, specifically, 129767d0e293SJeff Roberson * those that are mapping an existing page from the top-level object. 129867d0e293SJeff Roberson * Under this condition, a read lock on the object suffices, allowing 129967d0e293SJeff Roberson * multiple page faults of a similar type to run in parallel. 13008d67b8c8SAlan Cox */ 1301afe55ca3SKonstantin Belousov if (fs.vp == NULL /* avoid locked vnode leak */ && 1302d301b358SKonstantin Belousov (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 && 13032c2f4413SJeff Roberson (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) { 1304afe55ca3SKonstantin Belousov VM_OBJECT_RLOCK(fs.first_object); 13052c2f4413SJeff Roberson rv = vm_fault_soft_fast(&fs); 130641ddec83SKonstantin Belousov if (rv == KERN_SUCCESS) 130741ddec83SKonstantin Belousov return (rv); 1308afe55ca3SKonstantin Belousov if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) { 1309afe55ca3SKonstantin Belousov VM_OBJECT_RUNLOCK(fs.first_object); 1310afe55ca3SKonstantin Belousov VM_OBJECT_WLOCK(fs.first_object); 1311afe55ca3SKonstantin Belousov } 1312afe55ca3SKonstantin Belousov } else { 1313afe55ca3SKonstantin Belousov VM_OBJECT_WLOCK(fs.first_object); 1314afe55ca3SKonstantin Belousov } 1315afe55ca3SKonstantin Belousov 131695e5e988SJohn Dyson /* 131795e5e988SJohn Dyson * Make a reference to this object to prevent its disposal while we 131895e5e988SJohn Dyson * are messing with it. Once we have the reference, the map is free 131995e5e988SJohn Dyson * to be diddled. Since objects reference their shadows (and copies), 132095e5e988SJohn Dyson * they will stay around as well. 1321fe8e0238SMatthew Dillon * 1322fe8e0238SMatthew Dillon * Bump the paging-in-progress count to prevent size changes (e.g. 1323dda4d369SAlan Cox * truncation operations) during I/O. 132495e5e988SJohn Dyson */ 1325a976eb5eSAlan Cox vm_object_reference_locked(fs.first_object); 1326d474eaaaSDoug Rabson vm_object_pip_add(fs.first_object, 1); 132795e5e988SJohn Dyson 132858447749SJeff Roberson fs.m_cow = fs.m = fs.first_m = NULL; 1329df8bae1dSRodney W. Grimes 1330df8bae1dSRodney W. Grimes /* 1331df8bae1dSRodney W. Grimes * Search for the page at object/offset. 1332df8bae1dSRodney W. 
Grimes */ 13334866e085SJohn Dyson fs.object = fs.first_object; 13344866e085SJohn Dyson fs.pindex = fs.first_pindex; 1335d301b358SKonstantin Belousov 1336d301b358SKonstantin Belousov if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) { 1337d301b358SKonstantin Belousov rv = vm_fault_allocate(&fs); 1338d301b358SKonstantin Belousov switch (rv) { 1339d301b358SKonstantin Belousov case KERN_RESTART: 1340d301b358SKonstantin Belousov unlock_and_deallocate(&fs); 1341d301b358SKonstantin Belousov /* FALLTHROUGH */ 1342d301b358SKonstantin Belousov case KERN_RESOURCE_SHORTAGE: 1343d301b358SKonstantin Belousov goto RetryFault; 1344d301b358SKonstantin Belousov case KERN_SUCCESS: 1345d301b358SKonstantin Belousov case KERN_FAILURE: 1346d301b358SKonstantin Belousov case KERN_OUT_OF_BOUNDS: 1347d301b358SKonstantin Belousov unlock_and_deallocate(&fs); 1348d301b358SKonstantin Belousov return (rv); 1349d301b358SKonstantin Belousov case KERN_NOT_RECEIVER: 1350d301b358SKonstantin Belousov break; 1351d301b358SKonstantin Belousov default: 1352d301b358SKonstantin Belousov panic("vm_fault: Unhandled rv %d", rv); 1353d301b358SKonstantin Belousov } 1354d301b358SKonstantin Belousov } 1355d301b358SKonstantin Belousov 1356df8bae1dSRodney W. Grimes while (TRUE) { 13574bf95d00SJeff Roberson KASSERT(fs.m == NULL, 13584bf95d00SJeff Roberson ("page still set %p at loop start", fs.m)); 13591c7c3c6aSMatthew Dillon /* 1360725441f6SKonstantin Belousov * If the object is marked for imminent termination, 1361725441f6SKonstantin Belousov * we retry here, since the collapse pass has raced 1362725441f6SKonstantin Belousov * with us. Otherwise, if we see a terminally dead 1363725441f6SKonstantin Belousov * object, return failure. 13641c7c3c6aSMatthew Dillon */ 1365725441f6SKonstantin Belousov if ((fs.object->flags & OBJ_DEAD) != 0) { 1366725441f6SKonstantin Belousov dead = fs.object->type == OBJT_DEAD; 13674866e085SJohn Dyson unlock_and_deallocate(&fs); 1368725441f6SKonstantin Belousov if (dead) 136947221757SJohn Dyson return (KERN_PROTECTION_FAILURE); 1370725441f6SKonstantin Belousov pause("vmf_de", 1); 1371725441f6SKonstantin Belousov goto RetryFault; 137247221757SJohn Dyson } 137347221757SJohn Dyson 13741c7c3c6aSMatthew Dillon /* 13751c7c3c6aSMatthew Dillon * See if page is resident 13761c7c3c6aSMatthew Dillon */ 13774866e085SJohn Dyson fs.m = vm_page_lookup(fs.object, fs.pindex); 13784866e085SJohn Dyson if (fs.m != NULL) { 137963e97555SJeff Roberson if (vm_page_tryxbusy(fs.m) == 0) { 1380bef91632SJeff Roberson vm_fault_busy_sleep(&fs); 1381df8bae1dSRodney W. Grimes goto RetryFault; 1382df8bae1dSRodney W. Grimes } 13837615edaaSMatthew Dillon 13841c7c3c6aSMatthew Dillon /* 138563e97555SJeff Roberson * The page is marked busy for other processes and the 1386df794f5cSJeff Roberson * pagedaemon. If it is still completely valid, we 1387df794f5cSJeff Roberson * are done. 13881c7c3c6aSMatthew Dillon */ 1389df794f5cSJeff Roberson if (vm_page_all_valid(fs.m)) { 13901e40fe41SJeff Roberson VM_OBJECT_WUNLOCK(fs.object); 13911e40fe41SJeff Roberson break; /* break to PAGE HAS BEEN FOUND. */ 1392df8bae1dSRodney W. Grimes } 1393df794f5cSJeff Roberson } 13941e40fe41SJeff Roberson VM_OBJECT_ASSERT_WLOCKED(fs.object); 13951c7c3c6aSMatthew Dillon 13961c7c3c6aSMatthew Dillon /* 139710b4196bSAlan Cox * Page is not resident. If the pager might contain the page 139810b4196bSAlan Cox * or this is the beginning of the search, allocate a new 139910b4196bSAlan Cox * page. 
(Default objects are zero-fill, so there is no real 140010b4196bSAlan Cox * pager for them.) 14011c7c3c6aSMatthew Dillon */ 1402df794f5cSJeff Roberson if (fs.m == NULL && (fs.object->type != OBJT_DEFAULT || 1403df794f5cSJeff Roberson fs.object == fs.first_object)) { 1404df794f5cSJeff Roberson rv = vm_fault_allocate(&fs); 1405c42b43a0SKonstantin Belousov switch (rv) { 1406df794f5cSJeff Roberson case KERN_RESTART: 1407df794f5cSJeff Roberson unlock_and_deallocate(&fs); 1408df794f5cSJeff Roberson /* FALLTHROUGH */ 1409df794f5cSJeff Roberson case KERN_RESOURCE_SHORTAGE: 1410df794f5cSJeff Roberson goto RetryFault; 1411c42b43a0SKonstantin Belousov case KERN_SUCCESS: 1412c42b43a0SKonstantin Belousov case KERN_FAILURE: 1413df794f5cSJeff Roberson case KERN_OUT_OF_BOUNDS: 1414c42b43a0SKonstantin Belousov unlock_and_deallocate(&fs); 1415c42b43a0SKonstantin Belousov return (rv); 1416c42b43a0SKonstantin Belousov case KERN_NOT_RECEIVER: 1417c42b43a0SKonstantin Belousov break; 1418c42b43a0SKonstantin Belousov default: 1419df794f5cSJeff Roberson panic("vm_fault: Unhandled rv %d", rv); 1420c42b43a0SKonstantin Belousov } 1421c42b43a0SKonstantin Belousov } 1422c42b43a0SKonstantin Belousov 1423df8bae1dSRodney W. Grimes /* 14241e40fe41SJeff Roberson * Default objects have no pager so no exclusive busy exists 14251e40fe41SJeff Roberson * to protect this page in the chain. Skip to the next 14261e40fe41SJeff Roberson * object without dropping the lock to preserve atomicity of 14271e40fe41SJeff Roberson * shadow faults. 14281e40fe41SJeff Roberson */ 1429be9d4fd6SJeff Roberson if (fs.object->type != OBJT_DEFAULT) { 14301e40fe41SJeff Roberson /* 1431be9d4fd6SJeff Roberson * At this point, we have either allocated a new page 1432be9d4fd6SJeff Roberson * or found an existing page that is only partially 1433be9d4fd6SJeff Roberson * valid. 143485702505SAlan Cox * 1435be9d4fd6SJeff Roberson * We hold a reference on the current object and the 1436be9d4fd6SJeff Roberson * page is exclusive busied. The exclusive busy 1437be9d4fd6SJeff Roberson * prevents simultaneous faults and collapses while 1438be9d4fd6SJeff Roberson * the object lock is dropped. 143985702505SAlan Cox */ 14401e40fe41SJeff Roberson VM_OBJECT_WUNLOCK(fs.object); 144185702505SAlan Cox 144285702505SAlan Cox /* 1443be9d4fd6SJeff Roberson * If the pager for the current object might have 1444be9d4fd6SJeff Roberson * the page, then determine the number of additional 1445be9d4fd6SJeff Roberson * pages to read and potentially reprioritize 1446be9d4fd6SJeff Roberson * previously read pages for earlier reclamation. 1447be9d4fd6SJeff Roberson * These operations should only be performed once per 1448be9d4fd6SJeff Roberson * page fault. Even if the current pager doesn't 1449be9d4fd6SJeff Roberson * have the page, the number of additional pages to 1450be9d4fd6SJeff Roberson * read will apply to subsequent objects in the 1451be9d4fd6SJeff Roberson * shadow chain. 14521c7c3c6aSMatthew Dillon */ 14535909dafeSJeff Roberson if (nera == -1 && !P_KILLED(curproc)) 14545949b1caSJeff Roberson nera = vm_fault_readahead(&fs); 14550c3a4893SAlan Cox 14565909dafeSJeff Roberson rv = vm_fault_getpages(&fs, nera, &behind, &ahead); 14575909dafeSJeff Roberson if (rv == KERN_SUCCESS) { 1458b0cd2017SGleb Smirnoff faultcount = behind + 1 + ahead; 1459320023e2SAlan Cox hardfault = true; 14601e40fe41SJeff Roberson break; /* break to PAGE HAS BEEN FOUND. */ 1461df8bae1dSRodney W. 
Grimes } 14625909dafeSJeff Roberson if (rv == KERN_RESOURCE_SHORTAGE) 14635909dafeSJeff Roberson goto RetryFault; 14641e40fe41SJeff Roberson VM_OBJECT_WLOCK(fs.object); 14655909dafeSJeff Roberson if (rv == KERN_OUT_OF_BOUNDS) { 14664bf95d00SJeff Roberson fault_page_free(&fs.m); 14674866e085SJohn Dyson unlock_and_deallocate(&fs); 14685909dafeSJeff Roberson return (rv); 14694bf95d00SJeff Roberson } 1470be9d4fd6SJeff Roberson } 14714bf95d00SJeff Roberson 1472521ddf39SAlan Cox /* 14735909dafeSJeff Roberson * The page was not found in the current object. Try to 14745909dafeSJeff Roberson * traverse into a backing object or zero fill if none is 14755909dafeSJeff Roberson * found. 1476521ddf39SAlan Cox */ 1477fb4d37eaSJeff Roberson if (vm_fault_next(&fs)) 1478fb4d37eaSJeff Roberson continue; 1479*f31695ccSMark Johnston if ((fs.fault_flags & VM_FAULT_NOFILL) != 0) { 1480*f31695ccSMark Johnston if (fs.first_object == fs.object) 1481*f31695ccSMark Johnston fault_page_free(&fs.first_m); 1482*f31695ccSMark Johnston unlock_and_deallocate(&fs); 1483*f31695ccSMark Johnston return (KERN_OUT_OF_BOUNDS); 1484*f31695ccSMark Johnston } 1485fb4d37eaSJeff Roberson VM_OBJECT_WUNLOCK(fs.object); 1486fb4d37eaSJeff Roberson vm_fault_zerofill(&fs); 14877b9b301cSAlan Cox /* Don't try to prefault neighboring pages. */ 14887b9b301cSAlan Cox faultcount = 1; 14891e40fe41SJeff Roberson break; /* break to PAGE HAS BEEN FOUND. */ 1490df8bae1dSRodney W. Grimes } 14911c7c3c6aSMatthew Dillon 1492df8bae1dSRodney W. Grimes /* 14931e40fe41SJeff Roberson * PAGE HAS BEEN FOUND. A valid page has been found and exclusively 14941e40fe41SJeff Roberson * busied. The object lock must no longer be held. 1495df8bae1dSRodney W. Grimes */ 14961e40fe41SJeff Roberson vm_page_assert_xbusied(fs.m); 14971e40fe41SJeff Roberson VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1498df8bae1dSRodney W. Grimes 1499df8bae1dSRodney W. Grimes /* 15000d94caffSDavid Greenman * If the page is being written, but isn't already owned by the 15010d94caffSDavid Greenman * top-level object, we have to copy it into a new page owned by the 15020d94caffSDavid Greenman * top-level object. 1503df8bae1dSRodney W. Grimes */ 15044866e085SJohn Dyson if (fs.object != fs.first_object) { 1505df8bae1dSRodney W. Grimes /* 15060d94caffSDavid Greenman * We only really need to copy if we want to write it. 1507df8bae1dSRodney W. Grimes */ 15082c2f4413SJeff Roberson if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 15095936b6a8SJeff Roberson vm_fault_cow(&fs); 15109f1abe3dSAlan Cox /* 15119f1abe3dSAlan Cox * We only try to prefault read-only mappings to the 15129f1abe3dSAlan Cox * neighboring pages when this copy-on-write fault is 15139f1abe3dSAlan Cox * a hard fault. In other cases, trying to prefault 15149f1abe3dSAlan Cox * is typically wasted effort. 15159f1abe3dSAlan Cox */ 15169f1abe3dSAlan Cox if (faultcount == 0) 15179f1abe3dSAlan Cox faultcount = 1; 15189f1abe3dSAlan Cox 15190d94caffSDavid Greenman } else { 15202c2f4413SJeff Roberson fs.prot &= ~VM_PROT_WRITE; 1521df8bae1dSRodney W. Grimes } 1522df8bae1dSRodney W. Grimes } 1523df8bae1dSRodney W. Grimes 1524df8bae1dSRodney W. Grimes /* 15250d94caffSDavid Greenman * We must verify that the maps have not changed since our last 15260d94caffSDavid Greenman * lookup. 1527df8bae1dSRodney W. Grimes */ 152819dc5607STor Egge if (!fs.lookup_still_valid) { 1529fcb04758SJeff Roberson result = vm_fault_relookup(&fs); 1530df8bae1dSRodney W. 
Grimes if (result != KERN_SUCCESS) { 15311e40fe41SJeff Roberson fault_deallocate(&fs); 1532fcb04758SJeff Roberson if (result == KERN_RESTART) 153319dc5607STor Egge goto RetryFault; 1534df8bae1dSRodney W. Grimes return (result); 1535df8bae1dSRodney W. Grimes } 153619dc5607STor Egge } 15371e40fe41SJeff Roberson VM_OBJECT_ASSERT_UNLOCKED(fs.object); 1538381b7242SAlan Cox 1539d2bf64c3SKonstantin Belousov /* 1540381b7242SAlan Cox * If the page was filled by a pager, save the virtual address that 1541381b7242SAlan Cox * should be faulted on next under a sequential access pattern to the 1542381b7242SAlan Cox * map entry. A read lock on the map suffices to update this address 1543381b7242SAlan Cox * safely. 1544d2bf64c3SKonstantin Belousov */ 15455758fe71SAlan Cox if (hardfault) 1546381b7242SAlan Cox fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE; 1547d2bf64c3SKonstantin Belousov 15484221e284SAlan Cox /* 154978cfe1f7SAlan Cox * Page must be completely valid or it is not fit to 15504221e284SAlan Cox * map into user space. vm_pager_get_pages() ensures this. 15514221e284SAlan Cox */ 15521e40fe41SJeff Roberson vm_page_assert_xbusied(fs.m); 15530012f373SJeff Roberson KASSERT(vm_page_all_valid(fs.m), 155478cfe1f7SAlan Cox ("vm_fault: page %p partially invalid", fs.m)); 15551e40fe41SJeff Roberson 15562c2f4413SJeff Roberson vm_fault_dirty(&fs, fs.m); 1557cbfbaad8SAlan Cox 155886735996SAlan Cox /* 155986735996SAlan Cox * Put this page into the physical map. We had to do the unlock above 156086735996SAlan Cox * because pmap_enter() may sleep. We don't put the page 156186735996SAlan Cox * back on the active queue until later so that the pageout daemon 156286735996SAlan Cox * won't find it (yet). 156386735996SAlan Cox */ 15642c2f4413SJeff Roberson pmap_enter(fs.map->pmap, vaddr, fs.m, fs.prot, 15652c2f4413SJeff Roberson fs.fault_type | (fs.wired ? PMAP_ENTER_WIRED : 0), 0); 15662c2f4413SJeff Roberson if (faultcount != 1 && (fs.fault_flags & VM_FAULT_WIRE) == 0 && 15672c2f4413SJeff Roberson fs.wired == 0) 1568b0cd2017SGleb Smirnoff vm_fault_prefault(&fs, vaddr, 1569b0cd2017SGleb Smirnoff faultcount > 0 ? behind : PFBAK, 1570a7163bb9SKonstantin Belousov faultcount > 0 ? ahead : PFFOR, false); 1571ff97964aSJohn Dyson 1572df8bae1dSRodney W. Grimes /* 15730d94caffSDavid Greenman * If the page is not wired down, then put it where the pageout daemon 15740d94caffSDavid Greenman * can find it. 1575df8bae1dSRodney W. 
Grimes */ 15762c2f4413SJeff Roberson if ((fs.fault_flags & VM_FAULT_WIRE) != 0) 15774866e085SJohn Dyson vm_page_wire(fs.m); 15789f5632e6SMark Johnston else 15794866e085SJohn Dyson vm_page_activate(fs.m); 15802c2f4413SJeff Roberson if (fs.m_hold != NULL) { 15812c2f4413SJeff Roberson (*fs.m_hold) = fs.m; 1582eeacb3b0SMark Johnston vm_page_wire(fs.m); 1583acd11c74SAlan Cox } 1584c7aebda8SAttilio Rao vm_page_xunbusy(fs.m); 15854bf95d00SJeff Roberson fs.m = NULL; 1586eeec6babSJohn Baldwin 1587eebf3286SAlan Cox /* 1588eebf3286SAlan Cox * Unlock everything, and return 1589eebf3286SAlan Cox */ 15904b3e0665SJeff Roberson fault_deallocate(&fs); 1591b3a01bdfSAndrey Zonov if (hardfault) { 159283c9dea1SGleb Smirnoff VM_CNT_INC(v_io_faults); 15931c4bcd05SJeff Roberson curthread->td_ru.ru_majflt++; 1594ae34b6ffSEdward Tomasz Napierala #ifdef RACCT 1595ae34b6ffSEdward Tomasz Napierala if (racct_enable && fs.object->type == OBJT_VNODE) { 1596ae34b6ffSEdward Tomasz Napierala PROC_LOCK(curproc); 15972c2f4413SJeff Roberson if ((fs.fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1598ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_WRITEBPS, 1599ae34b6ffSEdward Tomasz Napierala PAGE_SIZE + behind * PAGE_SIZE); 1600ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_WRITEIOPS, 1); 1601ae34b6ffSEdward Tomasz Napierala } else { 1602ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_READBPS, 1603ae34b6ffSEdward Tomasz Napierala PAGE_SIZE + ahead * PAGE_SIZE); 1604ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_READIOPS, 1); 1605ae34b6ffSEdward Tomasz Napierala } 1606ae34b6ffSEdward Tomasz Napierala PROC_UNLOCK(curproc); 1607ae34b6ffSEdward Tomasz Napierala } 1608ae34b6ffSEdward Tomasz Napierala #endif 1609b3a01bdfSAndrey Zonov } else 16101c4bcd05SJeff Roberson curthread->td_ru.ru_minflt++; 1611df8bae1dSRodney W. Grimes 1612df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1613df8bae1dSRodney W. Grimes } 1614df8bae1dSRodney W. Grimes 1615df8bae1dSRodney W. Grimes /* 1616a8b0f100SAlan Cox * Speed up the reclamation of pages that precede the faulting pindex within 1617a8b0f100SAlan Cox * the first object of the shadow chain. Essentially, perform the equivalent 1618a8b0f100SAlan Cox * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes 1619a8b0f100SAlan Cox * the faulting pindex by the cluster size when the pages read by vm_fault() 1620a8b0f100SAlan Cox * cross a cluster-size boundary. The cluster size is the greater of the 1621a8b0f100SAlan Cox * smallest superpage size and VM_FAULT_DONTNEED_MIN. 1622a8b0f100SAlan Cox * 1623a8b0f100SAlan Cox * When "fs->first_object" is a shadow object, the pages in the backing object 1624a8b0f100SAlan Cox * that precede the faulting pindex are deactivated by vm_fault(). So, this 1625a8b0f100SAlan Cox * function must only be concerned with pages in the first object. 
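 *
 * For example, with 2MB superpages (and VM_FAULT_DONTNEED_MIN below
 * that), the cluster size is 2MB: a strictly ascending sequence of
 * faults ends up calling pmap_advise(..., MADV_DONTNEED) on the
 * preceding 2MB-aligned span each time the fault plus its read-ahead
 * window crosses a 2MB boundary.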
162613458803SAlan Cox */ 162713458803SAlan Cox static void 1628a8b0f100SAlan Cox vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead) 162913458803SAlan Cox { 1630a8b0f100SAlan Cox vm_map_entry_t entry; 163113458803SAlan Cox vm_object_t first_object, object; 1632a8b0f100SAlan Cox vm_offset_t end, start; 1633a8b0f100SAlan Cox vm_page_t m, m_next; 1634a8b0f100SAlan Cox vm_pindex_t pend, pstart; 1635a8b0f100SAlan Cox vm_size_t size; 163613458803SAlan Cox 163713458803SAlan Cox object = fs->object; 16381e40fe41SJeff Roberson VM_OBJECT_ASSERT_UNLOCKED(object); 163913458803SAlan Cox first_object = fs->first_object; 1640a8b0f100SAlan Cox /* Neither fictitious nor unmanaged pages can be reclaimed. */ 164128634820SAlan Cox if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) { 16421e40fe41SJeff Roberson VM_OBJECT_RLOCK(first_object); 1643a8b0f100SAlan Cox size = VM_FAULT_DONTNEED_MIN; 1644a8b0f100SAlan Cox if (MAXPAGESIZES > 1 && size < pagesizes[1]) 1645a8b0f100SAlan Cox size = pagesizes[1]; 1646a8b0f100SAlan Cox end = rounddown2(vaddr, size); 1647a8b0f100SAlan Cox if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) && 1648a8b0f100SAlan Cox (entry = fs->entry)->start < end) { 1649a8b0f100SAlan Cox if (end - entry->start < size) 1650a8b0f100SAlan Cox start = entry->start; 165113458803SAlan Cox else 1652a8b0f100SAlan Cox start = end - size; 1653a8b0f100SAlan Cox pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED); 1654a8b0f100SAlan Cox pstart = OFF_TO_IDX(entry->offset) + atop(start - 1655a8b0f100SAlan Cox entry->start); 1656a8b0f100SAlan Cox m_next = vm_page_find_least(first_object, pstart); 1657a8b0f100SAlan Cox pend = OFF_TO_IDX(entry->offset) + atop(end - 1658a8b0f100SAlan Cox entry->start); 1659a8b0f100SAlan Cox while ((m = m_next) != NULL && m->pindex < pend) { 1660a8b0f100SAlan Cox m_next = TAILQ_NEXT(m, listq); 16610012f373SJeff Roberson if (!vm_page_all_valid(m) || 1662a8b0f100SAlan Cox vm_page_busied(m)) 166313458803SAlan Cox continue; 1664d8015db3SAlan Cox 1665d8015db3SAlan Cox /* 1666d8015db3SAlan Cox * Don't clear PGA_REFERENCED, since it would 1667d8015db3SAlan Cox * likely represent a reference by a different 1668d8015db3SAlan Cox * process. 1669d8015db3SAlan Cox * 1670d8015db3SAlan Cox * Typically, at this point, prefetched pages 1671d8015db3SAlan Cox * are still in the inactive queue. Only 1672d8015db3SAlan Cox * pages that triggered page faults are in the 16739f5632e6SMark Johnston * active queue. The test for whether the page 16749f5632e6SMark Johnston * is in the inactive queue is racy; in the 16759f5632e6SMark Johnston * worst case we will requeue the page 16769f5632e6SMark Johnston * unnecessarily. 1677d8015db3SAlan Cox */ 16780eb50f9cSMark Johnston if (!vm_page_inactive(m)) 1679d8015db3SAlan Cox vm_page_deactivate(m); 168013458803SAlan Cox } 168113458803SAlan Cox } 16821e40fe41SJeff Roberson VM_OBJECT_RUNLOCK(first_object); 1683a8b0f100SAlan Cox } 168413458803SAlan Cox } 168513458803SAlan Cox 168613458803SAlan Cox /* 1687566526a9SAlan Cox * vm_fault_prefault provides a quick way of clustering 1688566526a9SAlan Cox * page faults into a process's address space. It is a "cousin" 1689566526a9SAlan Cox * of vm_map_pmap_enter, except it runs at page fault time instead 1690566526a9SAlan Cox * of mmap time. 
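 *
 * For example, with backward = forward = 2 the candidate sequence
 * around the faulting address "addra" is addra - PAGE_SIZE,
 * addra + PAGE_SIZE, addra - 2 * PAGE_SIZE, addra + 2 * PAGE_SIZE;
 * the walk stops at the first candidate that lacks a resident
 * backing page.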
1691566526a9SAlan Cox */ 1692566526a9SAlan Cox static void 169363281952SAlan Cox vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra, 1694a7163bb9SKonstantin Belousov int backward, int forward, bool obj_locked) 1695566526a9SAlan Cox { 169663281952SAlan Cox pmap_t pmap; 169763281952SAlan Cox vm_map_entry_t entry; 169863281952SAlan Cox vm_object_t backing_object, lobject; 1699566526a9SAlan Cox vm_offset_t addr, starta; 1700566526a9SAlan Cox vm_pindex_t pindex; 17012053c127SStephan Uphoff vm_page_t m; 1702b0cd2017SGleb Smirnoff int i; 1703566526a9SAlan Cox 170463281952SAlan Cox pmap = fs->map->pmap; 1705950d5f7aSAlan Cox if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) 1706566526a9SAlan Cox return; 1707566526a9SAlan Cox 170863281952SAlan Cox entry = fs->entry; 1709566526a9SAlan Cox 171063cdcaaeSKonstantin Belousov if (addra < backward * PAGE_SIZE) { 1711566526a9SAlan Cox starta = entry->start; 171263cdcaaeSKonstantin Belousov } else { 171363cdcaaeSKonstantin Belousov starta = addra - backward * PAGE_SIZE; 171463cdcaaeSKonstantin Belousov if (starta < entry->start) 171563cdcaaeSKonstantin Belousov starta = entry->start; 1716566526a9SAlan Cox } 1717566526a9SAlan Cox 171863281952SAlan Cox /* 171963281952SAlan Cox * Generate the sequence of virtual addresses that are candidates for 172063281952SAlan Cox * prefaulting in an outward spiral from the faulting virtual address, 172163281952SAlan Cox * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra 172263281952SAlan Cox * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ... 172363281952SAlan Cox * If the candidate address doesn't have a backing physical page, then 172463281952SAlan Cox * the loop immediately terminates. 172563281952SAlan Cox */ 172663281952SAlan Cox for (i = 0; i < 2 * imax(backward, forward); i++) { 172763281952SAlan Cox addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? 
-PAGE_SIZE : 172863281952SAlan Cox PAGE_SIZE); 172963281952SAlan Cox if (addr > addra + forward * PAGE_SIZE) 1730566526a9SAlan Cox addr = 0; 1731566526a9SAlan Cox 1732566526a9SAlan Cox if (addr < starta || addr >= entry->end) 1733566526a9SAlan Cox continue; 1734566526a9SAlan Cox 1735566526a9SAlan Cox if (!pmap_is_prefaultable(pmap, addr)) 1736566526a9SAlan Cox continue; 1737566526a9SAlan Cox 1738566526a9SAlan Cox pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 173963281952SAlan Cox lobject = entry->object.vm_object; 1740a7163bb9SKonstantin Belousov if (!obj_locked) 1741c141ae7fSAlan Cox VM_OBJECT_RLOCK(lobject); 1742566526a9SAlan Cox while ((m = vm_page_lookup(lobject, pindex)) == NULL && 1743566526a9SAlan Cox lobject->type == OBJT_DEFAULT && 1744566526a9SAlan Cox (backing_object = lobject->backing_object) != NULL) { 174536930fc9SAlan Cox KASSERT((lobject->backing_object_offset & PAGE_MASK) == 174636930fc9SAlan Cox 0, ("vm_fault_prefault: unaligned object offset")); 1747566526a9SAlan Cox pindex += lobject->backing_object_offset >> PAGE_SHIFT; 1748c141ae7fSAlan Cox VM_OBJECT_RLOCK(backing_object); 1749a7163bb9SKonstantin Belousov if (!obj_locked || lobject != entry->object.vm_object) 1750c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1751566526a9SAlan Cox lobject = backing_object; 1752566526a9SAlan Cox } 1753cbfbaad8SAlan Cox if (m == NULL) { 1754a7163bb9SKonstantin Belousov if (!obj_locked || lobject != entry->object.vm_object) 1755c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1756566526a9SAlan Cox break; 1757cbfbaad8SAlan Cox } 17580012f373SJeff Roberson if (vm_page_all_valid(m) && 17593c4a2440SAlan Cox (m->flags & PG_FICTITIOUS) == 0) 17607bfda801SAlan Cox pmap_enter_quick(pmap, addr, m, entry->protection); 1761a7163bb9SKonstantin Belousov if (!obj_locked || lobject != entry->object.vm_object) 1762c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1763566526a9SAlan Cox } 1764566526a9SAlan Cox } 1765566526a9SAlan Cox 1766566526a9SAlan Cox /* 176782de724fSAlan Cox * Hold each of the physical pages that are mapped by the specified range of 176882de724fSAlan Cox * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid 176982de724fSAlan Cox * and allow the specified types of access, "prot". If all of the implied 177082de724fSAlan Cox * pages are successfully held, then the number of held pages is returned 177182de724fSAlan Cox * together with pointers to those pages in the array "ma". However, if any 177282de724fSAlan Cox * of the pages cannot be held, -1 is returned. 
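 *
 * A minimal usage sketch (hypothetical caller; "map" and "uva" are
 * illustrative only, and the unwire loop mirrors the error path
 * below):
 *
 *	vm_page_t ma[4];
 *	int i, n;
 *
 *	n = vm_fault_quick_hold_pages(map, uva, 4 * PAGE_SIZE,
 *	    VM_PROT_READ, ma, nitems(ma));
 *	if (n == -1)
 *		return (EFAULT);
 *	... access the held pages ...
 *	for (i = 0; i < n; i++)
 *		vm_page_unwire(ma[i], PQ_INACTIVE);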
177382de724fSAlan Cox */ 177482de724fSAlan Cox int 177582de724fSAlan Cox vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, 177682de724fSAlan Cox vm_prot_t prot, vm_page_t *ma, int max_count) 177782de724fSAlan Cox { 177882de724fSAlan Cox vm_offset_t end, va; 177982de724fSAlan Cox vm_page_t *mp; 17807e14088dSKonstantin Belousov int count; 178182de724fSAlan Cox boolean_t pmap_failed; 178282de724fSAlan Cox 1783af32c419SKonstantin Belousov if (len == 0) 1784af32c419SKonstantin Belousov return (0); 178582de724fSAlan Cox end = round_page(addr + len); 178682de724fSAlan Cox addr = trunc_page(addr); 178782de724fSAlan Cox 17880f1e6ec5SMark Johnston if (!vm_map_range_valid(map, addr, end)) 178982de724fSAlan Cox return (-1); 179082de724fSAlan Cox 17917e14088dSKonstantin Belousov if (atop(end - addr) > max_count) 179282de724fSAlan Cox panic("vm_fault_quick_hold_pages: count > max_count"); 17937e14088dSKonstantin Belousov count = atop(end - addr); 179482de724fSAlan Cox 179582de724fSAlan Cox /* 179682de724fSAlan Cox * Most likely, the physical pages are resident in the pmap, so it is 179782de724fSAlan Cox * faster to try pmap_extract_and_hold() first. 179882de724fSAlan Cox */ 179982de724fSAlan Cox pmap_failed = FALSE; 180082de724fSAlan Cox for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) { 180182de724fSAlan Cox *mp = pmap_extract_and_hold(map->pmap, va, prot); 180282de724fSAlan Cox if (*mp == NULL) 180382de724fSAlan Cox pmap_failed = TRUE; 180482de724fSAlan Cox else if ((prot & VM_PROT_WRITE) != 0 && 1805a5dbab54SAlan Cox (*mp)->dirty != VM_PAGE_BITS_ALL) { 180682de724fSAlan Cox /* 180782de724fSAlan Cox * Explicitly dirty the physical page. Otherwise, the 180882de724fSAlan Cox * caller's changes may go unnoticed because they are 180982de724fSAlan Cox * performed through an unmanaged mapping or by a DMA 181082de724fSAlan Cox * operation. 18113c76db4cSAlan Cox * 1812abb9b935SKonstantin Belousov * The object lock is not held here. 1813abb9b935SKonstantin Belousov * See vm_page_clear_dirty_mask(). 181482de724fSAlan Cox */ 18153c76db4cSAlan Cox vm_page_dirty(*mp); 181682de724fSAlan Cox } 181782de724fSAlan Cox } 181882de724fSAlan Cox if (pmap_failed) { 181982de724fSAlan Cox /* 182082de724fSAlan Cox * One or more pages could not be held by the pmap. Either no 182182de724fSAlan Cox * page was mapped at the specified virtual address or that 182282de724fSAlan Cox * mapping had insufficient permissions. Attempt to fault in 182382de724fSAlan Cox * and hold these pages. 18248ec533d3SKonstantin Belousov * 18258ec533d3SKonstantin Belousov * If vm_fault_disable_pagefaults() was called, 18268ec533d3SKonstantin Belousov * i.e., TDP_NOFAULTING is set, we must not sleep nor 18278ec533d3SKonstantin Belousov * acquire MD VM locks, which means we must not call 1828df08823dSKonstantin Belousov * vm_fault(). Some (out of tree) callers mark 18298ec533d3SKonstantin Belousov * too wide a code area with vm_fault_disable_pagefaults() 18308ec533d3SKonstantin Belousov * already, use the VM_PROT_QUICK_NOFAULT flag to request 18318ec533d3SKonstantin Belousov * the proper behaviour explicitly. 
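 *
 * Such a caller would pass, e.g., VM_PROT_READ |
 * VM_PROT_QUICK_NOFAULT in "prot", turning a missed pmap lookup
 * into an immediate error return instead of a vm_fault() call.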
183282de724fSAlan Cox */ 18338ec533d3SKonstantin Belousov if ((prot & VM_PROT_QUICK_NOFAULT) != 0 && 18348ec533d3SKonstantin Belousov (curthread->td_pflags & TDP_NOFAULTING) != 0) 18358ec533d3SKonstantin Belousov goto error; 183682de724fSAlan Cox for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) 1837df08823dSKonstantin Belousov if (*mp == NULL && vm_fault(map, va, prot, 183882de724fSAlan Cox VM_FAULT_NORMAL, mp) != KERN_SUCCESS) 183982de724fSAlan Cox goto error; 184082de724fSAlan Cox } 184182de724fSAlan Cox return (count); 184282de724fSAlan Cox error: 184382de724fSAlan Cox for (mp = ma; mp < ma + count; mp++) 1844fee2a2faSMark Johnston if (*mp != NULL) 1845fee2a2faSMark Johnston vm_page_unwire(*mp, PQ_INACTIVE); 184682de724fSAlan Cox return (-1); 184782de724fSAlan Cox } 184882de724fSAlan Cox 184982de724fSAlan Cox /* 1850df8bae1dSRodney W. Grimes * Routine: 1851df8bae1dSRodney W. Grimes * vm_fault_copy_entry 1852df8bae1dSRodney W. Grimes * Function: 1853210a6886SKonstantin Belousov * Create new shadow object backing dst_entry with private copy of 1854210a6886SKonstantin Belousov * all underlying pages. When src_entry is equal to dst_entry, 1855210a6886SKonstantin Belousov * function implements COW for wired-down map entry. Otherwise, 1856210a6886SKonstantin Belousov * it forks wired entry into dst_map. 1857df8bae1dSRodney W. Grimes * 1858df8bae1dSRodney W. Grimes * In/out conditions: 1859df8bae1dSRodney W. Grimes * The source and destination maps must be locked for write. 1860df8bae1dSRodney W. Grimes * The source map entry must be wired down (or be a sharing map 1861df8bae1dSRodney W. Grimes * entry corresponding to a main map entry that is wired down). 1862df8bae1dSRodney W. Grimes */ 186326f9a767SRodney W. Grimes void 1864121fd461SKonstantin Belousov vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, 1865121fd461SKonstantin Belousov vm_map_entry_t dst_entry, vm_map_entry_t src_entry, 1866121fd461SKonstantin Belousov vm_ooffset_t *fork_charge) 1867df8bae1dSRodney W. Grimes { 1868210a6886SKonstantin Belousov vm_object_t backing_object, dst_object, object, src_object; 18697afab86cSAlan Cox vm_pindex_t dst_pindex, pindex, src_pindex; 1870210a6886SKonstantin Belousov vm_prot_t access, prot; 1871df8bae1dSRodney W. Grimes vm_offset_t vaddr; 1872df8bae1dSRodney W. Grimes vm_page_t dst_m; 1873df8bae1dSRodney W. Grimes vm_page_t src_m; 18744c74acf7SKonstantin Belousov boolean_t upgrade; 1875df8bae1dSRodney W. Grimes 1876df8bae1dSRodney W. Grimes #ifdef lint 1877df8bae1dSRodney W. Grimes src_map++; 18780d94caffSDavid Greenman #endif /* lint */ 1879df8bae1dSRodney W. Grimes 1880210a6886SKonstantin Belousov upgrade = src_entry == dst_entry; 18810973283dSKonstantin Belousov access = prot = dst_entry->protection; 1882210a6886SKonstantin Belousov 1883df8bae1dSRodney W. Grimes src_object = src_entry->object.vm_object; 18847afab86cSAlan Cox src_pindex = OFF_TO_IDX(src_entry->offset); 1885df8bae1dSRodney W. Grimes 18860973283dSKonstantin Belousov if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 18870973283dSKonstantin Belousov dst_object = src_object; 18880973283dSKonstantin Belousov vm_object_reference(dst_object); 18890973283dSKonstantin Belousov } else { 1890df8bae1dSRodney W. Grimes /* 189167388836SKonstantin Belousov * Create the top-level object for the destination entry. 189267388836SKonstantin Belousov * Doesn't actually shadow anything - we copy the pages 189367388836SKonstantin Belousov * directly. 1894df8bae1dSRodney W. 
Grimes */ 189567388836SKonstantin Belousov dst_object = vm_object_allocate_anon(atop(dst_entry->end - 189667388836SKonstantin Belousov dst_entry->start), NULL, NULL, 0); 1897f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1898f8a47341SAlan Cox dst_object->flags |= OBJ_COLORED; 1899f8a47341SAlan Cox dst_object->pg_color = atop(dst_entry->start); 1900f8a47341SAlan Cox #endif 1901a60d3db1SKonstantin Belousov dst_object->domain = src_object->domain; 1902a60d3db1SKonstantin Belousov dst_object->charge = dst_entry->end - dst_entry->start; 19030973283dSKonstantin Belousov } 1904df8bae1dSRodney W. Grimes 190589f6b863SAttilio Rao VM_OBJECT_WLOCK(dst_object); 1906210a6886SKonstantin Belousov KASSERT(upgrade || dst_entry->object.vm_object == NULL, 1907121fd461SKonstantin Belousov ("vm_fault_copy_entry: vm_object not NULL")); 19080973283dSKonstantin Belousov if (src_object != dst_object) { 1909df8bae1dSRodney W. Grimes dst_entry->object.vm_object = dst_object; 1910df8bae1dSRodney W. Grimes dst_entry->offset = 0; 191178022527SKonstantin Belousov dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC; 19120973283dSKonstantin Belousov } 1913210a6886SKonstantin Belousov if (fork_charge != NULL) { 1914ef694c1aSEdward Tomasz Napierala KASSERT(dst_entry->cred == NULL, 1915121fd461SKonstantin Belousov ("vm_fault_copy_entry: leaked swp charge")); 1916ef694c1aSEdward Tomasz Napierala dst_object->cred = curthread->td_ucred; 1917ef694c1aSEdward Tomasz Napierala crhold(dst_object->cred); 1918121fd461SKonstantin Belousov *fork_charge += dst_object->charge; 19199f25ab83SKonstantin Belousov } else if ((dst_object->type == OBJT_DEFAULT || 19209f25ab83SKonstantin Belousov dst_object->type == OBJT_SWAP) && 19219f25ab83SKonstantin Belousov dst_object->cred == NULL) { 19220973283dSKonstantin Belousov KASSERT(dst_entry->cred != NULL, ("no cred for entry %p", 19230973283dSKonstantin Belousov dst_entry)); 1924ef694c1aSEdward Tomasz Napierala dst_object->cred = dst_entry->cred; 1925ef694c1aSEdward Tomasz Napierala dst_entry->cred = NULL; 1926210a6886SKonstantin Belousov } 19270973283dSKonstantin Belousov 1928210a6886SKonstantin Belousov /* 1929210a6886SKonstantin Belousov * If not an upgrade, then enter the mappings in the pmap as 1930210a6886SKonstantin Belousov * read and/or execute accesses. Otherwise, enter them as 1931210a6886SKonstantin Belousov * write accesses. 1932210a6886SKonstantin Belousov * 1933210a6886SKonstantin Belousov * A writeable large page mapping is only created if all of 1934210a6886SKonstantin Belousov * the constituent small page mappings are modified. Marking 1935210a6886SKonstantin Belousov * PTEs as modified on inception allows promotion to happen 1936210a6886SKonstantin Belousov * without taking potentially large number of soft faults. 1937210a6886SKonstantin Belousov */ 1938210a6886SKonstantin Belousov if (!upgrade) 1939210a6886SKonstantin Belousov access &= ~VM_PROT_WRITE; 1940df8bae1dSRodney W. Grimes 1941df8bae1dSRodney W. Grimes /* 1942ef45823eSKonstantin Belousov * Loop through all of the virtual pages within the entry's 1943ef45823eSKonstantin Belousov * range, copying each page from the source object to the 1944ef45823eSKonstantin Belousov * destination object. Since the source is wired, those pages 1945ef45823eSKonstantin Belousov * must exist. In contrast, the destination is pageable. 
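 * (In the upgrade case the source and destination object may be one
 * and the same; such pages are busied and re-entered in place rather
 * than copied.)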
19466939b4d3SMark Johnston * Since the destination object doesn't share any backing storage 1947ef45823eSKonstantin Belousov * with the source object, all of its pages must be dirtied, 1948ef45823eSKonstantin Belousov * regardless of whether they can be written. 1949df8bae1dSRodney W. Grimes */ 19507afab86cSAlan Cox for (vaddr = dst_entry->start, dst_pindex = 0; 1951df8bae1dSRodney W. Grimes vaddr < dst_entry->end; 19527afab86cSAlan Cox vaddr += PAGE_SIZE, dst_pindex++) { 19530973283dSKonstantin Belousov again: 1954df8bae1dSRodney W. Grimes /* 1955df8bae1dSRodney W. Grimes * Find the page in the source object, and copy it in. 19564c74acf7SKonstantin Belousov * Because the source is wired down, the page will be 19574c74acf7SKonstantin Belousov * in memory. 1958df8bae1dSRodney W. Grimes */ 19590973283dSKonstantin Belousov if (src_object != dst_object) 196083b375eaSAttilio Rao VM_OBJECT_RLOCK(src_object); 1961c5b65a67SAlan Cox object = src_object; 19627afab86cSAlan Cox pindex = src_pindex + dst_pindex; 19637afab86cSAlan Cox while ((src_m = vm_page_lookup(object, pindex)) == NULL && 1964c5b65a67SAlan Cox (backing_object = object->backing_object) != NULL) { 1965c5b65a67SAlan Cox /* 19664c74acf7SKonstantin Belousov * Unless the source mapping is read-only or 19674c74acf7SKonstantin Belousov * it is presently being upgraded from 19684c74acf7SKonstantin Belousov * read-only, the first object in the shadow 19694c74acf7SKonstantin Belousov * chain should provide all of the pages. In 19704c74acf7SKonstantin Belousov * other words, this loop body should never be 19714c74acf7SKonstantin Belousov * executed when the source mapping is already 19724c74acf7SKonstantin Belousov * read/write. 1973c5b65a67SAlan Cox */ 19744c74acf7SKonstantin Belousov KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 || 19754c74acf7SKonstantin Belousov upgrade, 19764c74acf7SKonstantin Belousov ("vm_fault_copy_entry: main object missing page")); 19774c74acf7SKonstantin Belousov 197883b375eaSAttilio Rao VM_OBJECT_RLOCK(backing_object); 1979c5b65a67SAlan Cox pindex += OFF_TO_IDX(object->backing_object_offset); 19800973283dSKonstantin Belousov if (object != dst_object) 198183b375eaSAttilio Rao VM_OBJECT_RUNLOCK(object); 1982c5b65a67SAlan Cox object = backing_object; 1983c5b65a67SAlan Cox } 19844c74acf7SKonstantin Belousov KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing")); 19850973283dSKonstantin Belousov 19860973283dSKonstantin Belousov if (object != dst_object) { 19870973283dSKonstantin Belousov /* 19880973283dSKonstantin Belousov * Allocate a page in the destination object. 19890973283dSKonstantin Belousov */ 19902602a2eaSKonstantin Belousov dst_m = vm_page_alloc(dst_object, (src_object == 19912602a2eaSKonstantin Belousov dst_object ? 
src_pindex : 0) + dst_pindex, 19922602a2eaSKonstantin Belousov VM_ALLOC_NORMAL); 19930973283dSKonstantin Belousov if (dst_m == NULL) { 19940973283dSKonstantin Belousov VM_OBJECT_WUNLOCK(dst_object); 19950973283dSKonstantin Belousov VM_OBJECT_RUNLOCK(object); 19962c0f13aaSKonstantin Belousov vm_wait(dst_object); 1997c8f780e3SKonstantin Belousov VM_OBJECT_WLOCK(dst_object); 19980973283dSKonstantin Belousov goto again; 19990973283dSKonstantin Belousov } 2000669890eaSAlan Cox pmap_copy_page(src_m, dst_m); 200183b375eaSAttilio Rao VM_OBJECT_RUNLOCK(object); 200245d72c7dSKonstantin Belousov dst_m->dirty = dst_m->valid = src_m->valid; 20030973283dSKonstantin Belousov } else { 20040973283dSKonstantin Belousov dst_m = src_m; 200563e97555SJeff Roberson if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0) 20060973283dSKonstantin Belousov goto again; 200763e97555SJeff Roberson if (dst_m->pindex >= dst_object->size) { 2008c62637d6SKonstantin Belousov /* 2009c62637d6SKonstantin Belousov * We are upgrading. Index can occur 2010c62637d6SKonstantin Belousov * out of bounds if the object type is 2011c62637d6SKonstantin Belousov * vnode and the file was truncated. 2012c62637d6SKonstantin Belousov */ 201363e97555SJeff Roberson vm_page_xunbusy(dst_m); 2014c62637d6SKonstantin Belousov break; 201563e97555SJeff Roberson } 20160973283dSKonstantin Belousov } 201789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(dst_object); 2018df8bae1dSRodney W. Grimes 2019df8bae1dSRodney W. Grimes /* 2020210a6886SKonstantin Belousov * Enter it in the pmap. If a wired, copy-on-write 2021210a6886SKonstantin Belousov * mapping is being replaced by a write-enabled 2022210a6886SKonstantin Belousov * mapping, then wire that new mapping. 202345d72c7dSKonstantin Belousov * 202445d72c7dSKonstantin Belousov * The page can be invalid if the user called 202545d72c7dSKonstantin Belousov * msync(MS_INVALIDATE) or truncated the backing vnode 202645d72c7dSKonstantin Belousov * or shared memory object. In this case, do not 202745d72c7dSKonstantin Belousov * insert it into pmap, but still do the copy so that 202845d72c7dSKonstantin Belousov * all copies of the wired map entry have similar 202945d72c7dSKonstantin Belousov * backing pages. 2030df8bae1dSRodney W. Grimes */ 20310012f373SJeff Roberson if (vm_page_all_valid(dst_m)) { 203239ffa8c1SKonstantin Belousov pmap_enter(dst_map->pmap, vaddr, dst_m, prot, 203339ffa8c1SKonstantin Belousov access | (upgrade ? PMAP_ENTER_WIRED : 0), 0); 203445d72c7dSKonstantin Belousov } 2035df8bae1dSRodney W. Grimes 2036df8bae1dSRodney W. Grimes /* 2037df8bae1dSRodney W. Grimes * Mark it no longer busy, and put it on the active list. 2038df8bae1dSRodney W. Grimes */ 203989f6b863SAttilio Rao VM_OBJECT_WLOCK(dst_object); 20402965a453SKip Macy 2041210a6886SKonstantin Belousov if (upgrade) { 20420973283dSKonstantin Belousov if (src_m != dst_m) { 20433ae10f74SAttilio Rao vm_page_unwire(src_m, PQ_INACTIVE); 2044210a6886SKonstantin Belousov vm_page_wire(dst_m); 20452965a453SKip Macy } else { 2046d842aa51SMark Johnston KASSERT(vm_page_wired(dst_m), 20470973283dSKonstantin Belousov ("dst_m %p is not wired", dst_m)); 20480973283dSKonstantin Belousov } 20490973283dSKonstantin Belousov } else { 2050df8bae1dSRodney W. Grimes vm_page_activate(dst_m); 20512965a453SKip Macy } 2052c7aebda8SAttilio Rao vm_page_xunbusy(dst_m); 2053df8bae1dSRodney W. 
Grimes } 205489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(dst_object); 2055210a6886SKonstantin Belousov if (upgrade) { 2056210a6886SKonstantin Belousov dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY); 2057210a6886SKonstantin Belousov vm_object_deallocate(src_object); 2058210a6886SKonstantin Belousov } 2059df8bae1dSRodney W. Grimes } 206026f9a767SRodney W. Grimes 20615730afc9SAlan Cox /* 20625730afc9SAlan Cox * Block entry into the machine-independent layer's page fault handler by 20635730afc9SAlan Cox * the calling thread. Subsequent calls to vm_fault() by that thread will 20645730afc9SAlan Cox * return KERN_PROTECTION_FAILURE. Enable machine-dependent handling of 20655730afc9SAlan Cox * spurious page faults. 20665730afc9SAlan Cox */ 20672801687dSKonstantin Belousov int 20682801687dSKonstantin Belousov vm_fault_disable_pagefaults(void) 20692801687dSKonstantin Belousov { 20702801687dSKonstantin Belousov 20715730afc9SAlan Cox return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR)); 20722801687dSKonstantin Belousov } 20732801687dSKonstantin Belousov 20742801687dSKonstantin Belousov void 20752801687dSKonstantin Belousov vm_fault_enable_pagefaults(int save) 20762801687dSKonstantin Belousov { 20772801687dSKonstantin Belousov 20782801687dSKonstantin Belousov curthread_pflags_restore(save); 20792801687dSKonstantin Belousov } 2080
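
/*
 * Typical usage of the two functions above, modeled on the in-tree
 * copyin_nofault() wrapper (a sketch, not a verbatim copy):
 *
 *	int error, save;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = copyin(udaddr, kaddr, len);
 *	vm_fault_enable_pagefaults(save);
 *	return (error);
 */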