/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_fault.c	8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#define	PFBAK	4
#define	PFFOR	4

#define	VM_FAULT_READ_DEFAULT	(1 + VM_FAULT_READ_AHEAD_INIT)
#define	VM_FAULT_READ_MAX	(1 + VM_FAULT_READ_AHEAD_MAX)

#define	VM_FAULT_DONTNEED_MIN	1048576

struct faultstate {
	vm_page_t m;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_page_t first_m;
	vm_object_t first_object;
	vm_pindex_t first_pindex;
	vm_map_t map;
	vm_map_entry_t entry;
	int map_generation;
	bool lookup_still_valid;
	struct vnode *vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
	    int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
	    int backward, int forward, bool obj_locked);

static int vm_pfault_oom_attempts = 3;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_attempts, CTLFLAG_RWTUN,
    &vm_pfault_oom_attempts, 0,
    "Number of page allocation attempts in page fault handler before it "
    "triggers OOM handling");

static int vm_pfault_oom_wait = 10;
SYSCTL_INT(_vm, OID_AUTO, pfault_oom_wait, CTLFLAG_RWTUN,
    &vm_pfault_oom_wait, 0,
    "Number of seconds to wait for free pages before retrying "
    "the page fault handler");

static inline void
release_page(struct faultstate *fs)
{

	vm_page_xunbusy(fs->m);
	vm_page_lock(fs->m);
	vm_page_deactivate(fs->m);
	vm_page_unlock(fs->m);
	fs->m = NULL;
}

static inline void
unlock_map(struct faultstate *fs)
{

	if (fs->lookup_still_valid) {
		vm_map_lookup_done(fs->map, fs->entry);
		fs->lookup_still_valid = false;
	}
}

static void
unlock_vp(struct faultstate *fs)
{

	if (fs->vp != NULL) {
		vput(fs->vp);
		fs->vp = NULL;
	}
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

	vm_object_pip_wakeup(fs->object);
	VM_OBJECT_WUNLOCK(fs->object);
	if (fs->object != fs->first_object) {
		VM_OBJECT_WLOCK(fs->first_object);
		vm_page_free(fs->first_m);
		vm_object_pip_wakeup(fs->first_object);
		VM_OBJECT_WUNLOCK(fs->first_object);
		fs->first_m = NULL;
	}
	vm_object_deallocate(fs->first_object);
	unlock_map(fs);
	unlock_vp(fs);
}

static void
vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
    vm_prot_t fault_type, int fault_flags, bool set_wd)
{
	bool need_dirty;

	if (((prot & VM_PROT_WRITE) == 0 &&
	    (fault_flags & VM_FAULT_DIRTY) == 0) ||
	    (m->oflags & VPO_UNMANAGED) != 0)
		return;

	VM_OBJECT_ASSERT_LOCKED(m->object);

	need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
	    (fault_flags & VM_FAULT_WIRE) == 0) ||
	    (fault_flags & VM_FAULT_DIRTY) != 0;

	if (set_wd)
		vm_object_set_writeable_dirty(m->object);
	else
		/*
		 * If two callers of vm_fault_dirty() with set_wd ==
		 * FALSE race, one for a map entry with the
		 * MAP_ENTRY_NOSYNC flag set and the other with it
		 * clear, it is possible for the no-NOSYNC thread to
		 * see m->dirty != 0 and not clear VPO_NOSYNC.  Take
		 * the vm_page lock around the manipulation of
		 * VPO_NOSYNC and the vm_page_dirty() call, to avoid
		 * the race and keep m->oflags consistent.
		 */
		vm_page_lock(m);

	/*
	 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
	 * if the page is already dirty to prevent data written with
	 * the expectation of being synced from not being synced.
	 * Likewise if this entry does not request NOSYNC then make
	 * sure the page isn't marked NOSYNC.  Applications sharing
	 * data should use the same flags to avoid ping ponging.
	 */
	if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0) {
		if (m->dirty == 0) {
			m->oflags |= VPO_NOSYNC;
		}
	} else {
		m->oflags &= ~VPO_NOSYNC;
	}

	/*
	 * If the fault is a write, we know that this page is being
	 * written NOW so dirty it explicitly to save on
	 * pmap_is_modified() calls later.
	 *
	 * Also, since the page is now dirty, we can possibly tell
	 * the pager to release any swap backing the page.  Calling
	 * the pager requires a write lock on the object.
	 */
	if (need_dirty)
		vm_page_dirty(m);
	if (!set_wd)
		vm_page_unlock(m);
	else if (need_dirty)
		vm_pager_page_unswapped(m);
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	vm_page_t m, m_map;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
    VM_NRESERVLEVEL > 0
	vm_page_t m_super;
	int flags;
#endif
	int psind, rv;

	MPASS(fs->vp == NULL);
	m = vm_page_lookup(fs->first_object, fs->first_pindex);
	/* A busy page can be mapped for read|execute access. */
	if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
	    vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL)
		return (KERN_FAILURE);
	m_map = m;
	psind = 0;
#if (defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)) && \
    VM_NRESERVLEVEL > 0
	if ((m->flags & PG_FICTITIOUS) == 0 &&
	    (m_super = vm_reserv_to_superpage(m)) != NULL &&
	    rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
	    roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
	    (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
	    (pagesizes[m_super->psind] - 1)) && !wired &&
	    pmap_ps_enabled(fs->map->pmap)) {
		flags = PS_ALL_VALID;
		if ((prot & VM_PROT_WRITE) != 0) {
			/*
			 * Create a superpage mapping allowing write access
			 * only if none of the constituent pages are busy and
			 * all of them are already dirty (except possibly for
			 * the page that was faulted on).
			 */
			flags |= PS_NONE_BUSY;
			if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
				flags |= PS_ALL_DIRTY;
		}
		if (vm_page_ps_test(m_super, flags, m)) {
			m_map = m_super;
			psind = m_super->psind;
			vaddr = rounddown2(vaddr, pagesizes[psind]);
			/* Preset the modified bit for dirty superpages. */
			if ((flags & PS_ALL_DIRTY) != 0)
				fault_type |= VM_PROT_WRITE;
		}
	}
#endif
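	/*
	 * Enter the page, or the superpage selected above, with a single
	 * pmap operation.  For illustration, assuming an amd64 system
	 * where pagesizes[1] is 2MB: a fully valid, suitably aligned
	 * reservation that passed the checks above is entered with
	 * psind == 1, so one soft fault installs the whole 2MB mapping;
	 * otherwise psind stays 0 and only the base page is entered.
	 */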
	rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type |
	    PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind);
	if (rv != KERN_SUCCESS)
		return (rv);
	if (m_hold != NULL) {
		*m_hold = m;
		vm_page_wire(m);
	}
	vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
	if (psind == 0 && !wired)
		vm_fault_prefault(fs, vaddr, PFBAK, PFFOR, true);
	VM_OBJECT_RUNLOCK(fs->first_object);
	vm_map_lookup_done(fs->map, fs->entry);
	curthread->td_ru.ru_minflt++;
	return (KERN_SUCCESS);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);

	if (!vm_map_trylock_read(fs->map)) {
		VM_OBJECT_WUNLOCK(fs->first_object);
		vm_map_lock_read(fs->map);
		VM_OBJECT_WLOCK(fs->first_object);
	}
	fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

	/*
	 * Check each page to ensure that the pager is obeying the
	 * interface: the page must be installed in the object, fully
	 * valid, and exclusively busied.
	 */
	MPASS(m != NULL);
	MPASS(m->valid == VM_PAGE_BITS_ALL);
	MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
	vm_page_t m;
	vm_pindex_t pidx;

	VM_OBJECT_ASSERT_WLOCKED(object);
	MPASS(first <= last);
	for (pidx = first, m = vm_page_lookup(object, pidx);
	    pidx <= last; pidx++, m = vm_page_next(m)) {
		vm_fault_populate_check_page(m);
		vm_page_lock(m);
		vm_page_deactivate(m);
		vm_page_unlock(m);
		vm_page_xunbusy(m);
	}
}

static int
vm_fault_populate(struct faultstate *fs, vm_prot_t prot, int fault_type,
    int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
	struct mtx *m_mtx;
	vm_offset_t vaddr;
	vm_page_t m;
	vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
	int i, npages, psind, rv;

	MPASS(fs->object == fs->first_object);
	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	MPASS(REFCOUNT_COUNT(fs->first_object->paging_in_progress) > 0);
	MPASS(fs->first_object->backing_object == NULL);
	MPASS(fs->lookup_still_valid);

	pager_first = OFF_TO_IDX(fs->entry->offset);
	pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
	unlock_map(fs);
	unlock_vp(fs);

	/*
	 * Call the pager (driver) populate() method.
	 *
	 * There is no guarantee that the method will be called again
	 * if the current fault is for read, and a future fault is
	 * for write.  Report the entry's maximum allowed protection
	 * to the driver.
	 */
	rv = vm_pager_populate(fs->first_object, fs->first_pindex,
	    fault_type, fs->entry->max_protection, &pager_first, &pager_last);

	VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
	if (rv == VM_PAGER_BAD) {
		/*
		 * VM_PAGER_BAD is the backdoor for a pager to request
		 * normal fault handling.
		 */
		vm_fault_restore_map_lock(fs);
		if (fs->map->timestamp != fs->map_generation)
			return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
		return (KERN_NOT_RECEIVER);
	}
	if (rv != VM_PAGER_OK)
		return (KERN_FAILURE); /* AKA SIGSEGV */

	/* Ensure that the driver is obeying the interface. */
	MPASS(pager_first <= pager_last);
	MPASS(fs->first_pindex <= pager_last);
	MPASS(fs->first_pindex >= pager_first);
	MPASS(pager_last < fs->first_object->size);

	vm_fault_restore_map_lock(fs);
	if (fs->map->timestamp != fs->map_generation) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    pager_last);
		return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
	}

	/*
	 * The map is unchanged after our last unlock.  Process the fault.
	 *
	 * The range [pager_first, pager_last] that is given to the
	 * pager is only a hint.  The pager may populate any range
	 * within the object that includes the requested page index.
	 * In case the pager expanded the range, clip it to fit into
	 * the map entry.
	 */
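	/*
	 * For illustration (hypothetical numbers): if the entry maps
	 * object pages [16, 31] but the pager populated [0, 63], then
	 * the pages in [0, 15] and [32, 63] are deactivated and
	 * unbusied by the cleanup calls below, and the range is
	 * clipped to [16, 31] before the pmap_enter() loop.
	 */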
	map_first = OFF_TO_IDX(fs->entry->offset);
	if (map_first > pager_first) {
		vm_fault_populate_cleanup(fs->first_object, pager_first,
		    map_first - 1);
		pager_first = map_first;
	}
	map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
	if (map_last < pager_last) {
		vm_fault_populate_cleanup(fs->first_object, map_last + 1,
		    pager_last);
		pager_last = map_last;
	}
	for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
	    pidx <= pager_last;
	    pidx += npages, m = vm_page_next(&m[npages - 1])) {
		vaddr = fs->entry->start + IDX_TO_OFF(pidx) - fs->entry->offset;
#if defined(__aarch64__) || defined(__amd64__) || (defined(__arm__) && \
    __ARM_ARCH >= 6) || defined(__i386__) || defined(__riscv)
		psind = m->psind;
		if (psind > 0 && ((vaddr & (pagesizes[psind] - 1)) != 0 ||
		    pidx + OFF_TO_IDX(pagesizes[psind]) - 1 > pager_last ||
		    !pmap_ps_enabled(fs->map->pmap) || wired))
			psind = 0;
#else
		psind = 0;
#endif
		npages = atop(pagesizes[psind]);
		for (i = 0; i < npages; i++) {
			vm_fault_populate_check_page(&m[i]);
			vm_fault_dirty(fs->entry, &m[i], prot, fault_type,
			    fault_flags, true);
		}
		VM_OBJECT_WUNLOCK(fs->first_object);
		rv = pmap_enter(fs->map->pmap, vaddr, m, prot, fault_type |
		    (wired ? PMAP_ENTER_WIRED : 0), psind);
#if defined(__amd64__)
		if (psind > 0 && rv == KERN_FAILURE) {
			for (i = 0; i < npages; i++) {
				rv = pmap_enter(fs->map->pmap, vaddr + ptoa(i),
				    &m[i], prot, fault_type |
				    (wired ? PMAP_ENTER_WIRED : 0), 0);
				MPASS(rv == KERN_SUCCESS);
			}
		}
#else
		MPASS(rv == KERN_SUCCESS);
#endif
		VM_OBJECT_WLOCK(fs->first_object);
		m_mtx = NULL;
		for (i = 0; i < npages; i++) {
			if ((fault_flags & VM_FAULT_WIRE) != 0) {
				vm_page_wire(&m[i]);
			} else {
				vm_page_change_lock(&m[i], &m_mtx);
				vm_page_activate(&m[i]);
			}
			if (m_hold != NULL && m[i].pindex == fs->first_pindex) {
				*m_hold = &m[i];
				vm_page_wire(&m[i]);
			}
			vm_page_xunbusy(&m[i]);
		}
		if (m_mtx != NULL)
			mtx_unlock(m_mtx);
	}
	curthread->td_ru.ru_majflt++;
	return (KERN_SUCCESS);
}

static int prot_fault_translation;
SYSCTL_INT(_machdep, OID_AUTO, prot_fault_translation, CTLFLAG_RWTUN,
    &prot_fault_translation, 0,
    "Control signal to deliver on protection fault");

/* compat definition to keep common code for signal translation */
#define	UCODE_PAGEFLT	12
#ifdef T_PAGEFLT
_Static_assert(UCODE_PAGEFLT == T_PAGEFLT, "T_PAGEFLT");
#endif

/*
 *	vm_fault_trap:
 *
 *	Handle a page fault occurring at the given address,
 *	requiring the given permissions, in the map specified.
 *	If successful, the page is inserted into the
 *	associated physical map.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller may hold no locks.
 */
int
vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, int *signo, int *ucode)
{
	int result;

	MPASS(signo == NULL || ucode != NULL);
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULT))
		ktrfault(vaddr, fault_type);
#endif
	result = vm_fault(map, trunc_page(vaddr), fault_type, fault_flags,
	    NULL);
	KASSERT(result == KERN_SUCCESS || result == KERN_FAILURE ||
	    result == KERN_INVALID_ADDRESS ||
	    result == KERN_RESOURCE_SHORTAGE ||
	    result == KERN_PROTECTION_FAILURE ||
	    result == KERN_OUT_OF_BOUNDS,
	    ("Unexpected Mach error %d from vm_fault()", result));
#ifdef KTRACE
	if (map != kernel_map && KTRPOINT(curthread, KTR_FAULTEND))
		ktrfaultend(result);
#endif
	if (result != KERN_SUCCESS && signo != NULL) {
		switch (result) {
		case KERN_FAILURE:
		case KERN_INVALID_ADDRESS:
			*signo = SIGSEGV;
			*ucode = SEGV_MAPERR;
			break;
		case KERN_RESOURCE_SHORTAGE:
			*signo = SIGBUS;
			*ucode = BUS_OOMERR;
			break;
		case KERN_OUT_OF_BOUNDS:
			*signo = SIGBUS;
			*ucode = BUS_OBJERR;
			break;
		case KERN_PROTECTION_FAILURE:
			if (prot_fault_translation == 0) {
				/*
				 * Autodetect.  This check also covers
				 * the images without the ABI-tag ELF
				 * note.
				 */
				if (SV_CURPROC_ABI() == SV_ABI_FREEBSD &&
				    curproc->p_osrel >= P_OSREL_SIGSEGV) {
					*signo = SIGSEGV;
					*ucode = SEGV_ACCERR;
				} else {
					*signo = SIGBUS;
					*ucode = UCODE_PAGEFLT;
				}
			} else if (prot_fault_translation == 1) {
				/* Always compat mode. */
				*signo = SIGBUS;
				*ucode = UCODE_PAGEFLT;
			} else {
				/* Always SIGSEGV mode. */
				*signo = SIGSEGV;
				*ucode = SEGV_ACCERR;
			}
			break;
		default:
			KASSERT(0, ("Unexpected Mach error %d from vm_fault()",
			    result));
			break;
		}
	}
	return (result);
}

int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
	struct faultstate fs;
	struct vnode *vp;
	struct domainset *dset;
	vm_object_t next_object, retry_object;
	vm_offset_t e_end, e_start;
	vm_pindex_t retry_pindex;
	vm_prot_t prot, retry_prot;
	int ahead, alloc_req, behind, cluster_offset, error, era, faultcount;
	int locked, nera, oom, result, rv;
	u_char behavior;
	boolean_t wired;	/* Passed by reference. */
	bool dead, hardfault, is_first_object_locked;

	VM_CNT_INC(v_vm_faults);

	if ((curthread->td_pflags & TDP_NOFAULTING) != 0)
		return (KERN_PROTECTION_FAILURE);

	fs.vp = NULL;
	faultcount = 0;
	nera = -1;
	hardfault = false;

RetryFault:
	oom = 0;
RetryFault_oom:

	/*
	 * Find the backing store object and offset into it to begin the
	 * search.
	 */
	fs.map = map;
	result = vm_map_lookup(&fs.map, vaddr, fault_type |
	    VM_PROT_FAULT_LOOKUP, &fs.entry, &fs.first_object,
	    &fs.first_pindex, &prot, &wired);
	if (result != KERN_SUCCESS) {
		unlock_vp(&fs);
		return (result);
	}

	fs.map_generation = fs.map->timestamp;

	if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
		panic("%s: fault on nofault entry, addr: %#lx",
		    __func__, (u_long)vaddr);
	}
	if (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION &&
	    fs.entry->wiring_thread != curthread) {
		vm_map_unlock_read(fs.map);
		vm_map_lock(fs.map);
		if (vm_map_lookup_entry(fs.map, vaddr, &fs.entry) &&
		    (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
			unlock_vp(&fs);
			fs.entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
			vm_map_unlock_and_wait(fs.map, 0);
		} else
			vm_map_unlock(fs.map);
		goto RetryFault;
	}

	MPASS((fs.entry->eflags & MAP_ENTRY_GUARD) == 0);

	if (wired)
		fault_type = prot | (fault_type & VM_PROT_COPY);
	else
		KASSERT((fault_flags & VM_FAULT_WIRE) == 0,
		    ("!wired && VM_FAULT_WIRE"));

	/*
	 * Try to avoid lock contention on the top-level object through
	 * special-case handling of some types of page faults, specifically,
	 * those that are both (1) mapping an existing page from the top-
	 * level object and (2) not having to mark that object as containing
	 * dirty pages.  Under these conditions, a read lock on the top-level
	 * object suffices, allowing multiple page faults of a similar type to
	 * run in parallel on the same top-level object.
	 */
	if (fs.vp == NULL /* avoid locked vnode leak */ &&
	    (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0 &&
	    /* avoid calling vm_object_set_writeable_dirty() */
	    ((prot & VM_PROT_WRITE) == 0 ||
	    (fs.first_object->type != OBJT_VNODE &&
	    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
	    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
		VM_OBJECT_RLOCK(fs.first_object);
		if ((prot & VM_PROT_WRITE) == 0 ||
		    (fs.first_object->type != OBJT_VNODE &&
		    (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
		    (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0) {
			rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
			    fault_flags, wired, m_hold);
			if (rv == KERN_SUCCESS)
				return (rv);
		}
		if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
			VM_OBJECT_RUNLOCK(fs.first_object);
			VM_OBJECT_WLOCK(fs.first_object);
		}
	} else {
		VM_OBJECT_WLOCK(fs.first_object);
	}

	/*
	 * Make a reference to this object to prevent its disposal while we
	 * are messing with it.  Once we have the reference, the map is free
	 * to be diddled.  Since objects reference their shadows (and copies),
	 * they will stay around as well.
	 *
	 * Bump the paging-in-progress count to prevent size changes (e.g.
	 * truncation operations) during I/O.
	 */
	vm_object_reference_locked(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

	fs.lookup_still_valid = true;

	fs.first_m = NULL;

	/*
	 * Search for the page at object/offset.
	 */
	fs.object = fs.first_object;
	fs.pindex = fs.first_pindex;
	while (TRUE) {
		/*
		 * If the object is marked for imminent termination,
		 * we retry here, since the collapse pass has raced
		 * with us.  Otherwise, if we see a terminally dead
		 * object, return fail.
		 */
		if ((fs.object->flags & OBJ_DEAD) != 0) {
			dead = fs.object->type == OBJT_DEAD;
			unlock_and_deallocate(&fs);
			if (dead)
				return (KERN_PROTECTION_FAILURE);
			pause("vmf_de", 1);
			goto RetryFault;
		}

		/*
		 * See if the page is resident.
		 */
		fs.m = vm_page_lookup(fs.object, fs.pindex);
		if (fs.m != NULL) {
			/*
			 * Wait/Retry if the page is busy.  We have to do this
			 * if the page is either exclusive or shared busy
			 * because the vm_pager may be using read busy for
			 * pageouts (and even pageins if it is the vnode
			 * pager), and we could end up trying to pagein and
			 * pageout the same page simultaneously.
			 *
			 * We can theoretically allow the busy case on a read
			 * fault if the page is marked valid, but since such
			 * pages are typically already pmap'd, putting that
			 * special case in might be more effort than it is
			 * worth.  We cannot under any circumstances mess
			 * around with a shared busied page except, perhaps,
			 * to pmap it.
			 */
			if (vm_page_tryxbusy(fs.m) == 0) {
				/*
				 * Reference the page before unlocking and
				 * sleeping so that the page daemon is less
				 * likely to reclaim it.
				 */
				vm_page_aflag_set(fs.m, PGA_REFERENCED);
				if (fs.object != fs.first_object) {
					if (!VM_OBJECT_TRYWLOCK(
					    fs.first_object)) {
						VM_OBJECT_WUNLOCK(fs.object);
						VM_OBJECT_WLOCK(fs.first_object);
						VM_OBJECT_WLOCK(fs.object);
					}
					vm_page_free(fs.first_m);
					vm_object_pip_wakeup(fs.first_object);
					VM_OBJECT_WUNLOCK(fs.first_object);
					fs.first_m = NULL;
				}
				unlock_map(&fs);
				if (fs.m == vm_page_lookup(fs.object,
				    fs.pindex)) {
					vm_page_sleep_if_busy(fs.m, "vmpfw");
				}
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_WUNLOCK(fs.object);
				VM_CNT_INC(v_intrans);
				vm_object_deallocate(fs.first_object);
				goto RetryFault;
			}

			/*
			 * The page is marked busy for other processes and the
			 * pagedaemon.  If it still isn't completely valid
			 * (readable), jump to readrest, else break out (we
			 * found the page).
			 */
			if (fs.m->valid != VM_PAGE_BITS_ALL)
				goto readrest;
			break; /* break to PAGE HAS BEEN FOUND */
		}
		KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m));

		/*
		 * Page is not resident.  If the pager might contain the page
		 * or this is the beginning of the search, allocate a new
		 * page.  (Default objects are zero-fill, so there is no real
		 * pager for them.)
		 */
		if (fs.object->type != OBJT_DEFAULT ||
		    fs.object == fs.first_object) {
			if (fs.pindex >= fs.object->size) {
				unlock_and_deallocate(&fs);
				return (KERN_OUT_OF_BOUNDS);
			}

			if (fs.object == fs.first_object &&
			    (fs.first_object->flags & OBJ_POPULATE) != 0 &&
			    fs.first_object->shadow_count == 0) {
				rv = vm_fault_populate(&fs, prot, fault_type,
				    fault_flags, wired, m_hold);
				switch (rv) {
				case KERN_SUCCESS:
				case KERN_FAILURE:
					unlock_and_deallocate(&fs);
					return (rv);
				case KERN_RESOURCE_SHORTAGE:
					unlock_and_deallocate(&fs);
					goto RetryFault;
				case KERN_NOT_RECEIVER:
					/*
					 * Pager's populate() method
					 * returned VM_PAGER_BAD.
					 */
					break;
				default:
					panic("inconsistent return codes");
				}
			}

			/*
			 * Allocate a new page for this object/offset pair.
			 *
			 * An unlocked read of the p_flag is harmless.  At
			 * worst, the P_KILLED might not be observed
			 * there, and allocation can fail, causing
			 * a restart and a new reading of the p_flag.
			 */
			dset = fs.object->domain.dr_policy;
			if (dset == NULL)
				dset = curthread->td_domain.dr_policy;
			if (!vm_page_count_severe_set(&dset->ds_mask) ||
			    P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
				vm_object_color(fs.object, atop(vaddr) -
				    fs.pindex);
#endif
				alloc_req = P_KILLED(curproc) ?
				    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
				if (fs.object->type != OBJT_VNODE &&
				    fs.object->backing_object == NULL)
					alloc_req |= VM_ALLOC_ZERO;
				fs.m = vm_page_alloc(fs.object, fs.pindex,
				    alloc_req);
			}
			if (fs.m == NULL) {
				unlock_and_deallocate(&fs);
				if (vm_pfault_oom_attempts < 0 ||
				    oom < vm_pfault_oom_attempts) {
					oom++;
					vm_waitpfault(dset,
					    vm_pfault_oom_wait * hz);
					goto RetryFault_oom;
				}
				if (bootverbose)
					printf(
	"proc %d (%s) failed to alloc page on fault, starting OOM\n",
					    curproc->p_pid, curproc->p_comm);
				vm_pageout_oom(VM_OOM_MEM_PF);
				goto RetryFault;
			}
		}

readrest:
		/*
		 * At this point, we have either allocated a new page or found
		 * an existing page that is only partially valid.
		 *
		 * We hold a reference on the current object and the page is
		 * exclusive busied.
		 */

		/*
		 * If the pager for the current object might have the page,
		 * then determine the number of additional pages to read and
		 * potentially reprioritize previously read pages for earlier
		 * reclamation.  These operations should only be performed
		 * once per page fault.  Even if the current pager doesn't
		 * have the page, the number of additional pages to read will
		 * apply to subsequent objects in the shadow chain.
		 */
		if (fs.object->type != OBJT_DEFAULT && nera == -1 &&
		    !P_KILLED(curproc)) {
			KASSERT(fs.lookup_still_valid, ("map unlocked"));
			era = fs.entry->read_ahead;
			behavior = vm_map_entry_behavior(fs.entry);
			if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
				nera = 0;
			} else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
				nera = VM_FAULT_READ_AHEAD_MAX;
				if (vaddr == fs.entry->next_read)
					vm_fault_dontneed(&fs, vaddr, nera);
			} else if (vaddr == fs.entry->next_read) {
				/*
				 * This is a sequential fault.  Arithmetically
				 * increase the requested number of pages in
				 * the read-ahead window.  The requested
				 * number of pages is "# of sequential faults
				 * x (read ahead min + 1) + read ahead min"
				 */
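				/*
				 * For illustration, assuming
				 * VM_FAULT_READ_AHEAD_MIN is 7: a run of
				 * sequential faults requests windows of
				 * 7, 15, 23, ... pages, growing until
				 * capped at VM_FAULT_READ_AHEAD_MAX.
				 */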
				nera = VM_FAULT_READ_AHEAD_MIN;
				if (era > 0) {
					nera += era + 1;
					if (nera > VM_FAULT_READ_AHEAD_MAX)
						nera = VM_FAULT_READ_AHEAD_MAX;
				}
				if (era == VM_FAULT_READ_AHEAD_MAX)
					vm_fault_dontneed(&fs, vaddr, nera);
			} else {
				/*
				 * This is a non-sequential fault.
				 */
				nera = 0;
			}
			if (era != nera) {
				/*
				 * A read lock on the map suffices to update
				 * the read ahead count safely.
				 */
				fs.entry->read_ahead = nera;
			}

			/*
			 * Prepare for unlocking the map.  Save the map
			 * entry's start and end addresses, which are used to
			 * optimize the size of the pager operation below.
			 * Even if the map entry's addresses change after
			 * unlocking the map, using the saved addresses is
			 * safe.
			 */
			e_start = fs.entry->start;
			e_end = fs.entry->end;
		}

		/*
		 * Call the pager to retrieve the page if there is a chance
		 * that the pager has it, and potentially retrieve additional
		 * pages at the same time.
		 */
		if (fs.object->type != OBJT_DEFAULT) {
			/*
			 * Release the map lock before locking the vnode or
			 * sleeping in the pager.  (If the current object has
			 * a shadow, then an earlier iteration of this loop
			 * may have already unlocked the map.)
			 */
			unlock_map(&fs);

			if (fs.object->type == OBJT_VNODE &&
			    (vp = fs.object->handle) != fs.vp) {
				/*
				 * Perform an unlock in case the desired vnode
				 * changed while the map was unlocked during a
				 * retry.
				 */
				unlock_vp(&fs);

				locked = VOP_ISLOCKED(vp);
				if (locked != LK_EXCLUSIVE)
					locked = LK_SHARED;

				/*
				 * We must not sleep acquiring the vnode lock
				 * while we have the page exclusive busied or
				 * the object's paging-in-progress count
				 * incremented.  Otherwise, we could deadlock.
				 */
				error = vget(vp, locked | LK_CANRECURSE |
				    LK_NOWAIT, curthread);
				if (error != 0) {
					vhold(vp);
					release_page(&fs);
					unlock_and_deallocate(&fs);
					error = vget(vp, locked | LK_RETRY |
					    LK_CANRECURSE, curthread);
					vdrop(vp);
					fs.vp = vp;
					KASSERT(error == 0,
					    ("vm_fault: vget failed"));
					goto RetryFault;
				}
				fs.vp = vp;
			}
			KASSERT(fs.vp == NULL || !fs.map->system_map,
			    ("vm_fault: vnode-backed object mapped by system map"));

			/*
			 * Page in the requested page and hint to the pager
			 * that it may bring in surrounding pages.
			 */
			if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
			    P_KILLED(curproc)) {
				behind = 0;
				ahead = 0;
			} else {
				/* Is this a sequential fault? */
				if (nera > 0) {
					behind = 0;
					ahead = nera;
				} else {
					/*
					 * Request a cluster of pages that is
					 * aligned to a VM_FAULT_READ_DEFAULT
					 * page offset boundary within the
					 * object.  Alignment to a page offset
					 * boundary is more likely to coincide
					 * with the underlying file system
					 * block than alignment to a virtual
					 * address boundary.
					 */
					cluster_offset = fs.pindex %
					    VM_FAULT_READ_DEFAULT;
					behind = ulmin(cluster_offset,
					    atop(vaddr - e_start));
					ahead = VM_FAULT_READ_DEFAULT - 1 -
					    cluster_offset;
				}
				ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
			}
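			/*
			 * For illustration, assuming VM_FAULT_READ_DEFAULT
			 * is 16: a non-sequential fault at pindex 37 gives
			 * cluster_offset 5, so up to 5 pages behind and 10
			 * pages ahead are requested, covering the aligned
			 * run of object pages [32, 47].
			 */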
			rv = vm_pager_get_pages(fs.object, &fs.m, 1,
			    &behind, &ahead);
			if (rv == VM_PAGER_OK) {
				faultcount = behind + 1 + ahead;
				hardfault = true;
				break; /* break to PAGE HAS BEEN FOUND */
			}
			if (rv == VM_PAGER_ERROR)
				printf("vm_fault: pager read error, pid %d (%s)\n",
				    curproc->p_pid, curproc->p_comm);

			/*
			 * If an I/O error occurred or the requested page was
			 * outside the range of the pager, clean up and return
			 * an error.
			 */
			if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
				if (!vm_page_wired(fs.m))
					vm_page_free(fs.m);
				else
					vm_page_xunbusy(fs.m);
				fs.m = NULL;
				unlock_and_deallocate(&fs);
				return (KERN_OUT_OF_BOUNDS);
			}

			/*
			 * The requested page does not exist at this object/
			 * offset.  Remove the invalid page from the object,
			 * waking up anyone waiting for it, and continue on to
			 * the next object.  However, if this is the top-level
			 * object, we must leave the busy page in place to
			 * prevent another process from rushing past us, and
			 * inserting the page in that object at the same time
			 * that we are.
			 */
			if (fs.object != fs.first_object) {
				if (!vm_page_wired(fs.m))
					vm_page_free(fs.m);
				else
					vm_page_xunbusy(fs.m);
				fs.m = NULL;
			}
		}

		/*
		 * We get here if the object has a default pager (or unwiring)
		 * or the pager doesn't have the page.
		 */
		if (fs.object == fs.first_object)
			fs.first_m = fs.m;

		/*
		 * Move on to the next object.  Lock the next object before
		 * unlocking the current one.
		 */
		next_object = fs.object->backing_object;
		if (next_object == NULL) {
			/*
			 * If there's no object left, fill the page in the top
			 * object with zeros.
			 */
			if (fs.object != fs.first_object) {
				vm_object_pip_wakeup(fs.object);
				VM_OBJECT_WUNLOCK(fs.object);

				fs.object = fs.first_object;
				fs.pindex = fs.first_pindex;
				fs.m = fs.first_m;
				VM_OBJECT_WLOCK(fs.object);
			}
			fs.first_m = NULL;

			/*
			 * Zero the page if necessary and mark it valid.
			 */
			if ((fs.m->flags & PG_ZERO) == 0) {
				pmap_zero_page(fs.m);
			} else {
				VM_CNT_INC(v_ozfod);
			}
			VM_CNT_INC(v_zfod);
			fs.m->valid = VM_PAGE_BITS_ALL;
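			/*
			 * Note: v_zfod counts every zero-fill fault, while
			 * v_ozfod above counts only the subset satisfied by a
			 * preexisting PG_ZERO page, which lets us skip
			 * pmap_zero_page().
			 */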
			/* Don't try to prefault neighboring pages. */
			faultcount = 1;
			break;	/* break to PAGE HAS BEEN FOUND */
		} else {
			KASSERT(fs.object != next_object,
			    ("object loop %p", next_object));
			VM_OBJECT_WLOCK(next_object);
			vm_object_pip_add(next_object, 1);
			if (fs.object != fs.first_object)
				vm_object_pip_wakeup(fs.object);
			fs.pindex +=
			    OFF_TO_IDX(fs.object->backing_object_offset);
			VM_OBJECT_WUNLOCK(fs.object);
			fs.object = next_object;
		}
	}

	vm_page_assert_xbusied(fs.m);

	/*
	 * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
	 * is held.]
	 */

	/*
	 * If the page is being written, but isn't already owned by the
	 * top-level object, we have to copy it into a new page owned by the
	 * top-level object.
	 */
	if (fs.object != fs.first_object) {
		/*
		 * We only really need to copy if we want to write it.
		 */
		if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
			/*
			 * This allows pages to be virtually copied from a
			 * backing_object into the first_object, where the
			 * backing object has no other refs to it, and cannot
			 * gain any more refs.  Instead of a bcopy, we just
			 * move the page from the backing object to the
			 * first object.  Note that we must mark the page
			 * dirty in the first object so that it will go out
			 * to swap when needed.
			 */
			is_first_object_locked = false;
			if (
			    /*
			     * Only one shadow object
			     */
			    (fs.object->shadow_count == 1) &&
			    /*
			     * No COW refs, except us
			     */
			    (fs.object->ref_count == 1) &&
			    /*
			     * No one else can look this object up
			     */
			    (fs.object->handle == NULL) &&
			    /*
			     * No other ways to look the object up
			     */
			    ((fs.object->type == OBJT_DEFAULT) ||
			    (fs.object->type == OBJT_SWAP)) &&
			    (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) &&
			    /*
			     * We don't chase down the shadow chain
			     */
			    fs.object == fs.first_object->backing_object) {

				(void)vm_page_remove(fs.m);
				vm_page_replace_checked(fs.m, fs.first_object,
				    fs.first_pindex, fs.first_m);
				vm_page_free(fs.first_m);
				vm_page_dirty(fs.m);
#if VM_NRESERVLEVEL > 0
				/*
				 * Rename the reservation.
				 */
				vm_reserv_rename(fs.m, fs.first_object,
				    fs.object, OFF_TO_IDX(
				    fs.first_object->backing_object_offset));
#endif
				/*
				 * Removing the page from the backing object
				 * unbusied it.
				 */
				vm_page_xbusy(fs.m);
				fs.first_m = fs.m;
				fs.m = NULL;
				VM_CNT_INC(v_cow_optim);
			} else {
				/*
				 * Oh, well, let's copy it.
				 */
				pmap_copy_page(fs.m, fs.first_m);
				fs.first_m->valid = VM_PAGE_BITS_ALL;
				if (wired && (fault_flags &
				    VM_FAULT_WIRE) == 0) {
					vm_page_wire(fs.first_m);
					vm_page_unwire(fs.m, PQ_INACTIVE);
				}
				/*
				 * We no longer need the old page or object.
				 */
				release_page(&fs);
			}
			/*
			 * fs.object != fs.first_object due to above
			 * conditional
			 */
			vm_object_pip_wakeup(fs.object);
			VM_OBJECT_WUNLOCK(fs.object);

			/*
			 * We only try to prefault read-only mappings to the
			 * neighboring pages when this copy-on-write fault is
			 * a hard fault.  In other cases, trying to prefault
			 * is typically wasted effort.
			 */
			if (faultcount == 0)
				faultcount = 1;

			/*
			 * Only use the new page below...
			 */
			fs.object = fs.first_object;
			fs.pindex = fs.first_pindex;
			fs.m = fs.first_m;
			if (!is_first_object_locked)
				VM_OBJECT_WLOCK(fs.object);
			VM_CNT_INC(v_cow_faults);
			curthread->td_cow++;
		} else {
			prot &= ~VM_PROT_WRITE;
		}
	}

	/*
	 * We must verify that the maps have not changed since our last
	 * lookup.
	 */
	if (!fs.lookup_still_valid) {
		if (!vm_map_trylock_read(fs.map)) {
			release_page(&fs);
			unlock_and_deallocate(&fs);
			goto RetryFault;
		}
		fs.lookup_still_valid = true;
		if (fs.map->timestamp != fs.map_generation) {
			result = vm_map_lookup_locked(&fs.map, vaddr, fault_type,
			    &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired);

			/*
			 * If we don't need the page any longer, put it on the inactive
			 * list (the easiest thing to do here).  If no one needs it,
			 * pageout will grab it eventually.
			 */
			if (result != KERN_SUCCESS) {
				release_page(&fs);
				unlock_and_deallocate(&fs);

				/*
				 * If retry of map lookup would have blocked then
				 * retry fault from start.
				 */
				if (result == KERN_FAILURE)
					goto RetryFault;
				return (result);
			}
			if ((retry_object != fs.first_object) ||
			    (retry_pindex != fs.first_pindex)) {
				release_page(&fs);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}

			/*
			 * Check whether the protection has changed or the object has
			 * been copied while we left the map unlocked.  Changing from
			 * read to write permission is OK - we leave the page
			 * write-protected, and catch the write fault.  Changing from
			 * write to read permission means that we can't mark the page
			 * write-enabled after all.
			 */
			prot &= retry_prot;
			fault_type &= retry_prot;
			if (prot == 0) {
				release_page(&fs);
				unlock_and_deallocate(&fs);
				goto RetryFault;
			}
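			/*
			 * For example, if this was a read fault on a
			 * read/write mapping and another thread mprotect()ed
			 * the range down to read-only while the map was
			 * unlocked, the intersection above drops
			 * VM_PROT_WRITE and the page is entered read-only,
			 * matching the entry's new protection.
			 */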
			/* Reassert because wired may have changed. */
			KASSERT(wired || (fault_flags & VM_FAULT_WIRE) == 0,
			    ("!wired && VM_FAULT_WIRE"));
		}
	}

	/*
	 * If the page was filled by a pager, save the virtual address that
	 * should be faulted on next under a sequential access pattern to the
	 * map entry.  A read lock on the map suffices to update this address
	 * safely.
	 */
	if (hardfault)
		fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE;

	vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags, true);
	vm_page_assert_xbusied(fs.m);

	/*
	 * Page must be completely valid or it is not fit to
	 * map into user space.  vm_pager_get_pages() ensures this.
	 */
	KASSERT(fs.m->valid == VM_PAGE_BITS_ALL,
	    ("vm_fault: page %p partially invalid", fs.m));
	VM_OBJECT_WUNLOCK(fs.object);

	/*
	 * Put this page into the physical map.  We had to do the unlock above
	 * because pmap_enter() may sleep.  We don't put the page
	 * back on the active queue until later so that the pageout daemon
	 * won't find it (yet).
	 */
	pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
	    fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
	if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 &&
	    wired == 0)
		vm_fault_prefault(&fs, vaddr,
		    faultcount > 0 ? behind : PFBAK,
		    faultcount > 0 ? ahead : PFFOR, false);
	VM_OBJECT_WLOCK(fs.object);

	/*
	 * If the page is not wired down, then put it where the pageout daemon
	 * can find it.
	 */
	if ((fault_flags & VM_FAULT_WIRE) != 0) {
		vm_page_wire(fs.m);
	} else {
		vm_page_lock(fs.m);
		vm_page_activate(fs.m);
		vm_page_unlock(fs.m);
	}
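	/*
	 * Note: a page wired here is never visited by the pageout daemon and
	 * stays resident until the wiring is released, whereas an activated
	 * page becomes eligible for reclamation once memory pressure pushes
	 * it off the active queue.
	 */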
	if (m_hold != NULL) {
		*m_hold = fs.m;
		vm_page_wire(fs.m);
	}
	vm_page_xunbusy(fs.m);

	/*
	 * Unlock everything, and return
	 */
	unlock_and_deallocate(&fs);
	if (hardfault) {
		VM_CNT_INC(v_io_faults);
		curthread->td_ru.ru_majflt++;
#ifdef RACCT
		if (racct_enable && fs.object->type == OBJT_VNODE) {
			PROC_LOCK(curproc);
			if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) {
				racct_add_force(curproc, RACCT_WRITEBPS,
				    PAGE_SIZE + behind * PAGE_SIZE);
				racct_add_force(curproc, RACCT_WRITEIOPS, 1);
			} else {
				racct_add_force(curproc, RACCT_READBPS,
				    PAGE_SIZE + ahead * PAGE_SIZE);
				racct_add_force(curproc, RACCT_READIOPS, 1);
			}
			PROC_UNLOCK(curproc);
		}
#endif
	} else
		curthread->td_ru.ru_minflt++;

	return (KERN_SUCCESS);
}
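/*
 * A minimal caller-side sketch for vm_fault() above (illustrative only, not
 * compiled; "map" and "va" stand for caller state).  This mirrors what
 * vm_fault_quick_hold_pages() does below for each page that the pmap could
 * not supply:
 */
#if 0
	vm_page_t m;

	if (vm_fault(map, va, VM_PROT_READ, VM_FAULT_NORMAL, &m) !=
	    KERN_SUCCESS)
		return (EFAULT);
	/* The page is returned wired because a non-NULL m_hold was passed. */
	vm_page_unwire(m, PQ_INACTIVE);
#endif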
/*
 * Speed up the reclamation of pages that precede the faulting pindex within
 * the first object of the shadow chain.  Essentially, perform the equivalent
 * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes
 * the faulting pindex by the cluster size when the pages read by vm_fault()
 * cross a cluster-size boundary.  The cluster size is the greater of the
 * smallest superpage size and VM_FAULT_DONTNEED_MIN.
 *
 * When "fs->first_object" is a shadow object, the pages in the backing object
 * that precede the faulting pindex are deactivated by vm_fault().  So, this
 * function must only be concerned with pages in the first object.
 */
static void
vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead)
{
	vm_map_entry_t entry;
	vm_object_t first_object, object;
	vm_offset_t end, start;
	vm_page_t m, m_next;
	vm_pindex_t pend, pstart;
	vm_size_t size;

	object = fs->object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	first_object = fs->first_object;
	if (first_object != object) {
		if (!VM_OBJECT_TRYWLOCK(first_object)) {
			VM_OBJECT_WUNLOCK(object);
			VM_OBJECT_WLOCK(first_object);
			VM_OBJECT_WLOCK(object);
		}
	}
	/* Neither fictitious nor unmanaged pages can be reclaimed. */
	if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) {
		size = VM_FAULT_DONTNEED_MIN;
		if (MAXPAGESIZES > 1 && size < pagesizes[1])
			size = pagesizes[1];
		end = rounddown2(vaddr, size);
		if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) &&
		    (entry = fs->entry)->start < end) {
			if (end - entry->start < size)
				start = entry->start;
			else
				start = end - size;
			pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED);
			pstart = OFF_TO_IDX(entry->offset) + atop(start -
			    entry->start);
			m_next = vm_page_find_least(first_object, pstart);
			pend = OFF_TO_IDX(entry->offset) + atop(end -
			    entry->start);
			while ((m = m_next) != NULL && m->pindex < pend) {
				m_next = TAILQ_NEXT(m, listq);
				if (m->valid != VM_PAGE_BITS_ALL ||
				    vm_page_busied(m))
					continue;

				/*
				 * Don't clear PGA_REFERENCED, since it would
				 * likely represent a reference by a different
				 * process.
				 *
				 * Typically, at this point, prefetched pages
				 * are still in the inactive queue.  Only
				 * pages that triggered page faults are in the
				 * active queue.
				 */
				vm_page_lock(m);
				if (!vm_page_inactive(m))
					vm_page_deactivate(m);
				vm_page_unlock(m);
			}
		}
	}
	if (first_object != object)
		VM_OBJECT_WUNLOCK(first_object);
}
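/*
 * Worked example for vm_fault_dontneed() above, with illustrative figures:
 * if the smallest superpage were 2MB (and VM_FAULT_DONTNEED_MIN no larger),
 * size would be 2MB.  Once the pages read around a fault reach the top of
 * the current 2MB-aligned window of the entry, the window immediately below
 * it is passed to pmap_advise(..., MADV_DONTNEED) and its fully valid,
 * unbusied pages are deactivated.
 */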
/*
 * vm_fault_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of vm_map_pmap_enter, except it runs at page fault time instead
 * of mmap time.
 */
static void
vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
    int backward, int forward, bool obj_locked)
{
	pmap_t pmap;
	vm_map_entry_t entry;
	vm_object_t backing_object, lobject;
	vm_offset_t addr, starta;
	vm_pindex_t pindex;
	vm_page_t m;
	int i;

	pmap = fs->map->pmap;
	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))
		return;

	entry = fs->entry;

	if (addra < backward * PAGE_SIZE) {
		starta = entry->start;
	} else {
		starta = addra - backward * PAGE_SIZE;
		if (starta < entry->start)
			starta = entry->start;
	}

	/*
	 * Generate the sequence of virtual addresses that are candidates for
	 * prefaulting in an outward spiral from the faulting virtual address,
	 * "addra".  Specifically, the sequence is "addra - PAGE_SIZE", "addra
	 * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ...
	 * If the candidate address doesn't have a backing physical page, then
	 * the loop immediately terminates.
	 */
	for (i = 0; i < 2 * imax(backward, forward); i++) {
		addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? -PAGE_SIZE :
		    PAGE_SIZE);
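		/*
		 * For example, with backward = 2 and forward = 3, "addr"
		 * takes the values addra - P, addra + P, addra - 2P,
		 * addra + 2P, addra - 3P, and addra + 3P across iterations,
		 * where P is PAGE_SIZE; the checks below skip candidates
		 * that lie beyond either bound.
		 */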
		if (addr > addra + forward * PAGE_SIZE)
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if (!pmap_is_prefaultable(pmap, addr))
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = entry->object.vm_object;
		if (!obj_locked)
			VM_OBJECT_RLOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			KASSERT((lobject->backing_object_offset & PAGE_MASK) ==
			    0, ("vm_fault_prefault: unaligned object offset"));
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_RLOCK(backing_object);
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			lobject = backing_object;
		}
		if (m == NULL) {
			if (!obj_locked || lobject != entry->object.vm_object)
				VM_OBJECT_RUNLOCK(lobject);
			break;
		}
		if (m->valid == VM_PAGE_BITS_ALL &&
		    (m->flags & PG_FICTITIOUS) == 0)
			pmap_enter_quick(pmap, addr, m, entry->protection);
		if (!obj_locked || lobject != entry->object.vm_object)
			VM_OBJECT_RUNLOCK(lobject);
	}
}

/*
 * Hold each of the physical pages that are mapped by the specified range of
 * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid
 * and allow the specified types of access, "prot".  If all of the implied
 * pages are successfully held, then the number of held pages is returned
 * together with pointers to those pages in the array "ma".  However, if any
 * of the pages cannot be held, -1 is returned.
 */
int
vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len,
    vm_prot_t prot, vm_page_t *ma, int max_count)
{
	vm_offset_t end, va;
	vm_page_t *mp;
	int count;
	boolean_t pmap_failed;

	if (len == 0)
		return (0);
	end = round_page(addr + len);
	addr = trunc_page(addr);

	/*
	 * Check for illegal addresses.
	 */
	if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map))
		return (-1);

	if (atop(end - addr) > max_count)
		panic("vm_fault_quick_hold_pages: count > max_count");
	count = atop(end - addr);

	/*
	 * Most likely, the physical pages are resident in the pmap, so it is
	 * faster to try pmap_extract_and_hold() first.
	 */
	pmap_failed = FALSE;
	for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) {
		*mp = pmap_extract_and_hold(map->pmap, va, prot);
		if (*mp == NULL)
			pmap_failed = TRUE;
		else if ((prot & VM_PROT_WRITE) != 0 &&
		    (*mp)->dirty != VM_PAGE_BITS_ALL) {
			/*
			 * Explicitly dirty the physical page.  Otherwise, the
			 * caller's changes may go unnoticed because they are
			 * performed through an unmanaged mapping or by a DMA
			 * operation.
			 *
			 * The object lock is not held here.
			 * See vm_page_clear_dirty_mask().
			 */
			vm_page_dirty(*mp);
		}
	}
	if (pmap_failed) {
		/*
		 * One or more pages could not be held by the pmap.  Either no
		 * page was mapped at the specified virtual address or that
		 * mapping had insufficient permissions.  Attempt to fault in
		 * and hold these pages.
		 *
		 * If vm_fault_disable_pagefaults() was called,
		 * i.e., TDP_NOFAULTING is set, we must not sleep nor
		 * acquire MD VM locks, which means we must not call
		 * vm_fault().  Some (out of tree) callers already mark
		 * too wide a code area with vm_fault_disable_pagefaults();
		 * use the VM_PROT_QUICK_NOFAULT flag to request the proper
		 * behaviour explicitly.
		 */
		if ((prot & VM_PROT_QUICK_NOFAULT) != 0 &&
		    (curthread->td_pflags & TDP_NOFAULTING) != 0)
			goto error;
		for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE)
			if (*mp == NULL && vm_fault(map, va, prot,
			    VM_FAULT_NORMAL, mp) != KERN_SUCCESS)
				goto error;
	}
	return (count);
error:
	for (mp = ma; mp < ma + count; mp++)
		if (*mp != NULL)
			vm_page_unwire(*mp, PQ_INACTIVE);
	return (-1);
}
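/*
 * A minimal usage sketch for vm_fault_quick_hold_pages() (illustrative only,
 * not compiled; "map", "uaddr", and "len" stand for caller state):
 */
#if 0
	vm_page_t ma[4];
	int cnt;

	cnt = vm_fault_quick_hold_pages(map, uaddr, len,
	    VM_PROT_READ | VM_PROT_WRITE, ma, nitems(ma));
	if (cnt == -1)
		return (EFAULT);
	/* ... operate on the wired pages ma[0 .. cnt - 1] ... */
	vm_page_unhold_pages(ma, cnt);
#endif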
/*
 * Routine:
 *	vm_fault_copy_entry
 * Function:
 *	Create new shadow object backing dst_entry with private copy of
 *	all underlying pages.  When src_entry is equal to dst_entry,
 *	function implements COW for wired-down map entry.  Otherwise,
 *	it forks wired entry into dst_map.
 *
 * In/out conditions:
 *	The source and destination maps must be locked for write.
 *	The source map entry must be wired down (or be a sharing map
 *	entry corresponding to a main map entry that is wired down).
 */
void
vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
    vm_map_entry_t dst_entry, vm_map_entry_t src_entry,
    vm_ooffset_t *fork_charge)
{
	vm_object_t backing_object, dst_object, object, src_object;
	vm_pindex_t dst_pindex, pindex, src_pindex;
	vm_prot_t access, prot;
	vm_offset_t vaddr;
	vm_page_t dst_m;
	vm_page_t src_m;
	boolean_t upgrade;

#ifdef	lint
	src_map++;
#endif	/* lint */

	upgrade = src_entry == dst_entry;
	access = prot = dst_entry->protection;

	src_object = src_entry->object.vm_object;
	src_pindex = OFF_TO_IDX(src_entry->offset);

	if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) {
		dst_object = src_object;
		vm_object_reference(dst_object);
	} else {
		/*
		 * Create the top-level object for the destination entry.
		 * (Doesn't actually shadow anything - we copy the pages
		 * directly.)
		 */
		dst_object = vm_object_allocate(OBJT_DEFAULT,
		    atop(dst_entry->end - dst_entry->start));
#if VM_NRESERVLEVEL > 0
		dst_object->flags |= OBJ_COLORED;
		dst_object->pg_color = atop(dst_entry->start);
#endif
		dst_object->domain = src_object->domain;
		dst_object->charge = dst_entry->end - dst_entry->start;
	}

	VM_OBJECT_WLOCK(dst_object);
	KASSERT(upgrade || dst_entry->object.vm_object == NULL,
	    ("vm_fault_copy_entry: vm_object not NULL"));
	if (src_object != dst_object) {
		dst_entry->object.vm_object = dst_object;
		dst_entry->offset = 0;
		dst_entry->eflags &= ~MAP_ENTRY_VN_EXEC;
	}
	if (fork_charge != NULL) {
		KASSERT(dst_entry->cred == NULL,
		    ("vm_fault_copy_entry: leaked swp charge"));
		dst_object->cred = curthread->td_ucred;
		crhold(dst_object->cred);
		*fork_charge += dst_object->charge;
	} else if ((dst_object->type == OBJT_DEFAULT ||
	    dst_object->type == OBJT_SWAP) &&
	    dst_object->cred == NULL) {
		KASSERT(dst_entry->cred != NULL, ("no cred for entry %p",
		    dst_entry));
		dst_object->cred = dst_entry->cred;
		dst_entry->cred = NULL;
	}

	/*
	 * If not an upgrade, then enter the mappings in the pmap as
	 * read and/or execute accesses.  Otherwise, enter them as
	 * write accesses.
	 *
	 * A writeable large page mapping is only created if all of
	 * the constituent small page mappings are modified. Marking
	 * PTEs as modified on inception allows promotion to happen
	 * without taking potentially large number of soft faults.
	 */
	if (!upgrade)
		access &= ~VM_PROT_WRITE;

	/*
	 * Loop through all of the virtual pages within the entry's
	 * range, copying each page from the source object to the
	 * destination object.  Since the source is wired, those pages
	 * must exist.  In contrast, the destination is pageable.
	 * Since the destination object doesn't share any backing storage
	 * with the source object, all of its pages must be dirtied,
	 * regardless of whether they can be written.
	 */
	for (vaddr = dst_entry->start, dst_pindex = 0;
	    vaddr < dst_entry->end;
	    vaddr += PAGE_SIZE, dst_pindex++) {
again:
		/*
		 * Find the page in the source object, and copy it in.
		 * Because the source is wired down, the page will be
		 * in memory.
		 */
		if (src_object != dst_object)
			VM_OBJECT_RLOCK(src_object);
		object = src_object;
		pindex = src_pindex + dst_pindex;
		while ((src_m = vm_page_lookup(object, pindex)) == NULL &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Unless the source mapping is read-only or
			 * it is presently being upgraded from
			 * read-only, the first object in the shadow
			 * chain should provide all of the pages.  In
			 * other words, this loop body should never be
			 * executed when the source mapping is already
			 * read/write.
			 */
			KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 ||
			    upgrade,
			    ("vm_fault_copy_entry: main object missing page"));

			VM_OBJECT_RLOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			if (object != dst_object)
				VM_OBJECT_RUNLOCK(object);
			object = backing_object;
		}
		KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing"));

		if (object != dst_object) {
			/*
			 * Allocate a page in the destination object.
			 */
			dst_m = vm_page_alloc(dst_object, (src_object ==
			    dst_object ? src_pindex : 0) + dst_pindex,
			    VM_ALLOC_NORMAL);
			if (dst_m == NULL) {
				VM_OBJECT_WUNLOCK(dst_object);
				VM_OBJECT_RUNLOCK(object);
				vm_wait(dst_object);
				VM_OBJECT_WLOCK(dst_object);
				goto again;
			}
			pmap_copy_page(src_m, dst_m);
			VM_OBJECT_RUNLOCK(object);
			dst_m->dirty = dst_m->valid = src_m->valid;
		} else {
			dst_m = src_m;
			if (vm_page_busy_acquire(dst_m, VM_ALLOC_WAITFAIL) == 0)
				goto again;
			if (dst_m->pindex >= dst_object->size) {
				/*
				 * We are upgrading.  Index can occur
				 * out of bounds if the object type is
				 * vnode and the file was truncated.
				 */
				vm_page_xunbusy(dst_m);
				break;
			}
		}
		VM_OBJECT_WUNLOCK(dst_object);
		/*
		 * Enter it in the pmap.  If a wired, copy-on-write
		 * mapping is being replaced by a write-enabled
		 * mapping, then wire that new mapping.
		 *
		 * The page can be invalid if the user called
		 * msync(MS_INVALIDATE) or truncated the backing vnode
		 * or shared memory object.  In this case, do not
		 * insert it into pmap, but still do the copy so that
		 * all copies of the wired map entry have similar
		 * backing pages.
		 */
		if (dst_m->valid == VM_PAGE_BITS_ALL) {
			pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
			    access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);
		}

		/*
		 * Mark it no longer busy, and put it on the active list.
		 */
		VM_OBJECT_WLOCK(dst_object);

		if (upgrade) {
			if (src_m != dst_m) {
				vm_page_unwire(src_m, PQ_INACTIVE);
				vm_page_wire(dst_m);
			} else {
				KASSERT(vm_page_wired(dst_m),
				    ("dst_m %p is not wired", dst_m));
			}
		} else {
			vm_page_lock(dst_m);
			vm_page_activate(dst_m);
			vm_page_unlock(dst_m);
		}
		vm_page_xunbusy(dst_m);
	}
	VM_OBJECT_WUNLOCK(dst_object);
	if (upgrade) {
		dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY);
		vm_object_deallocate(src_object);
	}
}

/*
 * Block entry into the machine-independent layer's page fault handler by
 * the calling thread.  Subsequent calls to vm_fault() by that thread will
 * return KERN_PROTECTION_FAILURE.  Enable machine-dependent handling of
 * spurious page faults.
 */
int
vm_fault_disable_pagefaults(void)
{

	return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR));
}

void
vm_fault_enable_pagefaults(int save)
{

	curthread_pflags_restore(save);
}
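/*
 * A minimal usage sketch for the pagefault-disable bracket above
 * (illustrative only, not compiled; "uaddr" and "buf" stand for caller
 * state).  Within the bracket, a copyin() that would have to page data in
 * fails fast instead of sleeping in vm_fault():
 */
#if 0
	int error, save;

	save = vm_fault_disable_pagefaults();
	error = copyin(uaddr, buf, sizeof(*buf));
	vm_fault_enable_pagefaults(save);
	if (error != 0)
		return (error);	/* Retry from a context that may fault. */
#endif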