/*-
 * SPDX-License-Identifier: (BSD-4-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *    The Regents of the University of California.  All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *    from: @(#)vm_fault.c    8.4 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *    Page fault handling module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>

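/*
 * PFBAK and PFFOR bound the window that vm_fault_prefault() walks
 * backward and forward from the faulting address, while the
 * VM_FAULT_READ_* constants size the clusters requested from the
 * pager on a hard fault.
 */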
#define PFBAK 4
#define PFFOR 4

#define VM_FAULT_READ_DEFAULT    (1 + VM_FAULT_READ_AHEAD_INIT)
#define VM_FAULT_READ_MAX    (1 + VM_FAULT_READ_AHEAD_MAX)

#define VM_FAULT_DONTNEED_MIN    1048576

struct faultstate {
    vm_page_t m;
    vm_object_t object;
    vm_pindex_t pindex;
    vm_page_t first_m;
    vm_object_t first_object;
    vm_pindex_t first_pindex;
    vm_map_t map;
    vm_map_entry_t entry;
    int map_generation;
    bool lookup_still_valid;
    struct vnode *vp;
};

static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
        int ahead);
static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
        int backward, int forward);

static inline void
release_page(struct faultstate *fs)
{

    vm_page_xunbusy(fs->m);
    vm_page_lock(fs->m);
    vm_page_deactivate(fs->m);
    vm_page_unlock(fs->m);
    fs->m = NULL;
}

static inline void
unlock_map(struct faultstate *fs)
{

    if (fs->lookup_still_valid) {
        vm_map_lookup_done(fs->map, fs->entry);
        fs->lookup_still_valid = false;
    }
}

static void
unlock_vp(struct faultstate *fs)
{

    if (fs->vp != NULL) {
        vput(fs->vp);
        fs->vp = NULL;
    }
}

static void
unlock_and_deallocate(struct faultstate *fs)
{

    vm_object_pip_wakeup(fs->object);
    VM_OBJECT_WUNLOCK(fs->object);
    if (fs->object != fs->first_object) {
        VM_OBJECT_WLOCK(fs->first_object);
        vm_page_lock(fs->first_m);
        vm_page_free(fs->first_m);
        vm_page_unlock(fs->first_m);
        vm_object_pip_wakeup(fs->first_object);
        VM_OBJECT_WUNLOCK(fs->first_object);
        fs->first_m = NULL;
    }
    vm_object_deallocate(fs->first_object);
    unlock_map(fs);
    unlock_vp(fs);
}
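/*
 * Dirty the given page as a side effect of a write fault and manage its
 * VPO_NOSYNC flag according to the map entry; set_wd indicates whether
 * the object is write-locked, which is required for calling the pager.
 */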
static void
vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
    vm_prot_t fault_type, int fault_flags, bool set_wd)
{
    bool need_dirty;

    if (((prot & VM_PROT_WRITE) == 0 &&
        (fault_flags & VM_FAULT_DIRTY) == 0) ||
        (m->oflags & VPO_UNMANAGED) != 0)
        return;

    VM_OBJECT_ASSERT_LOCKED(m->object);

    need_dirty = ((fault_type & VM_PROT_WRITE) != 0 &&
        (fault_flags & VM_FAULT_WIRE) == 0) ||
        (fault_flags & VM_FAULT_DIRTY) != 0;

    if (set_wd)
        vm_object_set_writeable_dirty(m->object);
    else
        /*
         * If two callers of vm_fault_dirty() with set_wd ==
         * false race, one for a map entry with the
         * MAP_ENTRY_NOSYNC flag set and the other with it
         * clear, the no-NOSYNC thread may see m->dirty != 0
         * and fail to clear VPO_NOSYNC.  Take the vm_page
         * lock around the manipulation of VPO_NOSYNC and the
         * vm_page_dirty() call to avoid the race and keep
         * m->oflags consistent.
         */
        vm_page_lock(m);

    /*
     * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
     * if the page is already dirty to prevent data written with
     * the expectation of being synced from not being synced.
     * Likewise if this entry does not request NOSYNC then make
     * sure the page isn't marked NOSYNC.  Applications sharing
     * data should use the same flags to avoid ping-ponging.
     */
    if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0) {
        if (m->dirty == 0) {
            m->oflags |= VPO_NOSYNC;
        }
    } else {
        m->oflags &= ~VPO_NOSYNC;
    }

    /*
     * If the fault is a write, we know that this page is being
     * written NOW so dirty it explicitly to save on
     * pmap_is_modified() calls later.
     *
     * Also, since the page is now dirty, we can possibly tell
     * the pager to release any swap backing the page.  Calling
     * the pager requires a write lock on the object.
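     * That is why vm_pager_page_unswapped() below is called only in
     * the set_wd case, where the object is known to be write-locked.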
     */
    if (need_dirty)
        vm_page_dirty(m);
    if (!set_wd)
        vm_page_unlock(m);
    else if (need_dirty)
        vm_pager_page_unswapped(m);
}

static void
vm_fault_fill_hold(vm_page_t *m_hold, vm_page_t m)
{

    if (m_hold != NULL) {
        *m_hold = m;
        vm_page_lock(m);
        vm_page_hold(m);
        vm_page_unlock(m);
    }
}

/*
 * Unlocks fs.first_object and fs.map on success.
 */
static int
vm_fault_soft_fast(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
    vm_page_t m, m_map;
#if defined(__amd64__) && VM_NRESERVLEVEL > 0
    vm_page_t m_super;
    int flags;
#endif
    int psind, rv;

    MPASS(fs->vp == NULL);
    m = vm_page_lookup(fs->first_object, fs->first_pindex);
    /* A busy page can be mapped for read|execute access. */
    if (m == NULL || ((prot & VM_PROT_WRITE) != 0 &&
        vm_page_busied(m)) || m->valid != VM_PAGE_BITS_ALL)
        return (KERN_FAILURE);
    m_map = m;
    psind = 0;
#if defined(__amd64__) && VM_NRESERVLEVEL > 0
    if ((m->flags & PG_FICTITIOUS) == 0 &&
        (m_super = vm_reserv_to_superpage(m)) != NULL &&
        rounddown2(vaddr, pagesizes[m_super->psind]) >= fs->entry->start &&
        roundup2(vaddr + 1, pagesizes[m_super->psind]) <= fs->entry->end &&
        (vaddr & (pagesizes[m_super->psind] - 1)) == (VM_PAGE_TO_PHYS(m) &
        (pagesizes[m_super->psind] - 1)) &&
        pmap_ps_enabled(fs->map->pmap)) {
        flags = PS_ALL_VALID;
        if ((prot & VM_PROT_WRITE) != 0) {
            /*
             * Create a superpage mapping allowing write access
             * only if none of the constituent pages are busy and
             * all of them are already dirty (except possibly for
             * the page that was faulted on).
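             * (On amd64, for example, a 2MB superpage has 512
             * constituent base pages, so all of them must be
             * valid and, for write access, all but the faulted
             * page must already be dirty.)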
             */
            flags |= PS_NONE_BUSY;
            if ((fs->first_object->flags & OBJ_UNMANAGED) == 0)
                flags |= PS_ALL_DIRTY;
        }
        if (vm_page_ps_test(m_super, flags, m)) {
            m_map = m_super;
            psind = m_super->psind;
            vaddr = rounddown2(vaddr, pagesizes[psind]);
            /* Preset the modified bit for dirty superpages. */
            if ((flags & PS_ALL_DIRTY) != 0)
                fault_type |= VM_PROT_WRITE;
        }
    }
#endif
    rv = pmap_enter(fs->map->pmap, vaddr, m_map, prot, fault_type |
        PMAP_ENTER_NOSLEEP | (wired ? PMAP_ENTER_WIRED : 0), psind);
    if (rv != KERN_SUCCESS)
        return (rv);
    vm_fault_fill_hold(m_hold, m);
    vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags, false);
    VM_OBJECT_RUNLOCK(fs->first_object);
    if (psind == 0 && !wired)
        vm_fault_prefault(fs, vaddr, PFBAK, PFFOR);
    vm_map_lookup_done(fs->map, fs->entry);
    curthread->td_ru.ru_minflt++;
    return (KERN_SUCCESS);
}

static void
vm_fault_restore_map_lock(struct faultstate *fs)
{

    VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
    MPASS(fs->first_object->paging_in_progress > 0);

    if (!vm_map_trylock_read(fs->map)) {
        VM_OBJECT_WUNLOCK(fs->first_object);
        vm_map_lock_read(fs->map);
        VM_OBJECT_WLOCK(fs->first_object);
    }
    fs->lookup_still_valid = true;
}

static void
vm_fault_populate_check_page(vm_page_t m)
{

    /*
     * Check each page to ensure that the pager is obeying the
     * interface: the page must be installed in the object, fully
     * valid, and exclusively busied.
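     * A violation indicates a bug in the pager's populate() method,
     * hence assertions rather than recoverable errors.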
     */
    MPASS(m != NULL);
    MPASS(m->valid == VM_PAGE_BITS_ALL);
    MPASS(vm_page_xbusied(m));
}

static void
vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
    vm_pindex_t last)
{
    vm_page_t m;
    vm_pindex_t pidx;

    VM_OBJECT_ASSERT_WLOCKED(object);
    MPASS(first <= last);
    for (pidx = first, m = vm_page_lookup(object, pidx);
        pidx <= last; pidx++, m = vm_page_next(m)) {
        vm_fault_populate_check_page(m);
        vm_page_lock(m);
        vm_page_deactivate(m);
        vm_page_unlock(m);
        vm_page_xunbusy(m);
    }
}

static int
vm_fault_populate(struct faultstate *fs, vm_offset_t vaddr, vm_prot_t prot,
    int fault_type, int fault_flags, boolean_t wired, vm_page_t *m_hold)
{
    vm_page_t m;
    vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
    int rv;

    MPASS(fs->object == fs->first_object);
    VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
    MPASS(fs->first_object->paging_in_progress > 0);
    MPASS(fs->first_object->backing_object == NULL);
    MPASS(fs->lookup_still_valid);

    pager_first = OFF_TO_IDX(fs->entry->offset);
    pager_last = pager_first + atop(fs->entry->end - fs->entry->start) - 1;
    unlock_map(fs);
    unlock_vp(fs);

    /*
     * Call the pager (driver) populate() method.
     *
     * There is no guarantee that the method will be called again
     * if the current fault is for read, and a future fault is
     * for write.  Report the entry's maximum allowed protection
     * to the driver.
     */
    rv = vm_pager_populate(fs->first_object, fs->first_pindex,
        fault_type, fs->entry->max_protection, &pager_first, &pager_last);

    VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
    if (rv == VM_PAGER_BAD) {
        /*
         * VM_PAGER_BAD is the backdoor for a pager to request
         * normal fault handling.
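         * The caller maps it onto KERN_NOT_RECEIVER and falls
         * back to the ordinary page-in path.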
         */
        vm_fault_restore_map_lock(fs);
        if (fs->map->timestamp != fs->map_generation)
            return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
        return (KERN_NOT_RECEIVER);
    }
    if (rv != VM_PAGER_OK)
        return (KERN_FAILURE); /* AKA SIGSEGV */

    /* Ensure that the driver is obeying the interface. */
    MPASS(pager_first <= pager_last);
    MPASS(fs->first_pindex <= pager_last);
    MPASS(fs->first_pindex >= pager_first);
    MPASS(pager_last < fs->first_object->size);

    vm_fault_restore_map_lock(fs);
    if (fs->map->timestamp != fs->map_generation) {
        vm_fault_populate_cleanup(fs->first_object, pager_first,
            pager_last);
        return (KERN_RESOURCE_SHORTAGE); /* RetryFault */
    }

    /*
     * The map is unchanged after our last unlock.  Process the fault.
     *
     * The range [pager_first, pager_last] that is given to the
     * pager is only a hint.  The pager may populate any range
     * within the object that includes the requested page index.
     * In case the pager expanded the range, clip it to fit into
     * the map entry.
     */
    map_first = OFF_TO_IDX(fs->entry->offset);
    if (map_first > pager_first) {
        vm_fault_populate_cleanup(fs->first_object, pager_first,
            map_first - 1);
        pager_first = map_first;
    }
    map_last = map_first + atop(fs->entry->end - fs->entry->start) - 1;
    if (map_last < pager_last) {
        vm_fault_populate_cleanup(fs->first_object, map_last + 1,
            pager_last);
        pager_last = map_last;
    }
    for (pidx = pager_first, m = vm_page_lookup(fs->first_object, pidx);
        pidx <= pager_last; pidx++, m = vm_page_next(m)) {
        vm_fault_populate_check_page(m);
        vm_fault_dirty(fs->entry, m, prot, fault_type, fault_flags,
            true);
        VM_OBJECT_WUNLOCK(fs->first_object);
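        /*
         * Enter the page into the pmap with the object unlocked.
         * Unlike the soft-fast path above, PMAP_ENTER_NOSLEEP is
         * not passed, so pmap_enter() is allowed to sleep here.
         */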
        pmap_enter(fs->map->pmap, fs->entry->start + IDX_TO_OFF(pidx) -
            fs->entry->offset, m, prot, fault_type | (wired ?
            PMAP_ENTER_WIRED : 0), 0);
        VM_OBJECT_WLOCK(fs->first_object);
        if (pidx == fs->first_pindex)
            vm_fault_fill_hold(m_hold, m);
        vm_page_lock(m);
        if ((fault_flags & VM_FAULT_WIRE) != 0) {
            KASSERT(wired, ("VM_FAULT_WIRE && !wired"));
            vm_page_wire(m);
        } else {
            vm_page_activate(m);
        }
        vm_page_unlock(m);
        vm_page_xunbusy(m);
    }
    curthread->td_ru.ru_majflt++;
    return (KERN_SUCCESS);
}

/*
 *    vm_fault:
 *
 *    Handle a page fault occurring at the given address,
 *    requiring the given permissions, in the map specified.
 *    If successful, the page is inserted into the
 *    associated physical map.
 *
 *    NOTE: the given address should be truncated to the
 *    proper page address.
 *
 *    KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *    a standard error specifying why the fault is fatal is returned.
 *
 *    The map in question must be referenced, and remains so.
 *    Caller may hold no locks.
 */
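/*
 * Illustrative (hypothetical) machine-dependent usage from a trap
 * handler, assuming "far" holds the faulting address and "ftype" the
 * decoded access type:
 *
 *    rv = vm_fault(map, trunc_page(far), ftype, VM_FAULT_NORMAL);
 *    if (rv != KERN_SUCCESS)
 *        (deliver SIGSEGV or SIGBUS to the faulting thread)
 */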
int
vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags)
{
    struct thread *td;
    int result;

    td = curthread;
    if ((td->td_pflags & TDP_NOFAULTING) != 0)
        return (KERN_PROTECTION_FAILURE);
#ifdef KTRACE
    if (map != kernel_map && KTRPOINT(td, KTR_FAULT))
        ktrfault(vaddr, fault_type);
#endif
    result = vm_fault_hold(map, trunc_page(vaddr), fault_type, fault_flags,
        NULL);
#ifdef KTRACE
    if (map != kernel_map && KTRPOINT(td, KTR_FAULTEND))
        ktrfaultend(result);
#endif
    return (result);
}

int
vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
    int fault_flags, vm_page_t *m_hold)
{
    struct faultstate fs;
    struct vnode *vp;
    vm_object_t next_object, retry_object;
    vm_offset_t e_end, e_start;
    vm_pindex_t retry_pindex;
    vm_prot_t prot, retry_prot;
    int ahead, alloc_req, behind, cluster_offset, error, era, faultcount;
    int locked, nera, result, rv;
    u_char behavior;
    boolean_t wired;    /* Passed by reference. */
    bool dead, hardfault, is_first_object_locked;

    VM_CNT_INC(v_vm_faults);
    fs.vp = NULL;
    faultcount = 0;
    nera = -1;
    hardfault = false;

RetryFault:;

    /*
     * Find the backing store object and offset into it to begin the
     * search.
     */
    fs.map = map;
    result = vm_map_lookup(&fs.map, vaddr, fault_type |
        VM_PROT_FAULT_LOOKUP, &fs.entry, &fs.first_object,
        &fs.first_pindex, &prot, &wired);
    if (result != KERN_SUCCESS) {
        unlock_vp(&fs);
        return (result);
    }

    fs.map_generation = fs.map->timestamp;

    if (fs.entry->eflags & MAP_ENTRY_NOFAULT) {
        panic("%s: fault on nofault entry, addr: %#lx",
            __func__, (u_long)vaddr);
    }

    if (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION &&
        fs.entry->wiring_thread != curthread) {
        vm_map_unlock_read(fs.map);
        vm_map_lock(fs.map);
        if (vm_map_lookup_entry(fs.map, vaddr, &fs.entry) &&
            (fs.entry->eflags & MAP_ENTRY_IN_TRANSITION)) {
            unlock_vp(&fs);
            fs.entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
            vm_map_unlock_and_wait(fs.map, 0);
        } else
            vm_map_unlock(fs.map);
        goto RetryFault;
    }

    MPASS((fs.entry->eflags & MAP_ENTRY_GUARD) == 0);

    if (wired)
        fault_type = prot | (fault_type & VM_PROT_COPY);
    else
        KASSERT((fault_flags & VM_FAULT_WIRE) == 0,
            ("!wired && VM_FAULT_WIRE"));

    /*
     * Try to avoid lock contention on the top-level object through
     * special-case handling of some types of page faults, specifically,
     * those that are both (1) mapping an existing page from the top-
     * level object and (2) not having to mark that object as containing
     * dirty pages.  Under these conditions, a read lock on the top-level
     * object suffices, allowing multiple page faults of a similar type to
     * run in parallel on the same top-level object.
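     * For example, concurrent read faults on the text pages of a hot
     * shared library can all proceed under the read lock.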
     */
    if (fs.vp == NULL /* avoid locked vnode leak */ &&
        (fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0 &&
        /* avoid calling vm_object_set_writeable_dirty() */
        ((prot & VM_PROT_WRITE) == 0 ||
        (fs.first_object->type != OBJT_VNODE &&
        (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
        (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
        VM_OBJECT_RLOCK(fs.first_object);
        if ((prot & VM_PROT_WRITE) == 0 ||
            (fs.first_object->type != OBJT_VNODE &&
            (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
            (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0) {
            rv = vm_fault_soft_fast(&fs, vaddr, prot, fault_type,
                fault_flags, wired, m_hold);
            if (rv == KERN_SUCCESS)
                return (rv);
        }
        if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
            VM_OBJECT_RUNLOCK(fs.first_object);
            VM_OBJECT_WLOCK(fs.first_object);
        }
    } else {
        VM_OBJECT_WLOCK(fs.first_object);
    }

    /*
     * Make a reference to this object to prevent its disposal while we
     * are messing with it.  Once we have the reference, the map is free
     * to be diddled.  Since objects reference their shadows (and copies),
     * they will stay around as well.
     *
     * Bump the paging-in-progress count to prevent size changes (e.g.
     * truncation operations) during I/O.
     */
    vm_object_reference_locked(fs.first_object);
    vm_object_pip_add(fs.first_object, 1);

    fs.lookup_still_valid = true;

    fs.first_m = NULL;

    /*
     * Search for the page at object/offset.
     */
    fs.object = fs.first_object;
    fs.pindex = fs.first_pindex;
    while (TRUE) {
        /*
         * If the object is marked for imminent termination,
         * we retry here, since the collapse pass has raced
         * with us.  Otherwise, if we see a terminally dead
         * object, the fault fails.
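         * (An OBJT_DEAD object cannot be revived, so in that
         * case the fault returns KERN_PROTECTION_FAILURE.)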
         */
        if ((fs.object->flags & OBJ_DEAD) != 0) {
            dead = fs.object->type == OBJT_DEAD;
            unlock_and_deallocate(&fs);
            if (dead)
                return (KERN_PROTECTION_FAILURE);
            pause("vmf_de", 1);
            goto RetryFault;
        }

        /*
         * See if page is resident
         */
        fs.m = vm_page_lookup(fs.object, fs.pindex);
        if (fs.m != NULL) {
            /*
             * Wait/Retry if the page is busy.  We have to do this
             * if the page is either exclusive or shared busy
             * because the vm_pager may be using read busy for
             * pageouts (and even pageins if it is the vnode
             * pager), and we could end up trying to pagein and
             * pageout the same page simultaneously.
             *
             * We can theoretically allow the busy case on a read
             * fault if the page is marked valid, but since such
             * pages are typically already pmap'd, putting that
             * special case in might be more effort than it is
             * worth.  We cannot under any circumstances mess
             * around with a shared busied page except, perhaps,
             * to pmap it.
             */
            if (vm_page_busied(fs.m)) {
                /*
                 * Reference the page before unlocking and
                 * sleeping so that the page daemon is less
                 * likely to reclaim it.
                 */
                vm_page_aflag_set(fs.m, PGA_REFERENCED);
                if (fs.object != fs.first_object) {
                    if (!VM_OBJECT_TRYWLOCK(
                        fs.first_object)) {
                        VM_OBJECT_WUNLOCK(fs.object);
                        VM_OBJECT_WLOCK(fs.first_object);
                        VM_OBJECT_WLOCK(fs.object);
                    }
                    vm_page_lock(fs.first_m);
                    vm_page_free(fs.first_m);
                    vm_page_unlock(fs.first_m);
                    vm_object_pip_wakeup(fs.first_object);
                    VM_OBJECT_WUNLOCK(fs.first_object);
                    fs.first_m = NULL;
                }
                unlock_map(&fs);
                if (fs.m == vm_page_lookup(fs.object,
                    fs.pindex)) {
                    vm_page_sleep_if_busy(fs.m, "vmpfw");
                }
                vm_object_pip_wakeup(fs.object);
                VM_OBJECT_WUNLOCK(fs.object);
                VM_CNT_INC(v_intrans);
                vm_object_deallocate(fs.first_object);
                goto RetryFault;
            }

            /*
             * Mark page busy for other processes, and the
             * pagedaemon.
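             * The exclusive busy keeps out both the pageout
             * daemon and any other thread faulting on this page.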
             * If it still isn't completely valid (readable),
             * jump to readrest, else break out (we found the
             * page).
             */
            vm_page_xbusy(fs.m);
            if (fs.m->valid != VM_PAGE_BITS_ALL)
                goto readrest;
            break; /* break to PAGE HAS BEEN FOUND */
        }
        KASSERT(fs.m == NULL, ("fs.m should be NULL, not %p", fs.m));

        /*
         * Page is not resident.  If the pager might contain the page
         * or this is the beginning of the search, allocate a new
         * page.  (Default objects are zero-fill, so there is no real
         * pager for them.)
         */
        if (fs.object->type != OBJT_DEFAULT ||
            fs.object == fs.first_object) {
            if (fs.pindex >= fs.object->size) {
                unlock_and_deallocate(&fs);
                return (KERN_PROTECTION_FAILURE);
            }

            if (fs.object == fs.first_object &&
                (fs.first_object->flags & OBJ_POPULATE) != 0 &&
                fs.first_object->shadow_count == 0) {
                rv = vm_fault_populate(&fs, vaddr, prot,
                    fault_type, fault_flags, wired, m_hold);
                switch (rv) {
                case KERN_SUCCESS:
                case KERN_FAILURE:
                    unlock_and_deallocate(&fs);
                    return (rv);
                case KERN_RESOURCE_SHORTAGE:
                    unlock_and_deallocate(&fs);
                    goto RetryFault;
                case KERN_NOT_RECEIVER:
                    /*
                     * Pager's populate() method
                     * returned VM_PAGER_BAD.
                     */
                    break;
                default:
                    panic("inconsistent return codes");
                }
            }

            /*
             * Allocate a new page for this object/offset pair.
             *
             * Unlocked read of the p_flag is harmless.  At
             * worst, the P_KILLED flag might not be observed
             * there, and allocation can fail, causing a
             * restart and a new reading of the p_flag.
             */
            if (!vm_page_count_severe() || P_KILLED(curproc)) {
#if VM_NRESERVLEVEL > 0
                vm_object_color(fs.object, atop(vaddr) -
                    fs.pindex);
#endif
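                /*
                 * A killed process is allowed to dig into the
                 * system reserve so that it can finish faulting
                 * and exit sooner.
                 */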
                alloc_req = P_KILLED(curproc) ?
                    VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL;
                if (fs.object->type != OBJT_VNODE &&
                    fs.object->backing_object == NULL)
                    alloc_req |= VM_ALLOC_ZERO;
                fs.m = vm_page_alloc(fs.object, fs.pindex,
                    alloc_req);
            }
            if (fs.m == NULL) {
                unlock_and_deallocate(&fs);
                vm_waitpfault();
                goto RetryFault;
            }
        }

readrest:
        /*
         * At this point, we have either allocated a new page or found
         * an existing page that is only partially valid.
         *
         * We hold a reference on the current object and the page is
         * exclusive busied.
         */

        /*
         * If the pager for the current object might have the page,
         * then determine the number of additional pages to read and
         * potentially reprioritize previously read pages for earlier
         * reclamation.  These operations should only be performed
         * once per page fault.  Even if the current pager doesn't
         * have the page, the number of additional pages to read will
         * apply to subsequent objects in the shadow chain.
         */
        if (fs.object->type != OBJT_DEFAULT && nera == -1 &&
            !P_KILLED(curproc)) {
            KASSERT(fs.lookup_still_valid, ("map unlocked"));
            era = fs.entry->read_ahead;
            behavior = vm_map_entry_behavior(fs.entry);
            if (behavior == MAP_ENTRY_BEHAV_RANDOM) {
                nera = 0;
            } else if (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL) {
                nera = VM_FAULT_READ_AHEAD_MAX;
                if (vaddr == fs.entry->next_read)
                    vm_fault_dontneed(&fs, vaddr, nera);
            } else if (vaddr == fs.entry->next_read) {
                /*
                 * This is a sequential fault.  Arithmetically
                 * increase the requested number of pages in
                 * the read-ahead window.  The requested
                 * number of pages is "# of sequential faults
                 * x (read ahead min + 1) + read ahead min"
                 */
                nera = VM_FAULT_READ_AHEAD_MIN;
                if (era > 0) {
                    nera += era + 1;
                    if (nera > VM_FAULT_READ_AHEAD_MAX)
                        nera = VM_FAULT_READ_AHEAD_MAX;
                }
                if (era == VM_FAULT_READ_AHEAD_MAX)
                    vm_fault_dontneed(&fs, vaddr, nera);
            } else {
                /*
                 * This is a non-sequential fault.
                 */
                nera = 0;
            }
            if (era != nera) {
                /*
                 * A read lock on the map suffices to update
                 * the read ahead count safely.
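                 * (With the formula above, successive
                 * sequential faults request min, 2*min+1,
                 * 3*min+2, ... pages, clipped to
                 * VM_FAULT_READ_AHEAD_MAX.)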
                 */
                fs.entry->read_ahead = nera;
            }

            /*
             * Prepare for unlocking the map.  Save the map
             * entry's start and end addresses, which are used to
             * optimize the size of the pager operation below.
             * Even if the map entry's addresses change after
             * unlocking the map, using the saved addresses is
             * safe.
             */
            e_start = fs.entry->start;
            e_end = fs.entry->end;
        }

        /*
         * Call the pager to retrieve the page if there is a chance
         * that the pager has it, and potentially retrieve additional
         * pages at the same time.
         */
        if (fs.object->type != OBJT_DEFAULT) {
            /*
             * Release the map lock before locking the vnode or
             * sleeping in the pager.  (If the current object has
             * a shadow, then an earlier iteration of this loop
             * may have already unlocked the map.)
             */
            unlock_map(&fs);

            if (fs.object->type == OBJT_VNODE &&
                (vp = fs.object->handle) != fs.vp) {
                /*
                 * Perform an unlock in case the desired vnode
                 * changed while the map was unlocked during a
                 * retry.
                 */
                unlock_vp(&fs);

                locked = VOP_ISLOCKED(vp);
                if (locked != LK_EXCLUSIVE)
                    locked = LK_SHARED;

                /*
                 * We must not sleep acquiring the vnode lock
                 * while we have the page exclusive busied or
                 * the object's paging-in-progress count
                 * incremented.  Otherwise, we could deadlock.
                 */
                error = vget(vp, locked | LK_CANRECURSE |
                    LK_NOWAIT, curthread);
                if (error != 0) {
                    vhold(vp);
                    release_page(&fs);
                    unlock_and_deallocate(&fs);
                    error = vget(vp, locked | LK_RETRY |
                        LK_CANRECURSE, curthread);
                    vdrop(vp);
                    fs.vp = vp;
                    KASSERT(error == 0,
                        ("vm_fault: vget failed"));
                    goto RetryFault;
                }
                fs.vp = vp;
            }
            KASSERT(fs.vp == NULL || !fs.map->system_map,
                ("vm_fault: vnode-backed object mapped by system map"));

            /*
             * Page in the requested page and hint to the pager
             * that it might bring in surrounding pages as well.
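             * For illustration only, assuming a cluster size
             * (VM_FAULT_READ_DEFAULT) of 8 pages: a fault on
             * pindex 13 yields cluster_offset 5, so up to 5
             * pages behind and 2 ahead are requested, i.e., the
             * aligned cluster of pindexes 8 through 15.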
             */
            if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
                P_KILLED(curproc)) {
                behind = 0;
                ahead = 0;
            } else {
                /* Is this a sequential fault? */
                if (nera > 0) {
                    behind = 0;
                    ahead = nera;
                } else {
                    /*
                     * Request a cluster of pages that is
                     * aligned to a VM_FAULT_READ_DEFAULT
                     * page offset boundary within the
                     * object.  Alignment to a page offset
                     * boundary is more likely to coincide
                     * with the underlying file system
                     * block than alignment to a virtual
                     * address boundary.
                     */
                    cluster_offset = fs.pindex %
                        VM_FAULT_READ_DEFAULT;
                    behind = ulmin(cluster_offset,
                        atop(vaddr - e_start));
                    ahead = VM_FAULT_READ_DEFAULT - 1 -
                        cluster_offset;
                }
                ahead = ulmin(ahead, atop(e_end - vaddr) - 1);
            }
            rv = vm_pager_get_pages(fs.object, &fs.m, 1,
                &behind, &ahead);
            if (rv == VM_PAGER_OK) {
                faultcount = behind + 1 + ahead;
                hardfault = true;
                break; /* break to PAGE HAS BEEN FOUND */
            }
            if (rv == VM_PAGER_ERROR)
                printf("vm_fault: pager read error, pid %d (%s)\n",
                    curproc->p_pid, curproc->p_comm);

            /*
             * If an I/O error occurred or the requested page was
             * outside the range of the pager, clean up and return
             * an error.
             */
            if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
                vm_page_lock(fs.m);
                if (fs.m->wire_count == 0)
                    vm_page_free(fs.m);
                else
                    vm_page_xunbusy_maybelocked(fs.m);
                vm_page_unlock(fs.m);
                fs.m = NULL;
                unlock_and_deallocate(&fs);
                return (rv == VM_PAGER_ERROR ? KERN_FAILURE :
                    KERN_PROTECTION_FAILURE);
            }

            /*
             * The requested page does not exist at this object/
             * offset.  Remove the invalid page from the object,
             * waking up anyone waiting for it, and continue on to
             * the next object.  However, if this is the top-level
             * object, we must leave the busy page in place to
             * prevent another process from rushing past us, and
             * inserting the page in that object at the same time
             * that we are.
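             * The still-busy first_m thus serializes concurrent
             * faults on the same top-level page.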
             */
            if (fs.object != fs.first_object) {
                vm_page_lock(fs.m);
                if (fs.m->wire_count == 0)
                    vm_page_free(fs.m);
                else
                    vm_page_xunbusy_maybelocked(fs.m);
                vm_page_unlock(fs.m);
                fs.m = NULL;
            }
        }

        /*
         * We get here if the object has a default pager (or unwiring)
         * or the pager doesn't have the page.
         */
        if (fs.object == fs.first_object)
            fs.first_m = fs.m;

        /*
         * Move on to the next object.  Lock the next object before
         * unlocking the current one.
         */
        next_object = fs.object->backing_object;
        if (next_object == NULL) {
            /*
             * If there's no object left, fill the page in the top
             * object with zeros.
             */
            if (fs.object != fs.first_object) {
                vm_object_pip_wakeup(fs.object);
                VM_OBJECT_WUNLOCK(fs.object);

                fs.object = fs.first_object;
                fs.pindex = fs.first_pindex;
                fs.m = fs.first_m;
                VM_OBJECT_WLOCK(fs.object);
            }
            fs.first_m = NULL;

            /*
             * Zero the page if necessary and mark it valid.
             */
            if ((fs.m->flags & PG_ZERO) == 0) {
                pmap_zero_page(fs.m);
            } else {
                VM_CNT_INC(v_ozfod);
            }
            VM_CNT_INC(v_zfod);
            fs.m->valid = VM_PAGE_BITS_ALL;
            /* Don't try to prefault neighboring pages. */
            faultcount = 1;
            break; /* break to PAGE HAS BEEN FOUND */
        } else {
            KASSERT(fs.object != next_object,
                ("object loop %p", next_object));
            VM_OBJECT_WLOCK(next_object);
            vm_object_pip_add(next_object, 1);
            if (fs.object != fs.first_object)
                vm_object_pip_wakeup(fs.object);
            fs.pindex +=
                OFF_TO_IDX(fs.object->backing_object_offset);
            VM_OBJECT_WUNLOCK(fs.object);
            fs.object = next_object;
        }
    }

    vm_page_assert_xbusied(fs.m);

    /*
     * PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock
     * is held.]
     */

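    /*
     * At this point fs.m is fully valid, exclusively busied, and
     * resident in fs.object, whose lock is held; every exit from
     * the loop above establishes these invariants.
     */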
Grimes /* 10610d94caffSDavid Greenman * If the page is being written, but isn't already owned by the 10620d94caffSDavid Greenman * top-level object, we have to copy it into a new page owned by the 10630d94caffSDavid Greenman * top-level object. 1064df8bae1dSRodney W. Grimes */ 10654866e085SJohn Dyson if (fs.object != fs.first_object) { 1066df8bae1dSRodney W. Grimes /* 10670d94caffSDavid Greenman * We only really need to copy if we want to write it. 1068df8bae1dSRodney W. Grimes */ 1069a6d42a0dSAlan Cox if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1070df8bae1dSRodney W. Grimes /* 10711c7c3c6aSMatthew Dillon * This allows pages to be virtually copied from a 10721c7c3c6aSMatthew Dillon * backing_object into the first_object, where the 10731c7c3c6aSMatthew Dillon * backing object has no other refs to it, and cannot 10741c7c3c6aSMatthew Dillon * gain any more refs. Instead of a bcopy, we just 10751c7c3c6aSMatthew Dillon * move the page from the backing object to the 10761c7c3c6aSMatthew Dillon * first object. Note that we must mark the page 10771c7c3c6aSMatthew Dillon * dirty in the first object so that it will go out 10781c7c3c6aSMatthew Dillon * to swap when needed. 1079df8bae1dSRodney W. Grimes */ 1080cd8a6fe8SAlan Cox is_first_object_locked = false; 1081e50346b5SAlan Cox if ( 1082de5f6a77SJohn Dyson /* 1083de5f6a77SJohn Dyson * Only one shadow object 1084de5f6a77SJohn Dyson */ 10854866e085SJohn Dyson (fs.object->shadow_count == 1) && 1086de5f6a77SJohn Dyson /* 1087de5f6a77SJohn Dyson * No COW refs, except us 1088de5f6a77SJohn Dyson */ 10894866e085SJohn Dyson (fs.object->ref_count == 1) && 1090de5f6a77SJohn Dyson /* 10915929bcfaSPhilippe Charnier * No one else can look this object up 1092de5f6a77SJohn Dyson */ 10934866e085SJohn Dyson (fs.object->handle == NULL) && 1094de5f6a77SJohn Dyson /* 1095de5f6a77SJohn Dyson * No other ways to look the object up 1096de5f6a77SJohn Dyson */ 10974866e085SJohn Dyson ((fs.object->type == OBJT_DEFAULT) || 10984866e085SJohn Dyson (fs.object->type == OBJT_SWAP)) && 109989f6b863SAttilio Rao (is_first_object_locked = VM_OBJECT_TRYWLOCK(fs.first_object)) && 1100de5f6a77SJohn Dyson /* 1101de5f6a77SJohn Dyson * We don't chase down the shadow chain 1102de5f6a77SJohn Dyson */ 1103e50346b5SAlan Cox fs.object == fs.first_object->backing_object) { 1104bccdea45SAlan Cox vm_page_lock(fs.m); 1105*c6a70eaeSMark Johnston vm_page_remque(fs.m); 1106bccdea45SAlan Cox vm_page_remove(fs.m); 1107bccdea45SAlan Cox vm_page_unlock(fs.m); 1108eb00b276SAlan Cox vm_page_lock(fs.first_m); 1109bccdea45SAlan Cox vm_page_replace_checked(fs.m, fs.first_object, 1110bccdea45SAlan Cox fs.first_pindex, fs.first_m); 11116fee422eSConrad Meyer vm_page_free(fs.first_m); 11126fee422eSConrad Meyer vm_page_unlock(fs.first_m); 1113bccdea45SAlan Cox vm_page_dirty(fs.m); 1114dfdf9abdSAlan Cox #if VM_NRESERVLEVEL > 0 1115dfdf9abdSAlan Cox /* 1116dfdf9abdSAlan Cox * Rename the reservation. 1117dfdf9abdSAlan Cox */ 1118dfdf9abdSAlan Cox vm_reserv_rename(fs.m, fs.first_object, 1119dfdf9abdSAlan Cox fs.object, OFF_TO_IDX( 1120dfdf9abdSAlan Cox fs.first_object->backing_object_offset)); 1121dfdf9abdSAlan Cox #endif 1122bccdea45SAlan Cox /* 1123bccdea45SAlan Cox * Removing the page from the backing object 1124bccdea45SAlan Cox * unbusied it. 
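 * The vm_page_xbusy() that follows restores the exclusive busy
 * state that the remainder of the fault handler expects the page
 * (handed onward as fs.first_m) to hold.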
1125bccdea45SAlan Cox */ 1126c7aebda8SAttilio Rao vm_page_xbusy(fs.m); 1127d98ddc46SAlan Cox fs.first_m = fs.m; 11284866e085SJohn Dyson fs.m = NULL; 112983c9dea1SGleb Smirnoff VM_CNT_INC(v_cow_optim); 1130de5f6a77SJohn Dyson } else { 1131de5f6a77SJohn Dyson /* 1132de5f6a77SJohn Dyson * Oh, well, let's copy it. 1133de5f6a77SJohn Dyson */ 1134669890eaSAlan Cox pmap_copy_page(fs.m, fs.first_m); 1135669890eaSAlan Cox fs.first_m->valid = VM_PAGE_BITS_ALL; 1136d929ad7fSKonstantin Belousov if ((fault_flags & VM_FAULT_WIRE) == 0) { 1137d929ad7fSKonstantin Belousov prot &= ~VM_PROT_WRITE; 1138d929ad7fSKonstantin Belousov fault_type &= ~VM_PROT_WRITE; 1139d929ad7fSKonstantin Belousov } 1140d8778512SAlan Cox if (wired && (fault_flags & 11416a875bf9SKonstantin Belousov VM_FAULT_WIRE) == 0) { 11422965a453SKip Macy vm_page_lock(fs.first_m); 1143d8778512SAlan Cox vm_page_wire(fs.first_m); 11442965a453SKip Macy vm_page_unlock(fs.first_m); 11452965a453SKip Macy 11462965a453SKip Macy vm_page_lock(fs.m); 11473ae10f74SAttilio Rao vm_page_unwire(fs.m, PQ_INACTIVE); 11482965a453SKip Macy vm_page_unlock(fs.m); 1149de5f6a77SJohn Dyson } 1150df8bae1dSRodney W. Grimes /* 1151df8bae1dSRodney W. Grimes * We no longer need the old page or object. 1152df8bae1dSRodney W. Grimes */ 11534866e085SJohn Dyson release_page(&fs); 1154de5f6a77SJohn Dyson } 11551c7c3c6aSMatthew Dillon /* 11561c7c3c6aSMatthew Dillon * fs.object != fs.first_object due to above 11571c7c3c6aSMatthew Dillon * conditional 11581c7c3c6aSMatthew Dillon */ 11594866e085SJohn Dyson vm_object_pip_wakeup(fs.object); 116089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(fs.object); 1161df8bae1dSRodney W. Grimes /* 1162df8bae1dSRodney W. Grimes * Only use the new page below... 1163df8bae1dSRodney W. Grimes */ 11644866e085SJohn Dyson fs.object = fs.first_object; 11654866e085SJohn Dyson fs.pindex = fs.first_pindex; 1166d98ddc46SAlan Cox fs.m = fs.first_m; 1167f29ba63eSAlan Cox if (!is_first_object_locked) 116889f6b863SAttilio Rao VM_OBJECT_WLOCK(fs.object); 116983c9dea1SGleb Smirnoff VM_CNT_INC(v_cow_faults); 11704d34e019SKonstantin Belousov curthread->td_cow++; 11710d94caffSDavid Greenman } else { 1172df8bae1dSRodney W. Grimes prot &= ~VM_PROT_WRITE; 1173df8bae1dSRodney W. Grimes } 1174df8bae1dSRodney W. Grimes } 1175df8bae1dSRodney W. Grimes 1176df8bae1dSRodney W. Grimes /* 11770d94caffSDavid Greenman * We must verify that the maps have not changed since our last 11780d94caffSDavid Greenman * lookup. 1179df8bae1dSRodney W. Grimes */ 118019dc5607STor Egge if (!fs.lookup_still_valid) { 118119dc5607STor Egge if (!vm_map_trylock_read(fs.map)) { 1182b823bbd6SMatthew Dillon release_page(&fs); 1183b823bbd6SMatthew Dillon unlock_and_deallocate(&fs); 1184b823bbd6SMatthew Dillon goto RetryFault; 1185b823bbd6SMatthew Dillon } 1186cd8a6fe8SAlan Cox fs.lookup_still_valid = true; 1187dc5401d2SKonstantin Belousov if (fs.map->timestamp != fs.map_generation) { 118819dc5607STor Egge result = vm_map_lookup_locked(&fs.map, vaddr, fault_type, 11894866e085SJohn Dyson &fs.entry, &retry_object, &retry_pindex, &retry_prot, &wired); 1190df8bae1dSRodney W. Grimes 1191df8bae1dSRodney W. Grimes /* 119244ed3417STor Egge * If we don't need the page any longer, put it on the inactive 11930d94caffSDavid Greenman * list (the easiest thing to do here). If no one needs it, 11940d94caffSDavid Greenman * pageout will grab it eventually. 1195df8bae1dSRodney W. Grimes */ 1196df8bae1dSRodney W.
Grimes if (result != KERN_SUCCESS) { 11974866e085SJohn Dyson release_page(&fs); 11984866e085SJohn Dyson unlock_and_deallocate(&fs); 119919dc5607STor Egge 120019dc5607STor Egge /* 120119dc5607STor Egge * If retry of map lookup would have blocked then 120219dc5607STor Egge * retry fault from start. 120319dc5607STor Egge */ 120419dc5607STor Egge if (result == KERN_FAILURE) 120519dc5607STor Egge goto RetryFault; 1206df8bae1dSRodney W. Grimes return (result); 1207df8bae1dSRodney W. Grimes } 12084866e085SJohn Dyson if ((retry_object != fs.first_object) || 12094866e085SJohn Dyson (retry_pindex != fs.first_pindex)) { 12104866e085SJohn Dyson release_page(&fs); 12114866e085SJohn Dyson unlock_and_deallocate(&fs); 1212df8bae1dSRodney W. Grimes goto RetryFault; 1213df8bae1dSRodney W. Grimes } 121419dc5607STor Egge 1215df8bae1dSRodney W. Grimes /* 12160d94caffSDavid Greenman * Check whether the protection has changed or the object has 12170d94caffSDavid Greenman * been copied while we left the map unlocked. Changing from 12180d94caffSDavid Greenman * read to write permission is OK - we leave the page 12190d94caffSDavid Greenman * write-protected, and catch the write fault. Changing from 12200d94caffSDavid Greenman * write to read permission means that we can't mark the page 12210d94caffSDavid Greenman * write-enabled after all. 1222df8bae1dSRodney W. Grimes */ 1223df8bae1dSRodney W. Grimes prot &= retry_prot; 1224607970bcSKonstantin Belousov fault_type &= retry_prot; 1225607970bcSKonstantin Belousov if (prot == 0) { 1226607970bcSKonstantin Belousov release_page(&fs); 1227607970bcSKonstantin Belousov unlock_and_deallocate(&fs); 1228607970bcSKonstantin Belousov goto RetryFault; 1229607970bcSKonstantin Belousov } 1230df8bae1dSRodney W. Grimes } 123119dc5607STor Egge } 1232381b7242SAlan Cox 1233d2bf64c3SKonstantin Belousov /* 1234381b7242SAlan Cox * If the page was filled by a pager, save the virtual address that 1235381b7242SAlan Cox * should be faulted on next under a sequential access pattern to the 1236381b7242SAlan Cox * map entry. A read lock on the map suffices to update this address 1237381b7242SAlan Cox * safely. 1238d2bf64c3SKonstantin Belousov */ 12395758fe71SAlan Cox if (hardfault) 1240381b7242SAlan Cox fs.entry->next_read = vaddr + ptoa(ahead) + PAGE_SIZE; 1241d2bf64c3SKonstantin Belousov 1242e26236e9SKonstantin Belousov vm_fault_dirty(fs.entry, fs.m, prot, fault_type, fault_flags, true); 1243c7aebda8SAttilio Rao vm_page_assert_xbusied(fs.m); 1244c7aebda8SAttilio Rao 12454221e284SAlan Cox /* 124678cfe1f7SAlan Cox * Page must be completely valid or it is not fit to 12474221e284SAlan Cox * map into user space. vm_pager_get_pages() ensures this. 12484221e284SAlan Cox */ 124978cfe1f7SAlan Cox KASSERT(fs.m->valid == VM_PAGE_BITS_ALL, 125078cfe1f7SAlan Cox ("vm_fault: page %p partially invalid", fs.m)); 125189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(fs.object); 1252cbfbaad8SAlan Cox 125386735996SAlan Cox /* 125486735996SAlan Cox * Put this page into the physical map. We had to do the unlock above 125586735996SAlan Cox * because pmap_enter() may sleep. We don't put the page 125686735996SAlan Cox * back on the active queue until later so that the pageout daemon 125786735996SAlan Cox * won't find it (yet). 125886735996SAlan Cox */ 125939ffa8c1SKonstantin Belousov pmap_enter(fs.map->pmap, vaddr, fs.m, prot, 126039ffa8c1SKonstantin Belousov fault_type | (wired ? 
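/* carry the map entry's wiring into the new mapping */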
PMAP_ENTER_WIRED : 0), 0); 12616a875bf9SKonstantin Belousov if (faultcount != 1 && (fault_flags & VM_FAULT_WIRE) == 0 && 12627b9b301cSAlan Cox wired == 0) 1263b0cd2017SGleb Smirnoff vm_fault_prefault(&fs, vaddr, 1264b0cd2017SGleb Smirnoff faultcount > 0 ? behind : PFBAK, 1265b0cd2017SGleb Smirnoff faultcount > 0 ? ahead : PFFOR); 126689f6b863SAttilio Rao VM_OBJECT_WLOCK(fs.object); 12672965a453SKip Macy vm_page_lock(fs.m); 1268ff97964aSJohn Dyson 1269df8bae1dSRodney W. Grimes /* 12700d94caffSDavid Greenman * If the page is not wired down, then put it where the pageout daemon 12710d94caffSDavid Greenman * can find it. 1272df8bae1dSRodney W. Grimes */ 12736a875bf9SKonstantin Belousov if ((fault_flags & VM_FAULT_WIRE) != 0) { 12746a875bf9SKonstantin Belousov KASSERT(wired, ("VM_FAULT_WIRE && !wired")); 12754866e085SJohn Dyson vm_page_wire(fs.m); 127603679e23SAlan Cox } else 12774866e085SJohn Dyson vm_page_activate(fs.m); 1278acd11c74SAlan Cox if (m_hold != NULL) { 1279acd11c74SAlan Cox *m_hold = fs.m; 1280acd11c74SAlan Cox vm_page_hold(fs.m); 1281acd11c74SAlan Cox } 12822965a453SKip Macy vm_page_unlock(fs.m); 1283c7aebda8SAttilio Rao vm_page_xunbusy(fs.m); 1284eeec6babSJohn Baldwin 1285eebf3286SAlan Cox /* 1286eebf3286SAlan Cox * Unlock everything, and return 1287eebf3286SAlan Cox */ 1288eebf3286SAlan Cox unlock_and_deallocate(&fs); 1289b3a01bdfSAndrey Zonov if (hardfault) { 129083c9dea1SGleb Smirnoff VM_CNT_INC(v_io_faults); 12911c4bcd05SJeff Roberson curthread->td_ru.ru_majflt++; 1292ae34b6ffSEdward Tomasz Napierala #ifdef RACCT 1293ae34b6ffSEdward Tomasz Napierala if (racct_enable && fs.object->type == OBJT_VNODE) { 1294ae34b6ffSEdward Tomasz Napierala PROC_LOCK(curproc); 1295ae34b6ffSEdward Tomasz Napierala if ((fault_type & (VM_PROT_COPY | VM_PROT_WRITE)) != 0) { 1296ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_WRITEBPS, 1297ae34b6ffSEdward Tomasz Napierala PAGE_SIZE + behind * PAGE_SIZE); 1298ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_WRITEIOPS, 1); 1299ae34b6ffSEdward Tomasz Napierala } else { 1300ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_READBPS, 1301ae34b6ffSEdward Tomasz Napierala PAGE_SIZE + ahead * PAGE_SIZE); 1302ae34b6ffSEdward Tomasz Napierala racct_add_force(curproc, RACCT_READIOPS, 1); 1303ae34b6ffSEdward Tomasz Napierala } 1304ae34b6ffSEdward Tomasz Napierala PROC_UNLOCK(curproc); 1305ae34b6ffSEdward Tomasz Napierala } 1306ae34b6ffSEdward Tomasz Napierala #endif 1307b3a01bdfSAndrey Zonov } else 13081c4bcd05SJeff Roberson curthread->td_ru.ru_minflt++; 1309df8bae1dSRodney W. Grimes 1310df8bae1dSRodney W. Grimes return (KERN_SUCCESS); 1311df8bae1dSRodney W. Grimes } 1312df8bae1dSRodney W. Grimes 1313df8bae1dSRodney W. Grimes /* 1314a8b0f100SAlan Cox * Speed up the reclamation of pages that precede the faulting pindex within 1315a8b0f100SAlan Cox * the first object of the shadow chain. Essentially, perform the equivalent 1316a8b0f100SAlan Cox * to madvise(..., MADV_DONTNEED) on a large cluster of pages that precedes 1317a8b0f100SAlan Cox * the faulting pindex by the cluster size when the pages read by vm_fault() 1318a8b0f100SAlan Cox * cross a cluster-size boundary. The cluster size is the greater of the 1319a8b0f100SAlan Cox * smallest superpage size and VM_FAULT_DONTNEED_MIN. 1320a8b0f100SAlan Cox * 1321a8b0f100SAlan Cox * When "fs->first_object" is a shadow object, the pages in the backing object 1322a8b0f100SAlan Cox * that precede the faulting pindex are deactivated by vm_fault(). 
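 * (Illustrative arithmetic, assuming 4KB pages and a 2MB superpage:
 * the cluster size is then 512 pages, "end" below is vaddr rounded
 * down to a 2MB boundary, and once the window read by vm_fault()
 * reaches the last page of the current cluster, the preceding
 * cluster [end - size, end) is advised away with MADV_DONTNEED.)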
So, this 1323a8b0f100SAlan Cox * function must only be concerned with pages in the first object. 132413458803SAlan Cox */ 132513458803SAlan Cox static void 1326a8b0f100SAlan Cox vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr, int ahead) 132713458803SAlan Cox { 1328a8b0f100SAlan Cox vm_map_entry_t entry; 132913458803SAlan Cox vm_object_t first_object, object; 1330a8b0f100SAlan Cox vm_offset_t end, start; 1331a8b0f100SAlan Cox vm_page_t m, m_next; 1332a8b0f100SAlan Cox vm_pindex_t pend, pstart; 1333a8b0f100SAlan Cox vm_size_t size; 133413458803SAlan Cox 133513458803SAlan Cox object = fs->object; 133689f6b863SAttilio Rao VM_OBJECT_ASSERT_WLOCKED(object); 133713458803SAlan Cox first_object = fs->first_object; 133813458803SAlan Cox if (first_object != object) { 1339b5ab20c0SAlan Cox if (!VM_OBJECT_TRYWLOCK(first_object)) { 134089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 1341b5ab20c0SAlan Cox VM_OBJECT_WLOCK(first_object); 134289f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 134313458803SAlan Cox } 134413458803SAlan Cox } 1345a8b0f100SAlan Cox /* Neither fictitious nor unmanaged pages can be reclaimed. */ 134628634820SAlan Cox if ((first_object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0) { 1347a8b0f100SAlan Cox size = VM_FAULT_DONTNEED_MIN; 1348a8b0f100SAlan Cox if (MAXPAGESIZES > 1 && size < pagesizes[1]) 1349a8b0f100SAlan Cox size = pagesizes[1]; 1350a8b0f100SAlan Cox end = rounddown2(vaddr, size); 1351a8b0f100SAlan Cox if (vaddr - end >= size - PAGE_SIZE - ptoa(ahead) && 1352a8b0f100SAlan Cox (entry = fs->entry)->start < end) { 1353a8b0f100SAlan Cox if (end - entry->start < size) 1354a8b0f100SAlan Cox start = entry->start; 135513458803SAlan Cox else 1356a8b0f100SAlan Cox start = end - size; 1357a8b0f100SAlan Cox pmap_advise(fs->map->pmap, start, end, MADV_DONTNEED); 1358a8b0f100SAlan Cox pstart = OFF_TO_IDX(entry->offset) + atop(start - 1359a8b0f100SAlan Cox entry->start); 1360a8b0f100SAlan Cox m_next = vm_page_find_least(first_object, pstart); 1361a8b0f100SAlan Cox pend = OFF_TO_IDX(entry->offset) + atop(end - 1362a8b0f100SAlan Cox entry->start); 1363a8b0f100SAlan Cox while ((m = m_next) != NULL && m->pindex < pend) { 1364a8b0f100SAlan Cox m_next = TAILQ_NEXT(m, listq); 1365a8b0f100SAlan Cox if (m->valid != VM_PAGE_BITS_ALL || 1366a8b0f100SAlan Cox vm_page_busied(m)) 136713458803SAlan Cox continue; 1368d8015db3SAlan Cox 1369d8015db3SAlan Cox /* 1370d8015db3SAlan Cox * Don't clear PGA_REFERENCED, since it would 1371d8015db3SAlan Cox * likely represent a reference by a different 1372d8015db3SAlan Cox * process. 1373d8015db3SAlan Cox * 1374d8015db3SAlan Cox * Typically, at this point, prefetched pages 1375d8015db3SAlan Cox * are still in the inactive queue. Only 1376d8015db3SAlan Cox * pages that triggered page faults are in the 1377d8015db3SAlan Cox * active queue. 1378d8015db3SAlan Cox */ 137913458803SAlan Cox vm_page_lock(m); 13800eb50f9cSMark Johnston if (!vm_page_inactive(m)) 1381d8015db3SAlan Cox vm_page_deactivate(m); 138213458803SAlan Cox vm_page_unlock(m); 138313458803SAlan Cox } 138413458803SAlan Cox } 1385a8b0f100SAlan Cox } 138613458803SAlan Cox if (first_object != object) 1387b5ab20c0SAlan Cox VM_OBJECT_WUNLOCK(first_object); 138813458803SAlan Cox } 138913458803SAlan Cox 139013458803SAlan Cox /* 1391566526a9SAlan Cox * vm_fault_prefault provides a quick way of clustering 1392566526a9SAlan Cox * pagefaults into a process's address space.
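 * Only pages that are already resident are mapped: vm_fault_prefault()
 * performs no I/O, and its probe loop stops at the first candidate
 * address without a backing physical page.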
It is a "cousin" 1393566526a9SAlan Cox * of vm_map_pmap_enter, except it runs at page fault time instead 1394566526a9SAlan Cox * of mmap time. 1395566526a9SAlan Cox */ 1396566526a9SAlan Cox static void 139763281952SAlan Cox vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra, 1398b0cd2017SGleb Smirnoff int backward, int forward) 1399566526a9SAlan Cox { 140063281952SAlan Cox pmap_t pmap; 140163281952SAlan Cox vm_map_entry_t entry; 140263281952SAlan Cox vm_object_t backing_object, lobject; 1403566526a9SAlan Cox vm_offset_t addr, starta; 1404566526a9SAlan Cox vm_pindex_t pindex; 14052053c127SStephan Uphoff vm_page_t m; 1406b0cd2017SGleb Smirnoff int i; 1407566526a9SAlan Cox 140863281952SAlan Cox pmap = fs->map->pmap; 1409950d5f7aSAlan Cox if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) 1410566526a9SAlan Cox return; 1411566526a9SAlan Cox 141263281952SAlan Cox entry = fs->entry; 1413566526a9SAlan Cox 141463cdcaaeSKonstantin Belousov if (addra < backward * PAGE_SIZE) { 1415566526a9SAlan Cox starta = entry->start; 141663cdcaaeSKonstantin Belousov } else { 141763cdcaaeSKonstantin Belousov starta = addra - backward * PAGE_SIZE; 141863cdcaaeSKonstantin Belousov if (starta < entry->start) 141963cdcaaeSKonstantin Belousov starta = entry->start; 1420566526a9SAlan Cox } 1421566526a9SAlan Cox 142263281952SAlan Cox /* 142363281952SAlan Cox * Generate the sequence of virtual addresses that are candidates for 142463281952SAlan Cox * prefaulting in an outward spiral from the faulting virtual address, 142563281952SAlan Cox * "addra". Specifically, the sequence is "addra - PAGE_SIZE", "addra 142663281952SAlan Cox * + PAGE_SIZE", "addra - 2 * PAGE_SIZE", "addra + 2 * PAGE_SIZE", ... 142763281952SAlan Cox * If the candidate address doesn't have a backing physical page, then 142863281952SAlan Cox * the loop immediately terminates. 142963281952SAlan Cox */ 143063281952SAlan Cox for (i = 0; i < 2 * imax(backward, forward); i++) { 143163281952SAlan Cox addr = addra + ((i >> 1) + 1) * ((i & 1) == 0 ? 
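/* even i probes below addra, odd i probes above */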
-PAGE_SIZE : 143263281952SAlan Cox PAGE_SIZE); 143363281952SAlan Cox if (addr > addra + forward * PAGE_SIZE) 1434566526a9SAlan Cox addr = 0; 1435566526a9SAlan Cox 1436566526a9SAlan Cox if (addr < starta || addr >= entry->end) 1437566526a9SAlan Cox continue; 1438566526a9SAlan Cox 1439566526a9SAlan Cox if (!pmap_is_prefaultable(pmap, addr)) 1440566526a9SAlan Cox continue; 1441566526a9SAlan Cox 1442566526a9SAlan Cox pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; 144363281952SAlan Cox lobject = entry->object.vm_object; 1444c141ae7fSAlan Cox VM_OBJECT_RLOCK(lobject); 1445566526a9SAlan Cox while ((m = vm_page_lookup(lobject, pindex)) == NULL && 1446566526a9SAlan Cox lobject->type == OBJT_DEFAULT && 1447566526a9SAlan Cox (backing_object = lobject->backing_object) != NULL) { 144836930fc9SAlan Cox KASSERT((lobject->backing_object_offset & PAGE_MASK) == 144936930fc9SAlan Cox 0, ("vm_fault_prefault: unaligned object offset")); 1450566526a9SAlan Cox pindex += lobject->backing_object_offset >> PAGE_SHIFT; 1451c141ae7fSAlan Cox VM_OBJECT_RLOCK(backing_object); 1452c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1453566526a9SAlan Cox lobject = backing_object; 1454566526a9SAlan Cox } 1455cbfbaad8SAlan Cox if (m == NULL) { 1456c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1457566526a9SAlan Cox break; 1458cbfbaad8SAlan Cox } 14590a2e596aSAlan Cox if (m->valid == VM_PAGE_BITS_ALL && 14603c4a2440SAlan Cox (m->flags & PG_FICTITIOUS) == 0) 14617bfda801SAlan Cox pmap_enter_quick(pmap, addr, m, entry->protection); 1462c141ae7fSAlan Cox VM_OBJECT_RUNLOCK(lobject); 1463566526a9SAlan Cox } 1464566526a9SAlan Cox } 1465566526a9SAlan Cox 1466566526a9SAlan Cox /* 146782de724fSAlan Cox * Hold each of the physical pages that are mapped by the specified range of 146882de724fSAlan Cox * virtual addresses, ["addr", "addr" + "len"), if those mappings are valid 146982de724fSAlan Cox * and allow the specified types of access, "prot". If all of the implied 147082de724fSAlan Cox * pages are successfully held, then the number of held pages is returned 147182de724fSAlan Cox * together with pointers to those pages in the array "ma". However, if any 147282de724fSAlan Cox * of the pages cannot be held, -1 is returned. 147382de724fSAlan Cox */ 147482de724fSAlan Cox int 147582de724fSAlan Cox vm_fault_quick_hold_pages(vm_map_t map, vm_offset_t addr, vm_size_t len, 147682de724fSAlan Cox vm_prot_t prot, vm_page_t *ma, int max_count) 147782de724fSAlan Cox { 147882de724fSAlan Cox vm_offset_t end, va; 147982de724fSAlan Cox vm_page_t *mp; 14807e14088dSKonstantin Belousov int count; 148182de724fSAlan Cox boolean_t pmap_failed; 148282de724fSAlan Cox 1483af32c419SKonstantin Belousov if (len == 0) 1484af32c419SKonstantin Belousov return (0); 148582de724fSAlan Cox end = round_page(addr + len); 148682de724fSAlan Cox addr = trunc_page(addr); 148782de724fSAlan Cox 148882de724fSAlan Cox /* 148982de724fSAlan Cox * Check for illegal addresses. 149082de724fSAlan Cox */ 149182de724fSAlan Cox if (addr < vm_map_min(map) || addr > end || end > vm_map_max(map)) 149282de724fSAlan Cox return (-1); 149382de724fSAlan Cox 14947e14088dSKonstantin Belousov if (atop(end - addr) > max_count) 149582de724fSAlan Cox panic("vm_fault_quick_hold_pages: count > max_count"); 14967e14088dSKonstantin Belousov count = atop(end - addr); 149782de724fSAlan Cox 149882de724fSAlan Cox /* 149982de724fSAlan Cox * Most likely, the physical pages are resident in the pmap, so it is 150082de724fSAlan Cox * faster to try pmap_extract_and_hold() first. 
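 * If some page is not held that way, a second pass falls back to
 * vm_fault_hold() for just the missing pages, and every hold is
 * released again if any page cannot be faulted in.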
150182de724fSAlan Cox */ 150282de724fSAlan Cox pmap_failed = FALSE; 150382de724fSAlan Cox for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) { 150482de724fSAlan Cox *mp = pmap_extract_and_hold(map->pmap, va, prot); 150582de724fSAlan Cox if (*mp == NULL) 150682de724fSAlan Cox pmap_failed = TRUE; 150782de724fSAlan Cox else if ((prot & VM_PROT_WRITE) != 0 && 1508a5dbab54SAlan Cox (*mp)->dirty != VM_PAGE_BITS_ALL) { 150982de724fSAlan Cox /* 151082de724fSAlan Cox * Explicitly dirty the physical page. Otherwise, the 151182de724fSAlan Cox * caller's changes may go unnoticed because they are 151282de724fSAlan Cox * performed through an unmanaged mapping or by a DMA 151382de724fSAlan Cox * operation. 15143c76db4cSAlan Cox * 1515abb9b935SKonstantin Belousov * The object lock is not held here. 1516abb9b935SKonstantin Belousov * See vm_page_clear_dirty_mask(). 151782de724fSAlan Cox */ 15183c76db4cSAlan Cox vm_page_dirty(*mp); 151982de724fSAlan Cox } 152082de724fSAlan Cox } 152182de724fSAlan Cox if (pmap_failed) { 152282de724fSAlan Cox /* 152382de724fSAlan Cox * One or more pages could not be held by the pmap. Either no 152482de724fSAlan Cox * page was mapped at the specified virtual address or that 152582de724fSAlan Cox * mapping had insufficient permissions. Attempt to fault in 152682de724fSAlan Cox * and hold these pages. 152782de724fSAlan Cox */ 152882de724fSAlan Cox for (mp = ma, va = addr; va < end; mp++, va += PAGE_SIZE) 1529be996836SAttilio Rao if (*mp == NULL && vm_fault_hold(map, va, prot, 153082de724fSAlan Cox VM_FAULT_NORMAL, mp) != KERN_SUCCESS) 153182de724fSAlan Cox goto error; 153282de724fSAlan Cox } 153382de724fSAlan Cox return (count); 153482de724fSAlan Cox error: 153582de724fSAlan Cox for (mp = ma; mp < ma + count; mp++) 153682de724fSAlan Cox if (*mp != NULL) { 153782de724fSAlan Cox vm_page_lock(*mp); 153882de724fSAlan Cox vm_page_unhold(*mp); 153982de724fSAlan Cox vm_page_unlock(*mp); 154082de724fSAlan Cox } 154182de724fSAlan Cox return (-1); 154282de724fSAlan Cox } 154382de724fSAlan Cox 154482de724fSAlan Cox /* 1545df8bae1dSRodney W. Grimes * Routine: 1546df8bae1dSRodney W. Grimes * vm_fault_copy_entry 1547df8bae1dSRodney W. Grimes * Function: 1548210a6886SKonstantin Belousov * Create new shadow object backing dst_entry with private copy of 1549210a6886SKonstantin Belousov * all underlying pages. When src_entry is equal to dst_entry, 1550210a6886SKonstantin Belousov * function implements COW for wired-down map entry. Otherwise, 1551210a6886SKonstantin Belousov * it forks wired entry into dst_map. 1552df8bae1dSRodney W. Grimes * 1553df8bae1dSRodney W. Grimes * In/out conditions: 1554df8bae1dSRodney W. Grimes * The source and destination maps must be locked for write. 1555df8bae1dSRodney W. Grimes * The source map entry must be wired down (or be a sharing map 1556df8bae1dSRodney W. Grimes * entry corresponding to a main map entry that is wired down). 1557df8bae1dSRodney W. Grimes */ 155826f9a767SRodney W. Grimes void 1559121fd461SKonstantin Belousov vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map, 1560121fd461SKonstantin Belousov vm_map_entry_t dst_entry, vm_map_entry_t src_entry, 1561121fd461SKonstantin Belousov vm_ooffset_t *fork_charge) 1562df8bae1dSRodney W. Grimes { 1563210a6886SKonstantin Belousov vm_object_t backing_object, dst_object, object, src_object; 15647afab86cSAlan Cox vm_pindex_t dst_pindex, pindex, src_pindex; 1565210a6886SKonstantin Belousov vm_prot_t access, prot; 1566df8bae1dSRodney W. Grimes vm_offset_t vaddr; 1567df8bae1dSRodney W. 
Grimes vm_page_t dst_m; 1568df8bae1dSRodney W. Grimes vm_page_t src_m; 15694c74acf7SKonstantin Belousov boolean_t upgrade; 1570df8bae1dSRodney W. Grimes 1571df8bae1dSRodney W. Grimes #ifdef lint 1572df8bae1dSRodney W. Grimes src_map++; 15730d94caffSDavid Greenman #endif /* lint */ 1574df8bae1dSRodney W. Grimes 1575210a6886SKonstantin Belousov upgrade = src_entry == dst_entry; 15760973283dSKonstantin Belousov access = prot = dst_entry->protection; 1577210a6886SKonstantin Belousov 1578df8bae1dSRodney W. Grimes src_object = src_entry->object.vm_object; 15797afab86cSAlan Cox src_pindex = OFF_TO_IDX(src_entry->offset); 1580df8bae1dSRodney W. Grimes 15810973283dSKonstantin Belousov if (upgrade && (dst_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) { 15820973283dSKonstantin Belousov dst_object = src_object; 15830973283dSKonstantin Belousov vm_object_reference(dst_object); 15840973283dSKonstantin Belousov } else { 1585df8bae1dSRodney W. Grimes /* 15860d94caffSDavid Greenman * Create the top-level object for the destination entry. (Doesn't 15870d94caffSDavid Greenman * actually shadow anything - we copy the pages directly.) 1588df8bae1dSRodney W. Grimes */ 158924a1cce3SDavid Greenman dst_object = vm_object_allocate(OBJT_DEFAULT, 1590d1780e8dSKonstantin Belousov atop(dst_entry->end - dst_entry->start)); 1591f8a47341SAlan Cox #if VM_NRESERVLEVEL > 0 1592f8a47341SAlan Cox dst_object->flags |= OBJ_COLORED; 1593f8a47341SAlan Cox dst_object->pg_color = atop(dst_entry->start); 1594f8a47341SAlan Cox #endif 15950973283dSKonstantin Belousov } 1596df8bae1dSRodney W. Grimes 159789f6b863SAttilio Rao VM_OBJECT_WLOCK(dst_object); 1598210a6886SKonstantin Belousov KASSERT(upgrade || dst_entry->object.vm_object == NULL, 1599121fd461SKonstantin Belousov ("vm_fault_copy_entry: vm_object not NULL")); 16000973283dSKonstantin Belousov if (src_object != dst_object) { 16013f289c3fSJeff Roberson dst_object->domain = src_object->domain; 1602df8bae1dSRodney W. Grimes dst_entry->object.vm_object = dst_object; 1603df8bae1dSRodney W. Grimes dst_entry->offset = 0; 16043364c323SKonstantin Belousov dst_object->charge = dst_entry->end - dst_entry->start; 16050973283dSKonstantin Belousov } 1606210a6886SKonstantin Belousov if (fork_charge != NULL) { 1607ef694c1aSEdward Tomasz Napierala KASSERT(dst_entry->cred == NULL, 1608121fd461SKonstantin Belousov ("vm_fault_copy_entry: leaked swp charge")); 1609ef694c1aSEdward Tomasz Napierala dst_object->cred = curthread->td_ucred; 1610ef694c1aSEdward Tomasz Napierala crhold(dst_object->cred); 1611121fd461SKonstantin Belousov *fork_charge += dst_object->charge; 16120973283dSKonstantin Belousov } else if (dst_object->cred == NULL) { 16130973283dSKonstantin Belousov KASSERT(dst_entry->cred != NULL, ("no cred for entry %p", 16140973283dSKonstantin Belousov dst_entry)); 1615ef694c1aSEdward Tomasz Napierala dst_object->cred = dst_entry->cred; 1616ef694c1aSEdward Tomasz Napierala dst_entry->cred = NULL; 1617210a6886SKonstantin Belousov } 16180973283dSKonstantin Belousov 1619210a6886SKonstantin Belousov /* 1620210a6886SKonstantin Belousov * If not an upgrade, then enter the mappings in the pmap as 1621210a6886SKonstantin Belousov * read and/or execute accesses. Otherwise, enter them as 1622210a6886SKonstantin Belousov * write accesses. 1623210a6886SKonstantin Belousov * 1624210a6886SKonstantin Belousov * A writeable large page mapping is only created if all of 1625210a6886SKonstantin Belousov * the constituent small page mappings are modified. 
Marking 1626210a6886SKonstantin Belousov * PTEs as modified on inception allows promotion to happen 1627210a6886SKonstantin Belousov * without taking a potentially large number of soft faults. 1628210a6886SKonstantin Belousov */ 1629210a6886SKonstantin Belousov if (!upgrade) 1630210a6886SKonstantin Belousov access &= ~VM_PROT_WRITE; 1631df8bae1dSRodney W. Grimes 1632df8bae1dSRodney W. Grimes /* 1633ef45823eSKonstantin Belousov * Loop through all of the virtual pages within the entry's 1634ef45823eSKonstantin Belousov * range, copying each page from the source object to the 1635ef45823eSKonstantin Belousov * destination object. Since the source is wired, those pages 1636ef45823eSKonstantin Belousov * must exist. In contrast, the destination is pageable. 1637ef45823eSKonstantin Belousov * Since the destination object does not share any backing storage 1638ef45823eSKonstantin Belousov * with the source object, all of its pages must be dirtied, 1639ef45823eSKonstantin Belousov * regardless of whether they can be written. 1640df8bae1dSRodney W. Grimes */ 16417afab86cSAlan Cox for (vaddr = dst_entry->start, dst_pindex = 0; 1642df8bae1dSRodney W. Grimes vaddr < dst_entry->end; 16437afab86cSAlan Cox vaddr += PAGE_SIZE, dst_pindex++) { 16440973283dSKonstantin Belousov again: 1645df8bae1dSRodney W. Grimes /* 1646df8bae1dSRodney W. Grimes * Find the page in the source object, and copy it in. 16474c74acf7SKonstantin Belousov * Because the source is wired down, the page will be 16484c74acf7SKonstantin Belousov * in memory. 1649df8bae1dSRodney W. Grimes */ 16500973283dSKonstantin Belousov if (src_object != dst_object) 165183b375eaSAttilio Rao VM_OBJECT_RLOCK(src_object); 1652c5b65a67SAlan Cox object = src_object; 16537afab86cSAlan Cox pindex = src_pindex + dst_pindex; 16547afab86cSAlan Cox while ((src_m = vm_page_lookup(object, pindex)) == NULL && 1655c5b65a67SAlan Cox (backing_object = object->backing_object) != NULL) { 1656c5b65a67SAlan Cox /* 16574c74acf7SKonstantin Belousov * Unless the source mapping is read-only or 16584c74acf7SKonstantin Belousov * it is presently being upgraded from 16594c74acf7SKonstantin Belousov * read-only, the first object in the shadow 16604c74acf7SKonstantin Belousov * chain should provide all of the pages. In 16614c74acf7SKonstantin Belousov * other words, this loop body should never be 16624c74acf7SKonstantin Belousov * executed when the source mapping is already 16634c74acf7SKonstantin Belousov * read/write. 1664c5b65a67SAlan Cox */ 16654c74acf7SKonstantin Belousov KASSERT((src_entry->protection & VM_PROT_WRITE) == 0 || 16664c74acf7SKonstantin Belousov upgrade, 16674c74acf7SKonstantin Belousov ("vm_fault_copy_entry: main object missing page")); 16684c74acf7SKonstantin Belousov 166983b375eaSAttilio Rao VM_OBJECT_RLOCK(backing_object); 1670c5b65a67SAlan Cox pindex += OFF_TO_IDX(object->backing_object_offset); 16710973283dSKonstantin Belousov if (object != dst_object) 167283b375eaSAttilio Rao VM_OBJECT_RUNLOCK(object); 1673c5b65a67SAlan Cox object = backing_object; 1674c5b65a67SAlan Cox } 16754c74acf7SKonstantin Belousov KASSERT(src_m != NULL, ("vm_fault_copy_entry: page missing")); 16760973283dSKonstantin Belousov 16770973283dSKonstantin Belousov if (object != dst_object) { 16780973283dSKonstantin Belousov /* 16790973283dSKonstantin Belousov * Allocate a page in the destination object. 16800973283dSKonstantin Belousov */ 16812602a2eaSKonstantin Belousov dst_m = vm_page_alloc(dst_object, (src_object == 16822602a2eaSKonstantin Belousov dst_object ?
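/* same object: keep the source-relative page index */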
src_pindex : 0) + dst_pindex, 16832602a2eaSKonstantin Belousov VM_ALLOC_NORMAL); 16840973283dSKonstantin Belousov if (dst_m == NULL) { 16850973283dSKonstantin Belousov VM_OBJECT_WUNLOCK(dst_object); 16860973283dSKonstantin Belousov VM_OBJECT_RUNLOCK(object); 16872c0f13aaSKonstantin Belousov vm_wait(dst_object); 1688c8f780e3SKonstantin Belousov VM_OBJECT_WLOCK(dst_object); 16890973283dSKonstantin Belousov goto again; 16900973283dSKonstantin Belousov } 1691669890eaSAlan Cox pmap_copy_page(src_m, dst_m); 169283b375eaSAttilio Rao VM_OBJECT_RUNLOCK(object); 1693669890eaSAlan Cox dst_m->valid = VM_PAGE_BITS_ALL; 1694bc79b37fSKonstantin Belousov dst_m->dirty = VM_PAGE_BITS_ALL; 16950973283dSKonstantin Belousov } else { 16960973283dSKonstantin Belousov dst_m = src_m; 16970973283dSKonstantin Belousov if (vm_page_sleep_if_busy(dst_m, "fltupg")) 16980973283dSKonstantin Belousov goto again; 16990973283dSKonstantin Belousov vm_page_xbusy(dst_m); 17000973283dSKonstantin Belousov KASSERT(dst_m->valid == VM_PAGE_BITS_ALL, 17010973283dSKonstantin Belousov ("invalid dst page %p", dst_m)); 17020973283dSKonstantin Belousov } 170389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(dst_object); 1704df8bae1dSRodney W. Grimes 1705df8bae1dSRodney W. Grimes /* 1706210a6886SKonstantin Belousov * Enter it in the pmap. If a wired, copy-on-write 1707210a6886SKonstantin Belousov * mapping is being replaced by a write-enabled 1708210a6886SKonstantin Belousov * mapping, then wire that new mapping. 1709df8bae1dSRodney W. Grimes */ 171039ffa8c1SKonstantin Belousov pmap_enter(dst_map->pmap, vaddr, dst_m, prot, 171139ffa8c1SKonstantin Belousov access | (upgrade ? PMAP_ENTER_WIRED : 0), 0); 1712df8bae1dSRodney W. Grimes 1713df8bae1dSRodney W. Grimes /* 1714df8bae1dSRodney W. Grimes * Mark it no longer busy, and put it on the active list. 1715df8bae1dSRodney W. Grimes */ 171689f6b863SAttilio Rao VM_OBJECT_WLOCK(dst_object); 17172965a453SKip Macy 1718210a6886SKonstantin Belousov if (upgrade) { 17190973283dSKonstantin Belousov if (src_m != dst_m) { 17202965a453SKip Macy vm_page_lock(src_m); 17213ae10f74SAttilio Rao vm_page_unwire(src_m, PQ_INACTIVE); 1722e20e8c15SKonstantin Belousov vm_page_unlock(src_m); 17232965a453SKip Macy vm_page_lock(dst_m); 1724210a6886SKonstantin Belousov vm_page_wire(dst_m); 1725e20e8c15SKonstantin Belousov vm_page_unlock(dst_m); 17262965a453SKip Macy } else { 17270973283dSKonstantin Belousov KASSERT(dst_m->wire_count > 0, 17280973283dSKonstantin Belousov ("dst_m %p is not wired", dst_m)); 17290973283dSKonstantin Belousov } 17300973283dSKonstantin Belousov } else { 17312965a453SKip Macy vm_page_lock(dst_m); 1732df8bae1dSRodney W. Grimes vm_page_activate(dst_m); 1733e20e8c15SKonstantin Belousov vm_page_unlock(dst_m); 17342965a453SKip Macy } 1735c7aebda8SAttilio Rao vm_page_xunbusy(dst_m); 1736df8bae1dSRodney W. Grimes } 173789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(dst_object); 1738210a6886SKonstantin Belousov if (upgrade) { 1739210a6886SKonstantin Belousov dst_entry->eflags &= ~(MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY); 1740210a6886SKonstantin Belousov vm_object_deallocate(src_object); 1741210a6886SKonstantin Belousov } 1742df8bae1dSRodney W. Grimes } 174326f9a767SRodney W. Grimes 17445730afc9SAlan Cox /* 17455730afc9SAlan Cox * Block entry into the machine-independent layer's page fault handler by 17465730afc9SAlan Cox * the calling thread. Subsequent calls to vm_fault() by that thread will 17475730afc9SAlan Cox * return KERN_PROTECTION_FAILURE. 
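 *
 * (An editor's sketch of the usual save/restore pairing, not a quote
 * of any particular caller:
 *
 *	int save;
 *
 *	save = vm_fault_disable_pagefaults();
 *	error = uiomove(buf, resid, uio);	-- region that must not fault
 *	vm_fault_enable_pagefaults(save);
 * )
 *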
Enable machine-dependent handling of 17485730afc9SAlan Cox * spurious page faults. 17495730afc9SAlan Cox */ 17502801687dSKonstantin Belousov int 17512801687dSKonstantin Belousov vm_fault_disable_pagefaults(void) 17522801687dSKonstantin Belousov { 17532801687dSKonstantin Belousov 17545730afc9SAlan Cox return (curthread_pflags_set(TDP_NOFAULTING | TDP_RESETSPUR)); 17552801687dSKonstantin Belousov } 17562801687dSKonstantin Belousov 17572801687dSKonstantin Belousov void 17582801687dSKonstantin Belousov vm_fault_enable_pagefaults(int save) 17592801687dSKonstantin Belousov { 17602801687dSKonstantin Belousov 17612801687dSKonstantin Belousov curthread_pflags_restore(save); 17622801687dSKonstantin Belousov } 1763
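/*
 * Editor's appendix: a minimal userland sketch, not part of vm_fault.c,
 * that reproduces two pieces of arithmetic from above under stated
 * assumptions (VM_FAULT_READ_DEFAULT is taken to be 16 pages; all names
 * here are local stand-ins).  It prints the cluster-aligned read-ahead
 * window that vm_fault() would request for a fault on a given page index,
 * and the outward spiral of candidate addresses that vm_fault_prefault()
 * probes.  Guarded out so that it can never affect the kernel build.
 */
#if 0
#include <stdio.h>

#define	PAGE_SIZE		4096
#define	VM_FAULT_READ_DEFAULT	16	/* assumed cluster size, in pages */

int
main(void)
{
	unsigned long pindex = 37;	/* faulting page index in the object */
	unsigned long cluster_offset, behind, ahead;
	long addra = 0x100000, addr;
	int backward = 2, forward = 2, i;

	/* Cluster-aligned read-ahead window, as computed in vm_fault(). */
	cluster_offset = pindex % VM_FAULT_READ_DEFAULT;
	behind = cluster_offset;	/* also capped by the entry start */
	ahead = VM_FAULT_READ_DEFAULT - 1 - cluster_offset;
	printf("window: pindex %lu..%lu (%lu pages)\n",
	    pindex - behind, pindex + ahead, behind + 1 + ahead);

	/* Outward probe spiral, as generated in vm_fault_prefault(). */
	for (i = 0; i < 2 * (backward > forward ? backward : forward); i++) {
		addr = addra + ((i >> 1) + 1) *
		    ((i & 1) == 0 ? -PAGE_SIZE : PAGE_SIZE);
		printf("probe %d: 0x%lx\n", i, (unsigned long)addr);
	}
	return (0);
}
#endif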