/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */
/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);
static void vnode_pager_update_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
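/*
 * The VM system reaches the functions below through the pager operations
 * vector registered for OBJT_VNODE objects.  Roughly (a sketch of the
 * dispatch done by the generic pager code, not a verbatim quote):
 *
 *	r = (*pagertab[object->type]->pgo_getpages)(object, m, count,
 *	    rbehind, rahead);
 */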
struct pagerops vnodepagerops = {
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_getpages_async = vnode_pager_getpages_async,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
	.pgo_update_writecount = vnode_pager_update_writecount,
	.pgo_release_writecount = vnode_pager_release_writecount,
};

static struct domainset *vnode_domainset = NULL;

SYSCTL_PROC(_debug, OID_AUTO, vnode_domainset, CTLTYPE_STRING | CTLFLAG_RW,
    &vnode_domainset, 0, sysctl_handle_domainset, "A",
    "Default vnode NUMA policy");

static int nvnpbufs;
SYSCTL_INT(_vm, OID_AUTO, vnode_pbufs, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &nvnpbufs, 0, "number of physical buffers allocated for vnode pager");

static uma_zone_t vnode_pbuf_zone;

static void
vnode_pager_init(void *dummy)
{

#ifdef __LP64__
	nvnpbufs = nswbuf * 2;
#else
	nvnpbufs = nswbuf / 2;
#endif
	TUNABLE_INT_FETCH("vm.vnode_pbufs", &nvnpbufs);
	vnode_pbuf_zone = pbuf_zsecond_create("vnpbuf", nvnpbufs);
}
SYSINIT(vnode_pager, SI_SUB_CPU, SI_ORDER_ANY, vnode_pager_init, NULL);

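/*
 * Example of the sizing above (nswbuf == 256 is an assumed value, for
 * illustration): on an LP64 machine the zone is created with a limit of
 * 512 pbufs.  Because the sysctl is a read-only tunable, the default can
 * be overridden from the loader, e.g. in /boot/loader.conf:
 *
 *	vm.vnode_pbufs="1024"
 */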
/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	object = vp->v_object;
	if (object != NULL)
		return (0);

	if (size == 0) {
		if (vn_isdisk(vp, NULL)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.
	 */
	VM_OBJECT_WLOCK(object);
	object->ref_count--;
	VM_OBJECT_WUNLOCK(object);
	vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL || obj->handle != vp)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	MPASS(obj->type == OBJT_VNODE);
	umtx_shm_object_terminated(obj);
	if (obj->ref_count == 0) {
		/*
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0) {
			vm_object_set_flag(obj, OBJ_DEAD);

			/*
			 * Clean pages and flush buffers.
			 */
			vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(obj);

			vinvalbuf(vp, V_SAVE, 0, 0);

			BO_LOCK(&vp->v_bufobj);
			vp->v_bufobj.bo_flag |= BO_DEAD;
			BO_UNLOCK(&vp->v_bufobj);

			VM_OBJECT_WLOCK(obj);
			vm_object_terminate(obj);
		} else {
			/*
			 * Waiters were already handled during object
			 * termination.  The exclusive vnode lock hopefully
			 * prevented new waiters from referencing the dying
			 * object.
			 */
			vp->v_object = NULL;
			VM_OBJECT_WUNLOCK(obj);
		}
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}

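/*
 * A minimal usage sketch (hypothetical filesystem; xxx_open() and the
 * i_size field are illustrative names, not a real VFS): a filesystem
 * typically creates the backing object when a regular file is opened and
 * relies on the vnode reclaim path to call vnode_destroy_vobject() when
 * the vnode is torn down.
 *
 *	static int
 *	xxx_open(struct vop_open_args *ap)
 *	{
 *		...
 *		vnode_create_vobject(ap->a_vp, ip->i_size, ap->a_td);
 *		return (0);
 *	}
 */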
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *)handle;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
	KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));
retry:
	object = vp->v_object;

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;
		object->domain.dr_policy = vnode_domainset;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were allocating.
			 */
			VI_UNLOCK(vp);
			VM_OBJECT_WLOCK(object);
			KASSERT(object->ref_count == 1,
			    ("leaked ref %p %d", object, object->ref_count));
			object->type = OBJT_DEAD;
			object->ref_count = 0;
			VM_OBJECT_WUNLOCK(object);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		VM_OBJECT_WLOCK(object);
		object->ref_count++;
#if VM_NRESERVLEVEL > 0
		vm_object_color(object, 0);
#endif
		VM_OBJECT_WUNLOCK(object);
	}
	vrefact(vp);
	return (object);
}

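/*
 * Note on the retry above: concurrent callers can race to create the
 * object because the new object is allocated without the vnode interlock
 * held.  The loser of the race destroys its freshly allocated object and
 * retries, so all callers end up sharing one OBJT_VNODE object.  Each
 * successful call returns the object with an additional reference and
 * also takes an extra vnode reference via vrefact().
 */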
/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VI_LOCK(vp);

	/*
	 * vm_map_entry_set_vnode_text() cannot reach this vnode by
	 * following object->handle.  Clear all text references now.
	 * This also clears the transient references from
	 * kern_execve(), which is fine because dead_vnodeops uses nop
	 * for VOP_UNSET_TEXT().
	 */
	if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);
	VM_OBJECT_WUNLOCK(object);
	while (refs-- > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	uintptr_t lockstate;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || vp->v_iflag & VI_DOOMED)
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;

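	/*
	 * Worked example of the block/page conversion below (assuming 4K
	 * pages): with a 32K filesystem block, pagesperblock is 8, so page
	 * index 19 lives in logical block 19 / 8 = 2.  With 512-byte
	 * blocks, blocksperpage is 8 and page index 19 starts at logical
	 * block 19 * 8 = 152.
	 */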
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	lockstate = VM_OBJECT_DROP(object);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VM_OBJECT_PICKUP(object, lockstate);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			/*
			 * The BMAP vop can report a partial block in the
			 * 'after', but must not report blocks after EOF.
			 * Assert the latter, and truncate 'after' in case
			 * of the former.
			 */
			KASSERT((reqblock + *after) * pagesperblock <
			    roundup2(object->size, pagesperblock),
			    ("%s: reqblock %jd after %d size %ju", __func__,
			    (intmax_t)reqblock, *after,
			    (uintmax_t)object->size));
			*after *= pagesperblock;
			*after += pagesperblock - (poff + 1);
			if (pindex + *after >= object->size)
				*after = object->size - 1 - pindex;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

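/*
 * Worked example for the partial-page handling in vnode_pager_setsize()
 * below (4K pages and 512-byte DEV_BSIZE assumed): truncating a file to
 * nsize = 10000 leaves page index 2 (file bytes 8192-12287) as the last
 * page.  base = 10000 & PAGE_MASK = 1808, so bytes [1808, 4096) of that
 * page are zeroed and marked valid; base is then rounded up to 2048 so
 * that the dirty bits of the partially zeroed 512-byte block survive,
 * and only bytes [2048, 4096) have their dirty bits cleared.
 */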
/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
/* 	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
	VM_OBJECT_WLOCK(object);
	if (object->type == OBJT_DEAD) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	KASSERT(object->type == OBJT_VNODE,
	    ("not vnode-backed object %p", object));
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if ((nsize & PAGE_MASK) &&
		    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
		    m->valid != 0) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * Update the valid bits to reflect the blocks that
			 * have been zeroed.  Some of these valid bits may
			 * have already been set.
			 */
			vm_page_set_valid_range(m, base, size);

			/*
			 * Round "base" to the next block boundary so that the
			 * dirty bit for a partially zeroed block is not
			 * cleared.
			 */
			base = roundup2(base, DEV_BSIZE);

			/*
			 * Clear out partial-page dirty bits.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (address < 0)
		return -1;

	if (vp->v_iflag & VI_DOOMED)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (vp->v_iflag & VI_DOOMED)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

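		/*
		 * Example of the bit arithmetic above (4K page, 512-byte
		 * DEV_BSIZE assumed): the page's valid mask holds one bit
		 * per 512-byte chunk, so with bsize = 1024 and i = 3,
		 * vm_page_bits(3072, 1024) is 0xc0 and the block is read
		 * only when both of those chunks are still invalid.
		 */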
		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			atomic_add_long(&runningbufspace, bp->b_runningbufspace);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			uma_zfree(vnode_pbuf_zone, bp);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		VM_OBJECT_WLOCK(object);
		m->valid |= bits;
		VM_OBJECT_WUNLOCK(object);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	VM_OBJECT_WUNLOCK(object);
	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	VM_OBJECT_WLOCK(object);
	return rtval;
}

static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	VM_OBJECT_WUNLOCK(object);
	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages_async not implemented\n"));
	VM_OBJECT_WLOCK(object);
	return (rtval);
}

/*
 * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
 * local filesystems, where partially valid pages can only occur at
 * the end of file.
 */
int
vnode_pager_local_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

int
vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg));
}

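/*
 * A local filesystem hooks the helpers above into its vnode operations
 * vector, roughly as below (a sketch with an illustrative "xxx" name;
 * consult a real filesystem such as FFS for the authoritative wiring):
 *
 *	struct vop_vector xxx_vnodeops = {
 *		...
 *		.vop_getpages =		vnode_pager_local_getpages,
 *		.vop_getpages_async =	vnode_pager_local_getpages_async,
 *	};
 */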
/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	struct bufobj *bo;
	struct buf *bp;
	off_t foff;
#ifdef INVARIANTS
	off_t blkno0;
#endif
	int bsize, pagesperblock;
	int error, before, after, rbehind, rahead, poff, i;
	int bytecount, secmask;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("%s does not support devices", __func__));

	if (vp->v_iflag & VI_DOOMED)
		return (VM_PAGER_BAD);

	object = vp->v_object;
	foff = IDX_TO_OFF(m[0]->pindex);
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;

	KASSERT(foff < object->un_pager.vnp.vnp_size,
	    ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
	KASSERT(count <= nitems(bp->b_pages),
	    ("%s: requested %d pages", __func__, count));

	/*
	 * The last page has valid blocks.  Invalid part can only
	 * exist at the end of file, and the page is made fully valid
	 * by zeroing in vm_pager_get_pages().
	 */
	if (m[count - 1]->valid != 0 && --count == 0) {
		if (iodone != NULL)
			iodone(arg, m, 1, 0);
		return (VM_PAGER_OK);
	}

	bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
	if (error == EOPNOTSUPP) {
		uma_zfree(vnode_pbuf_zone, bp);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_old(object, m[i]);
			if (error)
				break;
		}
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		return (VM_PAGER_ERROR);
	}

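	/*
	 * Example of the translation so far (4K pages, 512-byte DEV_BSIZE
	 * and a 32K block size assumed): for foff = 36864 the VOP_BMAP()
	 * above looked up logical block 36864 / 32768 = 1; the in-block
	 * byte offset of 4096 is converted below into 4096 / 512 = 8
	 * DEV_BSIZE sectors added to bp->b_blkno.
	 */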
	/*
	 * If the file system supports BMAP, but blocksize is smaller
	 * than a page size, then use special small filesystem code.
	 */
	if (pagesperblock == 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_smlfs(object, m[i]);
			if (error)
				break;
		}
		return (error);
	}

	/*
	 * A sparse file can be encountered only for a single page request,
	 * which may not be preceded by call to vm_pager_haspage().
	 */
	if (bp->b_blkno == -1) {
		KASSERT(count == 1,
		    ("%s: array[%d] request to a sparse file %p", __func__,
		    count, vp));
		uma_zfree(vnode_pbuf_zone, bp);
		pmap_zero_page(m[0]);
		KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
		    __func__, m[0]));
		VM_OBJECT_WLOCK(object);
		m[0]->valid = VM_PAGE_BITS_ALL;
		VM_OBJECT_WUNLOCK(object);
		return (VM_PAGER_OK);
	}

#ifdef INVARIANTS
	blkno0 = bp->b_blkno;
#endif
	bp->b_blkno += (foff % bsize) / DEV_BSIZE;

	/* Recalculate blocks available after/before to pages. */
	poff = (foff % bsize) / PAGE_SIZE;
	before *= pagesperblock;
	before += poff;
	after *= pagesperblock;
	after += pagesperblock - (poff + 1);
	if (m[0]->pindex + after >= object->size)
		after = object->size - 1 - m[0]->pindex;
	KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
	    __func__, count, after + 1));
	after -= count - 1;

	/* Trim requested rbehind/rahead to possible values. */
	rbehind = a_rbehind ? *a_rbehind : 0;
	rahead = a_rahead ? *a_rahead : 0;
	rbehind = min(rbehind, before);
	rbehind = min(rbehind, m[0]->pindex);
	rahead = min(rahead, after);
	rahead = min(rahead, object->size - m[count - 1]->pindex);
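	/*
	 * Worked example of the recalculation above (4K pages and 32K
	 * blocks assumed): a one-page request at pindex 10 has foff =
	 * 40960 and poff = 2.  If VOP_BMAP() reported one valid block on
	 * each side (before = after = 1), the page counts become
	 * before = 1 * 8 + 2 = 10 and after = 1 * 8 + (8 - 3) = 13,
	 * which bound how far rbehind/rahead may reach.
	 */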
	/*
	 * Check that total amount of pages fit into buf.  Trim rbehind and
	 * rahead evenly if not.
	 */
	if (rbehind + rahead + count > nitems(bp->b_pages)) {
		int trim, sum;

		trim = rbehind + rahead + count - nitems(bp->b_pages) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Roundup rbehind trim to block size. */
			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	KASSERT(rbehind + rahead + count <= nitems(bp->b_pages),
	    ("%s: behind %d ahead %d count %d", __func__,
	    rbehind, rahead, count));

	/*
	 * Fill in the bp->b_pages[] array with requested and optional
	 * read behind or read ahead pages.  Read behind pages are looked
	 * up in a backward direction, down to a first cached page.  Same
	 * for read ahead pages, but there is no need to shift the array
	 * in case of encountering a cached page.
	 */
	i = bp->b_npages = 0;
	if (rbehind) {
		vm_pindex_t startpindex, tpindex;
		vm_page_t p;

		VM_OBJECT_WLOCK(object);
		startpindex = m[0]->pindex - rbehind;
		if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
		    p->pindex >= startpindex)
			startpindex = p->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
		for (tpindex = m[0]->pindex - 1;
		    tpindex >= startpindex && tpindex < m[0]->pindex;
		    tpindex--, i++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL) {
				/* Shift the array. */
				for (int j = 0; j < i; j++)
					bp->b_pages[j] = bp->b_pages[j +
					    tpindex + 1 - startpindex];
				break;
			}
			bp->b_pages[tpindex - startpindex] = p;
		}

		bp->b_pgbefore = i;
		bp->b_npages += i;
		bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
	} else
		bp->b_pgbefore = 0;

	/* Requested pages. */
	for (int j = 0; j < count; j++, i++)
		bp->b_pages[i] = m[j];
	bp->b_npages += count;

	if (rahead) {
		vm_pindex_t endpindex, tpindex;
		vm_page_t p;

		if (!VM_OBJECT_WOWNED(object))
			VM_OBJECT_WLOCK(object);
		endpindex = m[count - 1]->pindex + rahead + 1;
		if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
		    p->pindex < endpindex)
			endpindex = p->pindex;
		if (endpindex > object->size)
			endpindex = object->size;

		for (tpindex = m[count - 1]->pindex + 1;
		    tpindex < endpindex; i++, tpindex++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			bp->b_pages[i] = p;
		}

		bp->b_pgafter = i - bp->b_npages;
		bp->b_npages = i;
	} else
		bp->b_pgafter = 0;

	if (VM_OBJECT_WOWNED(object))
		VM_OBJECT_WUNLOCK(object);

	/* Report back actual behind/ahead read. */
	if (a_rbehind)
		*a_rbehind = bp->b_pgbefore;
	if (a_rahead)
		*a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
	KASSERT(bp->b_npages <= nitems(bp->b_pages),
	    ("%s: buf %p overflowed", __func__, bp));
	for (int j = 1, prev = 0; j < bp->b_npages; j++) {
		if (bp->b_pages[j] == bogus_page)
			continue;
		KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
		    j - prev, ("%s: pages array not consecutive, bp %p",
		    __func__, bp));
		prev = j;
	}
#endif

	/*
	 * Recalculate first offset and bytecount with regards to read behind.
	 * Truncate bytecount to vnode real size and round up physical size
	 * for real devices.
	 */
	foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	bytecount = bp->b_npages << PAGE_SHIFT;
	if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
		bytecount = object->un_pager.vnp.vnp_size - foff;
	secmask = bo->bo_bsize - 1;
	KASSERT(secmask < PAGE_SIZE && secmask > 0,
	    ("%s: sector size %d too large", __func__, secmask + 1));
	bytecount = (bytecount + secmask) & ~secmask;

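	/*
	 * Example of the rounding above (512-byte sectors assumed, so
	 * secmask = 0x1ff): a read truncated to 10000 bytes at EOF is
	 * padded back out to (10000 + 511) & ~511 = 10240 bytes, a
	 * multiple of the device sector size.
	 */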
	/*
	 * And map the pages to be read into the kva, if the filesystem
	 * requires mapped buffers.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
	    unmapped_buf_allowed) {
		bp->b_data = unmapped_buf;
		bp->b_offset = 0;
	} else {
		bp->b_data = bp->b_kvabase;
		pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	}

	/* Build a minimal buffer header. */
	bp->b_iocmd = BIO_READ;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	pbgetbo(bo, bp);
	bp->b_vp = vp;
	bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount;
	bp->b_iooffset = dbtob(bp->b_blkno);
	KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
	    (blkno0 - bp->b_blkno) * DEV_BSIZE +
	    IDX_TO_OFF(m[0]->pindex) % bsize,
	    ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
	    "blkno0 %ju b_blkno %ju", bsize,
	    (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
	    (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));

	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, bp->b_npages);

	if (iodone != NULL) { /* async */
		bp->b_pgiodone = iodone;
		bp->b_caller1 = arg;
		bp->b_iodone = vnode_pager_generic_getpages_done_async;
		bp->b_flags |= B_ASYNC;
		BUF_KERNPROC(bp);
		bstrategy(bp);
		return (VM_PAGER_OK);
	} else {
		bp->b_iodone = bdone;
		bstrategy(bp);
		bwait(bp, PVM, "vnread");
		error = vnode_pager_generic_getpages_done(bp);
		for (i = 0; i < bp->b_npages; i++)
			bp->b_pages[i] = NULL;
		bp->b_vp = NULL;
		pbrelbo(bp);
		uma_zfree(vnode_pbuf_zone, bp);
		return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
	}
}

static void
vnode_pager_generic_getpages_done_async(struct buf *bp)
{
	int error;

	error = vnode_pager_generic_getpages_done(bp);
	/* Run the iodone upon the requested range. */
	bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore,
	    bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error);
	for (int i = 0; i < bp->b_npages; i++)
		bp->b_pages[i] = NULL;
	bp->b_vp = NULL;
	pbrelbo(bp);
	uma_zfree(vnode_pbuf_zone, bp);
}

static int
vnode_pager_generic_getpages_done(struct buf *bp)
{
	vm_object_t object;
	off_t tfoff, nextoff;
	int i, error;

	error = (bp->b_ioflags & BIO_ERROR) != 0 ? EIO : 0;
	object = bp->b_vp->v_object;

	if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
		if (!buf_mapped(bp)) {
			bp->b_data = bp->b_kvabase;
			pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages,
			    bp->b_npages);
		}
		bzero(bp->b_data + bp->b_bcount,
		    PAGE_SIZE * bp->b_npages - bp->b_bcount);
	}
	if (buf_mapped(bp)) {
		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
		bp->b_data = unmapped_buf;
	}

	VM_OBJECT_WLOCK(object);
	for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	    i < bp->b_npages; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = bp->b_pages[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			KASSERT(mt->dirty == 0,
			    ("%s: page %p is dirty", __func__, mt));
			KASSERT(!pmap_page_is_mapped(mt),
			    ("%s: page %p is mapped", __func__, mt));
		} else {
			/*
			 * Read did not fill up entire page.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
static int
vnode_pager_generic_getpages_done(struct buf *bp)
{
	vm_object_t object;
	off_t tfoff, nextoff;
	int i, error;

	error = (bp->b_ioflags & BIO_ERROR) != 0 ? EIO : 0;
	object = bp->b_vp->v_object;

	if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
		if (!buf_mapped(bp)) {
			bp->b_data = bp->b_kvabase;
			pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages,
			    bp->b_npages);
		}
		bzero(bp->b_data + bp->b_bcount,
		    PAGE_SIZE * bp->b_npages - bp->b_bcount);
	}
	if (buf_mapped(bp)) {
		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
		bp->b_data = unmapped_buf;
	}

	VM_OBJECT_WLOCK(object);
	for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	    i < bp->b_npages; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = bp->b_pages[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			KASSERT(mt->dirty == 0,
			    ("%s: page %p is dirty", __func__, mt));
			KASSERT(!pmap_page_is_mapped(mt),
			    ("%s: page %p is mapped", __func__, mt));
		} else {
			/*
			 * Read did not fill up entire page.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_valid_range(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			KASSERT((mt->dirty & vm_page_bits(0,
			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
			    ("%s: page %p is dirty", __func__, mt));
		}

		if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)
			vm_page_readahead_finish(mt);
	}
	VM_OBJECT_WUNLOCK(object);
	if (error != 0)
		printf("%s: I/O read error %d\n", __func__, error);

	return (error);
}
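
/*
 * A filesystem with no special paging requirements can satisfy both
 * VOP_GETPAGES and VOP_PUTPAGES with the generic implementations in this
 * file.  As a minimal illustrative sketch, essentially what the default
 * vop_stdputpages() bypass does (the vop_putpages_args field names follow
 * the usual VOP conventions and should be checked against the tree):
 *
 *	static int
 *	xxfs_putpages(struct vop_putpages_args *ap)
 *	{
 *		return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_sync, ap->a_rtvals));
 *	}
 */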
/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
    int flags, int *rtvals)
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O (i.e. do a BMAP
	 * operation).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed XXX.
	 */
	if (vm_page_count_min())
		flags |= VM_PAGER_PUT_SYNC;

	/*
	 * Call the device-specific putpages function.
	 */
	vp = object->handle;
	VM_OBJECT_WUNLOCK(object);
	rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	VM_OBJECT_WLOCK(object);
}

static int
vn_off2bidx(vm_ooffset_t offset)
{

	return ((offset & PAGE_MASK) / DEV_BSIZE);
}

static bool
vn_dirty_blk(vm_page_t m, vm_ooffset_t offset)
{

	KASSERT(IDX_TO_OFF(m->pindex) <= offset &&
	    offset < IDX_TO_OFF(m->pindex + 1),
	    ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex,
	    (uintmax_t)offset));
	return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0);
}
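
/*
 * Worked example for the two helpers above: a page's dirty mask keeps
 * one bit per DEV_BSIZE block.  Assuming PAGE_SIZE is 4096 and DEV_BSIZE
 * is 512, each page carries 8 dirty bits, and a file offset of 5632
 * (offset 1536 within its page) maps to block index 1536 / 512 = 3, so
 * vn_dirty_blk() tests bit 3 of m->dirty for that page.
 */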

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
    int flags, int *rtvals)
{
	vm_object_t object;
	vm_page_t m;
	vm_ooffset_t maxblksz, next_offset, poffset, prev_offset;
	struct uio auio;
	struct iovec aiov;
	off_t prev_resid, wrsz;
	int count, error, i, maxsize, ncount, pgoff, ppscheck;
	bool in_hole;
	static struct timeval lastfail;
	static int curfail;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_ERROR;

	if ((int64_t)ma[0]->pindex < 0) {
		printf("vnode_pager_generic_putpages: "
		    "attempt to write meta-data 0x%jx(%lx)\n",
		    (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return (VM_PAGER_BAD);
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(ma[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
	VM_OBJECT_RLOCK(object);
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (!VM_OBJECT_TRYUPGRADE(object)) {
			VM_OBJECT_RUNLOCK(object);
			VM_OBJECT_WLOCK(object);
			if (maxsize + poffset <= object->un_pager.vnp.vnp_size)
				goto downgrade;
		}
		if (object->un_pager.vnp.vnp_size > poffset) {
			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				pgoff = roundup2(pgoff, DEV_BSIZE);

				/*
				 * If the object is locked and the following
				 * conditions hold, then the page's dirty
				 * field cannot be concurrently changed by a
				 * pmap operation.
				 */
				m = ma[ncount - 1];
				vm_page_assert_sbusied(m);
				KASSERT(!pmap_page_is_write_mapped(m),
				    ("vnode_pager_generic_putpages: "
				    "page %p is not read-only", m));
				MPASS(m->dirty != 0);
				vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
				    pgoff);
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		for (i = ncount; i < count; i++)
			rtvals[i] = VM_PAGER_BAD;
downgrade:
		VM_OBJECT_LOCK_DOWNGRADE(object);
	}
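
	/*
	 * The writes below are performed with UIO_NOCOPY: the
	 * filesystem's VOP_WRITE consumes the data directly from the
	 * busied pages backing the range rather than copying it through
	 * the iovec, which is why aiov.iov_base can remain NULL.
	 */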
	auio.uio_iov = &aiov;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_td = NULL;
	maxblksz = roundup2(poffset + maxsize, DEV_BSIZE);

	for (prev_offset = poffset; prev_offset < maxblksz;) {
		/* Skip clean blocks. */
		for (in_hole = true; in_hole && prev_offset < maxblksz;) {
			m = ma[OFF_TO_IDX(prev_offset - poffset)];
			for (i = vn_off2bidx(prev_offset);
			    i < sizeof(vm_page_bits_t) * NBBY &&
			    prev_offset < maxblksz; i++) {
				if (vn_dirty_blk(m, prev_offset)) {
					in_hole = false;
					break;
				}
				prev_offset += DEV_BSIZE;
			}
		}
		if (in_hole)
			goto write_done;

		/* Find longest run of dirty blocks. */
		for (next_offset = prev_offset; next_offset < maxblksz;) {
			m = ma[OFF_TO_IDX(next_offset - poffset)];
			for (i = vn_off2bidx(next_offset);
			    i < sizeof(vm_page_bits_t) * NBBY &&
			    next_offset < maxblksz; i++) {
				if (!vn_dirty_blk(m, next_offset))
					goto start_write;
				next_offset += DEV_BSIZE;
			}
		}
start_write:
		if (next_offset > poffset + maxsize)
			next_offset = poffset + maxsize;

		/*
		 * Getting here requires finding a dirty block in the
		 * 'skip clean blocks' loop.
		 */
		MPASS(prev_offset < next_offset);
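
		/*
		 * [prev_offset, next_offset) now delimits a maximal run
		 * of dirty DEV_BSIZE blocks.  The run may cross page
		 * boundaries, e.g. it can start at block 3 of one page
		 * and end after block 1 of the following page; the whole
		 * run is then pushed out with the single VOP_WRITE below.
		 */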
		VM_OBJECT_RUNLOCK(object);
		aiov.iov_base = NULL;
		auio.uio_iovcnt = 1;
		auio.uio_offset = prev_offset;
		prev_resid = auio.uio_resid = aiov.iov_len = next_offset -
		    prev_offset;
		error = VOP_WRITE(vp, &auio,
		    vnode_pager_putpages_ioflags(flags), curthread->td_ucred);

		wrsz = prev_resid - auio.uio_resid;
		if (wrsz == 0) {
			if (ppsratecheck(&lastfail, &curfail, 1) != 0) {
				vn_printf(vp, "vnode_pager_putpages: "
				    "zero-length write at %ju resid %zd\n",
				    (uintmax_t)auio.uio_offset,
				    auio.uio_resid);
			}
			VM_OBJECT_RLOCK(object);
			break;
		}

		/* Adjust the starting offset for next iteration. */
		prev_offset += wrsz;
		MPASS(auio.uio_offset == prev_offset);

		ppscheck = 0;
		if (error != 0 && (ppscheck = ppsratecheck(&lastfail,
		    &curfail, 1)) != 0)
			vn_printf(vp, "vnode_pager_putpages: I/O error %d\n",
			    error);
		if (auio.uio_resid != 0 && (ppscheck != 0 ||
		    ppsratecheck(&lastfail, &curfail, 1) != 0))
			vn_printf(vp, "vnode_pager_putpages: residual I/O %zd "
			    "at %ju\n", auio.uio_resid,
			    (uintmax_t)ma[0]->pindex);
		VM_OBJECT_RLOCK(object);
		if (error != 0 || auio.uio_resid != 0)
			break;
	}
write_done:
	/* Mark completely processed pages. */
	for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++)
		rtvals[i] = VM_PAGER_OK;
	/* Mark partial EOF page. */
	if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0)
		rtvals[i++] = VM_PAGER_OK;
	/* Unwritten pages in range; as a bonus, report clean ones as written. */
	for (; i < ncount; i++)
		rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR;
	VM_OBJECT_RUNLOCK(object);
	VM_CNT_ADD(v_vnodepgsout, i);
	VM_CNT_INC(v_vnodeout);
	return (rtvals[0]);
}
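
/*
 * Translate pager-level putpages flags into ioflags for VOP_WRITE().
 * For example, a putpages request with only VM_PAGER_PUT_SYNC set
 * yields IO_VMIO | IO_SYNC | (IO_SEQMAX << IO_SEQSHIFT), forcing the
 * filesystem to perform the write synchronously while still hinting
 * maximal sequentiality to the clustering code.
 */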
int
vnode_pager_putpages_ioflags(int pager_flags)
{
	int ioflags;

	/*
	 * Pageouts are already clustered, use IO_ASYNC to force a
	 * bawrite() rather than a bdwrite() to prevent paging I/O
	 * from saturating the buffer cache.  Dummy-up the sequential
	 * heuristic to cause large ranges to cluster.  If neither
	 * IO_SYNC nor IO_ASYNC is set, the system decides how to
	 * cluster.
	 */
	ioflags = IO_VMIO;
	if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0)
		ioflags |= IO_SYNC;
	else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL : 0;
	ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;
	return (ioflags);
}
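
/*
 * A hypothetical caller in a filesystem's putpages path, after a short
 * VOP_WRITE of a run of 'count' pages starting at file offset 'foff' in
 * a vnode of size 'fsize' (all names here are illustrative, not an
 * existing API):
 *
 *	vnode_pager_undirty_pages(ma, rtvals, written,
 *	    fsize - foff, count * PAGE_SIZE);
 *
 * i.e. 'eof' is passed relative to the start of the run, as described
 * in the comment below.
 */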

/*
 * vnode_pager_undirty_pages().
 *
 * A helper to mark pages as clean after a pageout that was possibly
 * done with a short write.  The lpos argument specifies the page run
 * length in bytes, and the written argument specifies how many bytes
 * were actually written.  eof is the offset past the last valid byte
 * in the vnode, computed relative to the absolute file position of
 * the first byte in the run.
 */
void
vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof,
    int lpos)
{
	vm_object_t obj;
	int i, pos, pos_devb;

	if (written == 0 && eof >= lpos)
		return;
	obj = ma[0]->object;
	VM_OBJECT_WLOCK(obj);
	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
		if (pos < trunc_page(written)) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(ma[i]);
		} else {
			/* Partially written page. */
			rtvals[i] = VM_PAGER_AGAIN;
			vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
		}
	}
	if (eof >= lpos) /* avoid truncation */
		goto done;
	for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) {
		if (pos != trunc_page(pos)) {
			/*
			 * The page contains the last valid byte in
			 * the vnode, mark the rest of the page as
			 * clean, potentially making the whole page
			 * clean.
			 */
			pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE);
			vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE -
			    pos_devb);

			/*
			 * If the page was cleaned, report the pageout
			 * on it as successful.  msync() no longer
			 * needs to write out the page, endlessly
			 * creating write requests and dirty buffers.
			 */
			if (ma[i]->dirty == 0)
				rtvals[i] = VM_PAGER_OK;

			pos = round_page(pos);
		} else {
			/* vm_pageout_flush() clears dirty */
			rtvals[i] = VM_PAGER_BAD;
			pos += PAGE_SIZE;
		}
	}
done:
	VM_OBJECT_WUNLOCK(obj);
}

static void
vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	vm_ooffset_t old_wm;

	VM_OBJECT_WLOCK(object);
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	old_wm = object->un_pager.vnp.writemappings;
	object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
	vp = object->handle;
	if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
		ASSERT_VOP_LOCKED(vp, "v_writecount inc");
		VOP_ADD_WRITECOUNT_CHECKED(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	} else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
		ASSERT_VOP_LOCKED(vp, "v_writecount dec");
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	VM_OBJECT_WUNLOCK(object);
}
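
/*
 * Undo the effect of a vnode_pager_update_writecount() call made when a
 * writeable mapping of [start, end) was established.  Unlike the update
 * path, the vnode is not locked on entry here, so the vnode lock (and a
 * suspension-safe mount write reference) must be acquired before the
 * v_writecount can be dropped.
 */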
static void
vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	struct mount *mp;
	vm_offset_t inc;

	VM_OBJECT_WLOCK(object);

	/*
	 * First, recheck the object type to account for the race when
	 * the vnode is reclaimed.
	 */
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}

	/*
	 * Optimize for the case when writemappings is not going to
	 * zero.
	 */
	inc = end - start;
	if (object->un_pager.vnp.writemappings != inc) {
		object->un_pager.vnp.writemappings -= inc;
		VM_OBJECT_WUNLOCK(object);
		return;
	}

	vp = object->handle;
	vhold(vp);
	VM_OBJECT_WUNLOCK(object);
	mp = NULL;
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_SHARED | LK_RETRY);

	/*
	 * Decrement the object's writemappings, by swapping the start
	 * and end arguments for vnode_pager_update_writecount().  If
	 * there was not a race with vnode reclamation, then the
	 * vnode's v_writecount is decremented.
	 */
	vnode_pager_update_writecount(object, end, start);
	VOP_UNLOCK(vp, 0);
	vdrop(vp);
	if (mp != NULL)
		vn_finished_write(mp);
}