/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>
#include <sys/user.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);
static void vnode_pager_update_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_getvp(vm_object_t, struct vnode **, bool *);

const struct pagerops vnodepagerops = {
        .pgo_kvme_type = KVME_TYPE_VNODE,
        .pgo_alloc = vnode_pager_alloc,
        .pgo_dealloc = vnode_pager_dealloc,
        .pgo_getpages = vnode_pager_getpages,
        .pgo_getpages_async = vnode_pager_getpages_async,
        .pgo_putpages = vnode_pager_putpages,
        .pgo_haspage = vnode_pager_haspage,
        .pgo_update_writecount = vnode_pager_update_writecount,
        .pgo_release_writecount = vnode_pager_release_writecount,
        .pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
        .pgo_mightbedirty = vm_object_mightbedirty_,
        .pgo_getvp = vnode_pager_getvp,
};

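/*
 * Added commentary (not part of the original file): the ops above are
 * consumed by the generic pager layer, so a fault on a vnode-backed
 * object reaches this file roughly as
 *
 *	vm_fault() -> vm_pager_get_pages(object, &m, 1, NULL, NULL)
 *		   -> vnodepagerops.pgo_getpages()
 *		   -> VOP_GETPAGES(vp, ...)
 *
 * This call chain is a sketch; see vm/vm_pager.c for the actual
 * dispatch.
 */
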
static struct domainset *vnode_domainset = NULL;

SYSCTL_PROC(_debug, OID_AUTO, vnode_domainset,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RW, &vnode_domainset, 0,
    sysctl_handle_domainset, "A", "Default vnode NUMA policy");

static int nvnpbufs;
SYSCTL_INT(_vm, OID_AUTO, vnode_pbufs, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &nvnpbufs, 0, "number of physical buffers allocated for vnode pager");

static uma_zone_t vnode_pbuf_zone;

static void
vnode_pager_init(void *dummy)
{

#ifdef __LP64__
        nvnpbufs = nswbuf * 2;
#else
        nvnpbufs = nswbuf / 2;
#endif
        TUNABLE_INT_FETCH("vm.vnode_pbufs", &nvnpbufs);
        vnode_pbuf_zone = pbuf_zsecond_create("vnpbuf", nvnpbufs);
}
SYSINIT(vnode_pager, SI_SUB_CPU, SI_ORDER_ANY, vnode_pager_init, NULL);

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
        vm_object_t object;
        vm_ooffset_t size = isize;
        struct vattr va;
        bool last;

        if (!vn_isdisk(vp) && vn_canvmio(vp) == FALSE)
                return (0);

        object = vp->v_object;
        if (object != NULL)
                return (0);

        if (size == 0) {
                if (vn_isdisk(vp)) {
                        size = IDX_TO_OFF(INT_MAX);
                } else {
                        if (VOP_GETATTR(vp, &va, td->td_ucred))
                                return (0);
                        size = va.va_size;
                }
        }

        object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
        /*
         * Dereference the reference we just created.  This assumes
         * that the object is associated with the vp.  We still have
         * to serialize with vnode_pager_dealloc() for the last
         * potential reference.
         */
        VM_OBJECT_RLOCK(object);
        last = refcount_release(&object->ref_count);
        VM_OBJECT_RUNLOCK(object);
        if (last)
                vrele(vp);

        KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

        return (0);
}

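/*
 * Added usage note (hedged): vnode_create_vobject() is typically called
 * when a vnode is first opened for I/O, e.g. from vn_open_vnode(), so
 * that mmap() and the pager find a backing VM object already in place:
 *
 *	error = vnode_create_vobject(vp, va.va_size, td);
 *
 * Passing a size of 0 makes the routine look the size up itself via
 * VOP_GETATTR(), as the code above shows.
 */
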
void
vnode_destroy_vobject(struct vnode *vp)
{
        struct vm_object *obj;

        obj = vp->v_object;
        if (obj == NULL || obj->handle != vp)
                return;
        ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
        VM_OBJECT_WLOCK(obj);
        MPASS(obj->type == OBJT_VNODE);
        umtx_shm_object_terminated(obj);
        if (obj->ref_count == 0) {
                KASSERT((obj->flags & OBJ_DEAD) == 0,
                    ("vnode_destroy_vobject: Terminating dead object"));
                vm_object_set_flag(obj, OBJ_DEAD);

                /*
                 * Clean pages and flush buffers.
                 */
                vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
                VM_OBJECT_WUNLOCK(obj);

                vinvalbuf(vp, V_SAVE, 0, 0);

                BO_LOCK(&vp->v_bufobj);
                vp->v_bufobj.bo_flag |= BO_DEAD;
                BO_UNLOCK(&vp->v_bufobj);

                VM_OBJECT_WLOCK(obj);
                vm_object_terminate(obj);
        } else {
                /*
                 * Woe to the process that tries to page now :-).
                 */
                vm_pager_deallocate(obj);
                VM_OBJECT_WUNLOCK(obj);
        }
        KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}

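/*
 * Added note: vnode_destroy_vobject() is the counterpart invoked from
 * the vnode reclamation path (see vgonel() in kern/vfs_subr.c), with
 * the vnode exclusively locked as the ASSERT_VOP_ELOCKED() above
 * requires.
 */
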
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
        vm_object_t object;
        struct vnode *vp;

        /*
         * Pageout to vnode, no can do yet.
         */
        if (handle == NULL)
                return (NULL);

        vp = (struct vnode *)handle;
        ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
        VNPASS(vp->v_usecount > 0, vp);
retry:
        object = vp->v_object;

        if (object == NULL) {
                /*
                 * Add an object of the appropriate size
                 */
                object = vm_object_allocate(OBJT_VNODE,
                    OFF_TO_IDX(round_page(size)));

                object->un_pager.vnp.vnp_size = size;
                object->un_pager.vnp.writemappings = 0;
                object->domain.dr_policy = vnode_domainset;
                object->handle = handle;
                if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) {
                        VM_OBJECT_WLOCK(object);
                        vm_object_set_flag(object, OBJ_SIZEVNLOCK);
                        VM_OBJECT_WUNLOCK(object);
                }
                VI_LOCK(vp);
                if (vp->v_object != NULL) {
                        /*
                         * Object has been created while we were allocating.
                         */
                        VI_UNLOCK(vp);
                        VM_OBJECT_WLOCK(object);
                        KASSERT(object->ref_count == 1,
                            ("leaked ref %p %d", object, object->ref_count));
                        object->type = OBJT_DEAD;
                        refcount_init(&object->ref_count, 0);
                        VM_OBJECT_WUNLOCK(object);
                        vm_object_destroy(object);
                        goto retry;
                }
                vp->v_object = object;
                VI_UNLOCK(vp);
                vrefact(vp);
        } else {
                vm_object_reference(object);
#if VM_NRESERVLEVEL > 0
                if ((object->flags & OBJ_COLORED) == 0) {
                        VM_OBJECT_WLOCK(object);
                        vm_object_color(object, 0);
                        VM_OBJECT_WUNLOCK(object);
                }
#endif
        }
        return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
        struct vnode *vp;
        int refs;

        vp = object->handle;
        if (vp == NULL)
                panic("vnode_pager_dealloc: pager already dealloced");

        VM_OBJECT_ASSERT_WLOCKED(object);
        vm_object_pip_wait(object, "vnpdea");
        refs = object->ref_count;

        object->handle = NULL;
        object->type = OBJT_DEAD;
        ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
        if (object->un_pager.vnp.writemappings > 0) {
                object->un_pager.vnp.writemappings = 0;
                VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
                CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
                    __func__, vp, vp->v_writecount);
        }
        vp->v_object = NULL;
        VI_LOCK(vp);

        /*
         * vm_map_entry_set_vnode_text() cannot reach this vnode by
         * following object->handle.  Clear all text references now.
         * This also clears the transient references from
         * kern_execve(), which is fine because dead_vnodeops uses nop
         * for VOP_UNSET_TEXT().
         */
        if (vp->v_writecount < 0)
                vp->v_writecount = 0;
        VI_UNLOCK(vp);
        VM_OBJECT_WUNLOCK(object);
        if (refs > 0)
                vunref(vp);
        VM_OBJECT_WLOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
        struct vnode *vp = object->handle;
        daddr_t bn;
        uintptr_t lockstate;
        int err;
        daddr_t reqblock;
        int poff;
        int bsize;
        int pagesperblock, blocksperpage;

        VM_OBJECT_ASSERT_LOCKED(object);
        /*
         * If no vp or vp is doomed or marked transparent to VM, we do not
         * have the page.
         */
        if (vp == NULL || VN_IS_DOOMED(vp))
                return FALSE;
        /*
         * If the offset is beyond end of file we do
         * not have the page.
         */
        if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
                return FALSE;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        pagesperblock = bsize / PAGE_SIZE;
        blocksperpage = 0;
        if (pagesperblock > 0) {
                reqblock = pindex / pagesperblock;
        } else {
                blocksperpage = (PAGE_SIZE / bsize);
                reqblock = pindex * blocksperpage;
        }
        lockstate = VM_OBJECT_DROP(object);
        err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
        VM_OBJECT_PICKUP(object, lockstate);
        if (err)
                return TRUE;
        if (bn == -1)
                return FALSE;
        if (pagesperblock > 0) {
                poff = pindex - (reqblock * pagesperblock);
                if (before) {
                        *before *= pagesperblock;
                        *before += poff;
                }
                if (after) {
                        /*
                         * The BMAP vop can report a partial block in the
                         * 'after', but must not report blocks after EOF.
                         * Assert the latter, and truncate 'after' in case
                         * of the former.
                         */
                        KASSERT((reqblock + *after) * pagesperblock <
                            roundup2(object->size, pagesperblock),
                            ("%s: reqblock %jd after %d size %ju", __func__,
                            (intmax_t)reqblock, *after,
                            (uintmax_t)object->size));
                        *after *= pagesperblock;
                        *after += pagesperblock - (poff + 1);
                        if (pindex + *after >= object->size)
                                *after = object->size - 1 - pindex;
                }
        } else {
                if (before) {
                        *before /= blocksperpage;
                }

                if (after) {
                        *after /= blocksperpage;
                }
        }
        return TRUE;
}

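/*
 * Added worked example of the haspage arithmetic above; the numbers
 * are hypothetical.  With f_iosize = 32K and PAGE_SIZE = 4K,
 * pagesperblock = 8.  A request for pindex 10 maps to reqblock 1 and
 * poff = 10 - 1 * 8 = 2.  If VOP_BMAP() reports before = 0 and
 * after = 1 (in filesystem blocks), they are rescaled to pages as
 *
 *	*before = 0 * 8 + 2 = 2;
 *	*after = 1 * 8 + (8 - (2 + 1)) = 13;
 *
 * i.e. pages 8..23 of the object are contiguous on disk around the
 * requested page, subject to the EOF clamp against object->size.
 */
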
/*
 * Internal routine clearing partial-page content
 */
static void
vnode_pager_subpage_purge(struct vm_page *m, int base, int end)
{
        int size;

        KASSERT(end > base && end <= PAGE_SIZE,
            ("%s: start %d end %d", __func__, base, end));
        size = end - base;

        /*
         * Clear out partial-page garbage in case
         * the page has been mapped.
         */
        pmap_zero_page_area(m, base, size);

        /*
         * Update the valid bits to reflect the blocks
         * that have been zeroed.  Some of these valid
         * bits may have already been set.
         */
        vm_page_set_valid_range(m, base, size);

        /*
         * Round up "base" to the next block boundary so
         * that the dirty bit for a partially zeroed
         * block is not cleared.
         */
        base = roundup2(base, DEV_BSIZE);
        end = rounddown2(end, DEV_BSIZE);

        if (end > base) {
                /*
                 * Clear out partial-page dirty bits.
                 *
                 * note that we do not clear out the
                 * valid bits.  This would prevent
                 * bogus_page replacement from working
                 * properly.
                 */
                vm_page_clear_dirty(m, base, end - base);
        }
}

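/*
 * Added example of the DEV_BSIZE rounding above, with hypothetical
 * values: purging bytes [100, 4096) of a page with 512-byte disk
 * blocks zeroes and validates the whole range, then computes
 * roundup2(100, 512) = 512 and rounddown2(4096, 512) = 4096, so dirty
 * bits are cleared only for blocks 1..7; block 0 stays dirty because
 * it was only partially zeroed.
 */
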
/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
        vm_object_t object;
        vm_page_t m;
        vm_pindex_t nobjsize;

        if ((object = vp->v_object) == NULL)
                return;
#ifdef DEBUG_VFS_LOCKS
        {
                struct mount *mp;

                mp = vp->v_mount;
                if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0)
                        assert_vop_elocked(vp,
                            "vnode_pager_setsize and not locked vnode");
        }
#endif
        VM_OBJECT_WLOCK(object);
        if (object->type == OBJT_DEAD) {
                VM_OBJECT_WUNLOCK(object);
                return;
        }
        KASSERT(object->type == OBJT_VNODE,
            ("not vnode-backed object %p", object));
        if (nsize == object->un_pager.vnp.vnp_size) {
                /*
                 * Hasn't changed size
                 */
                VM_OBJECT_WUNLOCK(object);
                return;
        }
        nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
        if (nsize < object->un_pager.vnp.vnp_size) {
                /*
                 * File has shrunk.  Toss any cached pages beyond the new EOF.
                 */
                if (nobjsize < object->size)
                        vm_object_page_remove(object, nobjsize, object->size,
                            0);
                /*
                 * this gets rid of garbage at the end of a page that is now
                 * only partially backed by the vnode.
                 *
                 * XXX for some reason (I don't know yet), if we take a
                 * completely invalid page and mark it partially valid
                 * it can screw up NFS reads, so we don't allow the case.
                 */
                if (!(nsize & PAGE_MASK))
                        goto out;
                m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
                if (m == NULL)
                        goto out;
                if (!vm_page_none_valid(m))
                        vnode_pager_subpage_purge(m, (int)nsize & PAGE_MASK,
                            PAGE_SIZE);
                vm_page_xunbusy(m);
        }
out:
#if defined(__powerpc__) && !defined(__powerpc64__)
        object->un_pager.vnp.vnp_size = nsize;
#else
        atomic_store_64(&object->un_pager.vnp.vnp_size, nsize);
#endif
        object->size = nobjsize;
        VM_OBJECT_WUNLOCK(object);
}

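/*
 * Added usage note: filesystems call vnode_pager_setsize() whenever a
 * file's length changes, e.g. from their truncate paths:
 *
 *	vnode_pager_setsize(vp, (vm_ooffset_t)newsize);
 *
 * Shrinking to a size that is not page aligned both discards whole
 * pages past the new EOF and zeroes the tail of the new last page, as
 * the code above shows.
 */
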
/*
 * Lets the VM system know about the purged range for a file.  We toss away
 * any cached pages in the associated object that are affected by the purge
 * operation.  A partial-page area that is not aligned to page boundaries
 * will be zeroed, and the dirty blocks (in DEV_BSIZE units) within such a
 * page will not be flushed.
 */
void
vnode_pager_purge_range(struct vnode *vp, vm_ooffset_t start, vm_ooffset_t end)
{
        struct vm_page *m;
        struct vm_object *object;
        vm_pindex_t pi, pistart, piend;
        bool same_page;
        int base, pend;

        ASSERT_VOP_LOCKED(vp, "vnode_pager_purge_range");

        object = vp->v_object;
        pi = start + PAGE_MASK < start ? OBJ_MAX_SIZE :
            OFF_TO_IDX(start + PAGE_MASK);
        pistart = OFF_TO_IDX(start);
        piend = end == 0 ? OBJ_MAX_SIZE : OFF_TO_IDX(end);
        same_page = pistart == piend;
        if ((end != 0 && end <= start) || object == NULL)
                return;

        VM_OBJECT_WLOCK(object);

        if (pi < piend)
                vm_object_page_remove(object, pi, piend, 0);

        if ((start & PAGE_MASK) != 0) {
                base = (int)start & PAGE_MASK;
                pend = same_page ? (int)end & PAGE_MASK : PAGE_SIZE;
                m = vm_page_grab(object, pistart, VM_ALLOC_NOCREAT);
                if (m != NULL) {
                        if (!vm_page_none_valid(m))
                                vnode_pager_subpage_purge(m, base, pend);
                        vm_page_xunbusy(m);
                }
                if (same_page)
                        goto out;
        }
        if ((end & PAGE_MASK) != 0) {
                base = same_page ? (int)start & PAGE_MASK : 0;
                pend = (int)end & PAGE_MASK;
                m = vm_page_grab(object, piend, VM_ALLOC_NOCREAT);
                if (m != NULL) {
                        if (!vm_page_none_valid(m))
                                vnode_pager_subpage_purge(m, base, pend);
                        vm_page_xunbusy(m);
                }
        }
out:
        VM_OBJECT_WUNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
        int bsize;
        int err;
        daddr_t vblock;
        daddr_t voffset;

        if (VN_IS_DOOMED(vp))
                return -1;

        bsize = vp->v_mount->mnt_stat.f_iosize;
        vblock = address / bsize;
        voffset = address % bsize;

        err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
        if (err == 0) {
                if (*rtaddress != -1)
                        *rtaddress += voffset / DEV_BSIZE;
                if (run) {
                        *run += 1;
                        *run *= bsize / PAGE_SIZE;
                        *run -= voffset / PAGE_SIZE;
                }
        }

        return (err);
}

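/*
 * Added worked example with hypothetical numbers: with f_iosize = 16K,
 * PAGE_SIZE = 4K, DEV_BSIZE = 512 and address = 20480, vblock = 1 and
 * voffset = 4096.  If VOP_BMAP() maps block 1 to disk address 1000
 * with run = 2, the result is *rtaddress = 1000 + 4096 / 512 = 1008
 * and, rescaled to pages, *run = (2 + 1) * 4 - 1 = 11.
 */
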
/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
        struct vnode *vp;
        struct bufobj *bo;
        struct buf *bp;
        struct sf_buf *sf;
        daddr_t fileaddr;
        vm_offset_t bsize;
        vm_page_bits_t bits;
        int error, i;

        error = 0;
        vp = object->handle;
        if (VN_IS_DOOMED(vp))
                return VM_PAGER_BAD;

        bsize = vp->v_mount->mnt_stat.f_iosize;

        VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

        sf = sf_buf_alloc(m, 0);

        for (i = 0; i < PAGE_SIZE / bsize; i++) {
                vm_ooffset_t address;

                bits = vm_page_bits(i * bsize, bsize);
                if (m->valid & bits)
                        continue;

                address = IDX_TO_OFF(m->pindex) + i * bsize;
                if (address >= object->un_pager.vnp.vnp_size) {
                        fileaddr = -1;
                } else {
                        error = vnode_pager_addr(vp, address, &fileaddr, NULL);
                        if (error)
                                break;
                }
                if (fileaddr != -1) {
                        bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

                        /* build a minimal buffer header */
                        bp->b_iocmd = BIO_READ;
                        bp->b_iodone = bdone;
                        KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
                        KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
                        bp->b_rcred = crhold(curthread->td_ucred);
                        bp->b_wcred = crhold(curthread->td_ucred);
                        bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
                        bp->b_blkno = fileaddr;
                        pbgetbo(bo, bp);
                        bp->b_vp = vp;
                        bp->b_bcount = bsize;
                        bp->b_bufsize = bsize;
                        bp->b_runningbufspace = bp->b_bufsize;
                        atomic_add_long(&runningbufspace, bp->b_runningbufspace);

                        /* do the input */
                        bp->b_iooffset = dbtob(bp->b_blkno);
                        bstrategy(bp);

                        bwait(bp, PVM, "vnsrd");

                        if ((bp->b_ioflags & BIO_ERROR) != 0) {
                                KASSERT(bp->b_error != 0,
                                    ("%s: buf error but b_error == 0\n",
                                    __func__));
                                error = bp->b_error;
                        }

                        /*
                         * free the buffer header back to the swap buffer pool
                         */
                        bp->b_vp = NULL;
                        pbrelbo(bp);
                        uma_zfree(vnode_pbuf_zone, bp);
                        if (error)
                                break;
                } else
                        bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
                KASSERT((m->dirty & bits) == 0,
                    ("vnode_pager_input_smlfs: page %p is dirty", m));
                vm_page_bits_set(m, &m->valid, bits);
        }
        sf_buf_free(sf);
        if (error) {
                return VM_PAGER_ERROR;
        }
        return VM_PAGER_OK;
}

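/*
 * Added note: the small-block path above is only taken when
 * f_iosize < PAGE_SIZE (the pagesperblock == 0 test in
 * vnode_pager_generic_getpages()), so a single page is assembled from
 * several filesystem blocks, with one synchronous I/O per missing
 * block.
 */
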
/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
        struct uio auio;
        struct iovec aiov;
        int error;
        int size;
        struct sf_buf *sf;
        struct vnode *vp;

        VM_OBJECT_ASSERT_WLOCKED(object);
        error = 0;

        /*
         * Return failure if beyond current EOF
         */
        if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
                return VM_PAGER_BAD;
        } else {
                size = PAGE_SIZE;
                if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
                        size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
                vp = object->handle;
                VM_OBJECT_WUNLOCK(object);

                /*
                 * Allocate a kernel virtual address and initialize so that
                 * we can use VOP_READ/WRITE routines.
                 */
                sf = sf_buf_alloc(m, 0);

                aiov.iov_base = (caddr_t)sf_buf_kva(sf);
                aiov.iov_len = size;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
                auio.uio_offset = IDX_TO_OFF(m->pindex);
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_rw = UIO_READ;
                auio.uio_resid = size;
                auio.uio_td = curthread;

                error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
                if (!error) {
                        int count = size - auio.uio_resid;

                        if (count == 0)
                                error = EINVAL;
                        else if (count != PAGE_SIZE)
                                bzero((caddr_t)sf_buf_kva(sf) + count,
                                    PAGE_SIZE - count);
                }
                sf_buf_free(sf);

                VM_OBJECT_WLOCK(object);
        }
        KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
        if (!error)
                vm_page_valid(m);
        return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
        struct vnode *vp;
        int rtval;

        /* Handle is stable with paging in progress. */
        vp = object->handle;
        rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
        KASSERT(rtval != EOPNOTSUPP,
            ("vnode_pager: FS getpages not implemented\n"));
        return rtval;
}

static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
{
        struct vnode *vp;
        int rtval;

        vp = object->handle;
        rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
        KASSERT(rtval != EOPNOTSUPP,
            ("vnode_pager: FS getpages_async not implemented\n"));
        return (rtval);
}

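/*
 * Added usage sketch (hedged): a local filesystem without a private
 * getpages implementation can point its vnodeops at the wrappers
 * below, e.g.:
 *
 *	struct vop_vector foo_vnodeops = {
 *		...
 *		.vop_getpages = vnode_pager_local_getpages,
 *		.vop_getpages_async = vnode_pager_local_getpages_async,
 *	};
 *
 * "foo_vnodeops" is a placeholder; several in-tree filesystems wire
 * these entries this way.
 */
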
/*
 * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
 * local filesystems, where partially valid pages can only occur at
 * the end of file.
 */
int
vnode_pager_local_getpages(struct vop_getpages_args *ap)
{

        return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
            ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

int
vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
{
        int error;

        error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
            ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
        if (error != 0 && ap->a_iodone != NULL)
                ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
        return (error);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
        vm_object_t object;
        struct bufobj *bo;
        struct buf *bp;
        off_t foff;
#ifdef INVARIANTS
        off_t blkno0;
#endif
        int bsize, pagesperblock;
        int error, before, after, rbehind, rahead, poff, i;
        int bytecount, secmask;

        KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
            ("%s does not support devices", __func__));

        if (VN_IS_DOOMED(vp))
                return (VM_PAGER_BAD);

        object = vp->v_object;
        foff = IDX_TO_OFF(m[0]->pindex);
        bsize = vp->v_mount->mnt_stat.f_iosize;
        pagesperblock = bsize / PAGE_SIZE;

        KASSERT(foff < object->un_pager.vnp.vnp_size,
            ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
        KASSERT(count <= atop(maxphys),
            ("%s: requested %d pages", __func__, count));

        /*
         * The last page has valid blocks.  An invalid part can exist
         * only at the end of the file, and the page is made fully
         * valid by zeroing in vm_pager_get_pages().
         */
        if (!vm_page_none_valid(m[count - 1]) && --count == 0) {
                if (iodone != NULL)
                        iodone(arg, m, 1, 0);
                return (VM_PAGER_OK);
        }

        bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);
        MPASS((bp->b_flags & B_MAXPHYS) != 0);

        /*
         * Get the underlying device blocks for the file with VOP_BMAP().
         * If the file system doesn't support VOP_BMAP, fall back to the
         * old way of getting pages via VOP_READ.
         */
        error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
        if (error == EOPNOTSUPP) {
                uma_zfree(vnode_pbuf_zone, bp);
                VM_OBJECT_WLOCK(object);
                for (i = 0; i < count; i++) {
                        VM_CNT_INC(v_vnodein);
                        VM_CNT_INC(v_vnodepgsin);
                        error = vnode_pager_input_old(object, m[i]);
                        if (error)
                                break;
                }
                VM_OBJECT_WUNLOCK(object);
                return (error);
        } else if (error != 0) {
                uma_zfree(vnode_pbuf_zone, bp);
                return (VM_PAGER_ERROR);
        }

        /*
         * If the file system supports BMAP, but blocksize is smaller
         * than a page size, then use special small filesystem code.
         */
        if (pagesperblock == 0) {
                uma_zfree(vnode_pbuf_zone, bp);
                for (i = 0; i < count; i++) {
                        VM_CNT_INC(v_vnodein);
                        VM_CNT_INC(v_vnodepgsin);
                        error = vnode_pager_input_smlfs(object, m[i]);
                        if (error)
                                break;
                }
                return (error);
        }

        /*
         * A sparse file can be encountered only for a single page request,
         * which may not be preceded by a call to vm_pager_haspage().
         */
        if (bp->b_blkno == -1) {
                KASSERT(count == 1,
                    ("%s: array[%d] request to a sparse file %p", __func__,
                    count, vp));
                uma_zfree(vnode_pbuf_zone, bp);
                pmap_zero_page(m[0]);
                KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
                    __func__, m[0]));
                vm_page_valid(m[0]);
                return (VM_PAGER_OK);
        }

#ifdef INVARIANTS
        blkno0 = bp->b_blkno;
#endif
        bp->b_blkno += (foff % bsize) / DEV_BSIZE;

        /* Recalculate blocks available after/before to pages. */
        poff = (foff % bsize) / PAGE_SIZE;
        before *= pagesperblock;
        before += poff;
        after *= pagesperblock;
        after += pagesperblock - (poff + 1);
        if (m[0]->pindex + after >= object->size)
                after = object->size - 1 - m[0]->pindex;
        KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
            __func__, count, after + 1));
        after -= count - 1;

        /* Trim requested rbehind/rahead to possible values. */
        rbehind = a_rbehind ? *a_rbehind : 0;
        rahead = a_rahead ? *a_rahead : 0;
        rbehind = min(rbehind, before);
        rbehind = min(rbehind, m[0]->pindex);
        rahead = min(rahead, after);
        rahead = min(rahead, object->size - m[count - 1]->pindex);
        /*
         * Check that the total number of pages fits into the buf.  Trim
         * rbehind and rahead evenly if not.
         */
        if (rbehind + rahead + count > atop(maxphys)) {
                int trim, sum;

                trim = rbehind + rahead + count - atop(maxphys) + 1;
                sum = rbehind + rahead;
                if (rbehind == before) {
                        /* Roundup rbehind trim to block size. */
                        rbehind -= roundup(trim * rbehind / sum, pagesperblock);
                        if (rbehind < 0)
                                rbehind = 0;
                } else
                        rbehind -= trim * rbehind / sum;
                rahead -= trim * rahead / sum;
        }
        KASSERT(rbehind + rahead + count <= atop(maxphys),
            ("%s: behind %d ahead %d count %d maxphys %lu", __func__,
            rbehind, rahead, count, maxphys));

        /*
         * Fill in the bp->b_pages[] array with requested and optional
         * read behind or read ahead pages.  Read behind pages are looked
         * up in a backward direction, down to a first cached page.  Same
         * for read ahead pages, but there is no need to shift the array
         * in case of encountering a cached page.
         */
        i = bp->b_npages = 0;
        if (rbehind) {
                vm_pindex_t startpindex, tpindex;
                vm_page_t p;

                VM_OBJECT_WLOCK(object);
                startpindex = m[0]->pindex - rbehind;
                if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
                    p->pindex >= startpindex)
                        startpindex = p->pindex + 1;

                /* tpindex is unsigned; beware of numeric underflow. */
                for (tpindex = m[0]->pindex - 1;
                    tpindex >= startpindex && tpindex < m[0]->pindex;
                    tpindex--, i++) {
                        p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
                        if (p == NULL) {
                                /* Shift the array. */
                                for (int j = 0; j < i; j++)
                                        bp->b_pages[j] = bp->b_pages[j +
                                            tpindex + 1 - startpindex];
                                break;
                        }
                        bp->b_pages[tpindex - startpindex] = p;
                }

                bp->b_pgbefore = i;
                bp->b_npages += i;
                bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
        } else
                bp->b_pgbefore = 0;

        /* Requested pages. */
        for (int j = 0; j < count; j++, i++)
                bp->b_pages[i] = m[j];
        bp->b_npages += count;

        if (rahead) {
                vm_pindex_t endpindex, tpindex;
                vm_page_t p;

                if (!VM_OBJECT_WOWNED(object))
                        VM_OBJECT_WLOCK(object);
                endpindex = m[count - 1]->pindex + rahead + 1;
                if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
                    p->pindex < endpindex)
                        endpindex = p->pindex;
                if (endpindex > object->size)
                        endpindex = object->size;

                for (tpindex = m[count - 1]->pindex + 1;
                    tpindex < endpindex; i++, tpindex++) {
                        p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
                        if (p == NULL)
                                break;
                        bp->b_pages[i] = p;
                }

                bp->b_pgafter = i - bp->b_npages;
                bp->b_npages = i;
        } else
                bp->b_pgafter = 0;

        if (VM_OBJECT_WOWNED(object))
                VM_OBJECT_WUNLOCK(object);

        /* Report back actual behind/ahead read. */
        if (a_rbehind)
                *a_rbehind = bp->b_pgbefore;
        if (a_rahead)
                *a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
        KASSERT(bp->b_npages <= atop(maxphys),
            ("%s: buf %p overflowed", __func__, bp));
        for (int j = 1, prev = 0; j < bp->b_npages; j++) {
                if (bp->b_pages[j] == bogus_page)
                        continue;
                KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
                    j - prev, ("%s: pages array not consecutive, bp %p",
                    __func__, bp));
                prev = j;
        }
#endif

        /*
         * Recalculate first offset and bytecount with regards to read behind.
         * Truncate bytecount to vnode real size and round up physical size
         * for real devices.
         */
        foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
        bytecount = bp->b_npages << PAGE_SHIFT;
        if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
                bytecount = object->un_pager.vnp.vnp_size - foff;
        secmask = bo->bo_bsize - 1;
        KASSERT(secmask < PAGE_SIZE && secmask > 0,
            ("%s: sector size %d too large", __func__, secmask + 1));
        bytecount = (bytecount + secmask) & ~secmask;

        /*
         * And map the pages to be read into the kva, if the filesystem
         * requires mapped buffers.
         */
        if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
            unmapped_buf_allowed) {
                bp->b_data = unmapped_buf;
                bp->b_offset = 0;
        } else {
                bp->b_data = bp->b_kvabase;
                pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
        }

        /* Build a minimal buffer header. */
1137b0cd2017SGleb Smirnoff 	/* Build a minimal buffer header. */ 113821144e3bSPoul-Henning Kamp 	bp->b_iocmd = BIO_READ; 1139bd78ceceSJohn Baldwin 	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred")); 1140bd78ceceSJohn Baldwin 	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred")); 1141a854ed98SJohn Baldwin 	bp->b_rcred = crhold(curthread->td_ucred); 1142a854ed98SJohn Baldwin 	bp->b_wcred = crhold(curthread->td_ucred); 11439c83534dSPoul-Henning Kamp 	pbgetbo(bo, bp); 11441faacf5dSKirk McKusick 	bp->b_vp = vp; 1145b0cd2017SGleb Smirnoff 	bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount; 11462c18019fSPoul-Henning Kamp 	bp->b_iooffset = dbtob(bp->b_blkno); 1147e48b82bdSGleb Smirnoff 	KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) == 1148e48b82bdSGleb Smirnoff 	    (blkno0 - bp->b_blkno) * DEV_BSIZE + 1149e48b82bdSGleb Smirnoff 	    IDX_TO_OFF(m[0]->pindex) % bsize, 1150e48b82bdSGleb Smirnoff 	    ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju " 1151e48b82bdSGleb Smirnoff 	    "blkno0 %ju b_blkno %ju", bsize, 1152e48b82bdSGleb Smirnoff 	    (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex, 1153e48b82bdSGleb Smirnoff 	    (uintmax_t)blkno0, (uintmax_t)bp->b_blkno)); 115490effb23SGleb Smirnoff 
1155b0cd2017SGleb Smirnoff 	atomic_add_long(&runningbufspace, bp->b_runningbufspace); 115683c9dea1SGleb Smirnoff 	VM_CNT_INC(v_vnodein); 115783c9dea1SGleb Smirnoff 	VM_CNT_ADD(v_vnodepgsin, bp->b_npages); 1158b0cd2017SGleb Smirnoff 
115990effb23SGleb Smirnoff 	if (iodone != NULL) { /* async */ 1160b0cd2017SGleb Smirnoff 		bp->b_pgiodone = iodone; 116190effb23SGleb Smirnoff 		bp->b_caller1 = arg; 116290effb23SGleb Smirnoff 		bp->b_iodone = vnode_pager_generic_getpages_done_async; 116390effb23SGleb Smirnoff 		bp->b_flags |= B_ASYNC; 116490effb23SGleb Smirnoff 		BUF_KERNPROC(bp); 1165b792bebeSPoul-Henning Kamp 		bstrategy(bp); 1166b0cd2017SGleb Smirnoff 		return (VM_PAGER_OK); 116790effb23SGleb Smirnoff 	} else { 116890effb23SGleb Smirnoff 		bp->b_iodone = bdone; 116990effb23SGleb Smirnoff 		bstrategy(bp); 11706a4b5823SPoul-Henning Kamp 		bwait(bp, PVM, "vnread"); 117190effb23SGleb Smirnoff 		error = vnode_pager_generic_getpages_done(bp); 11721bb5ad63SGleb Smirnoff 		for (i = 0; i < bp->b_npages; i++) 11736ce697dcSKonstantin Belousov 			bp->b_pages[i] = NULL; 11741faacf5dSKirk McKusick 		bp->b_vp = NULL; 11759c83534dSPoul-Henning Kamp 		pbrelbo(bp); 1176756a5412SGleb Smirnoff 		uma_zfree(vnode_pbuf_zone, bp); 117790effb23SGleb Smirnoff 		return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK); 117890effb23SGleb Smirnoff 	} 1179b0cd2017SGleb Smirnoff } 118090effb23SGleb Smirnoff 
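/*
 * A minimal usage sketch (hypothetical, mirroring vop_stdgetpages()):
 * a filesystem without its own VOP_GETPAGES forwards here, and passing
 * a NULL iodone callback selects the synchronous path above, which
 * waits in bwait() and releases the pbuf itself.
 */
#if 0
static int
example_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_rbehind, ap->a_rahead, NULL, NULL));
}
#endif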
118190effb23SGleb Smirnoff static void 118290effb23SGleb Smirnoff vnode_pager_generic_getpages_done_async(struct buf *bp) 118390effb23SGleb Smirnoff { 118490effb23SGleb Smirnoff 	int error; 118590effb23SGleb Smirnoff 
118690effb23SGleb Smirnoff 	error = vnode_pager_generic_getpages_done(bp); 1187b0cd2017SGleb Smirnoff 	/* Run the iodone upon the requested range. */ 1188b0cd2017SGleb Smirnoff 	bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore, 1189b0cd2017SGleb Smirnoff 	    bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error); 119090effb23SGleb Smirnoff 	for (int i = 0; i < bp->b_npages; i++) 119190effb23SGleb Smirnoff 		bp->b_pages[i] = NULL; 119290effb23SGleb Smirnoff 	bp->b_vp = NULL; 119390effb23SGleb Smirnoff 	pbrelbo(bp); 1194756a5412SGleb Smirnoff 	uma_zfree(vnode_pbuf_zone, bp); 119590effb23SGleb Smirnoff } 119690effb23SGleb Smirnoff 
119790effb23SGleb Smirnoff static int 119890effb23SGleb Smirnoff vnode_pager_generic_getpages_done(struct buf *bp) 119990effb23SGleb Smirnoff { 120090effb23SGleb Smirnoff 	vm_object_t object; 120190effb23SGleb Smirnoff 	off_t tfoff, nextoff; 120290effb23SGleb Smirnoff 	int i, error; 120390effb23SGleb Smirnoff 
1204cafbf0c6SWarner Losh 	KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0, 1205cafbf0c6SWarner Losh 	    ("%s: buf error but b_error == 0\n", __func__)); 1206cafbf0c6SWarner Losh 	error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0; 120790effb23SGleb Smirnoff 	object = bp->b_vp->v_object; 120890effb23SGleb Smirnoff 
120990effb23SGleb Smirnoff 	if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) { 1210fade8dd7SJeff Roberson 		if (!buf_mapped(bp)) { 1211fade8dd7SJeff Roberson 			bp->b_data = bp->b_kvabase; 1212fade8dd7SJeff Roberson 			pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, 121390effb23SGleb Smirnoff 			    bp->b_npages); 121490effb23SGleb Smirnoff 		} 1215fade8dd7SJeff Roberson 		bzero(bp->b_data + bp->b_bcount, 121690effb23SGleb Smirnoff 		    PAGE_SIZE * bp->b_npages - bp->b_bcount); 121790effb23SGleb Smirnoff 	} 1218fade8dd7SJeff Roberson 	if (buf_mapped(bp)) { 1219fade8dd7SJeff Roberson 		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 1220fade8dd7SJeff Roberson 		bp->b_data = unmapped_buf; 122190effb23SGleb Smirnoff 	} 122226f9a767SRodney W. Grimes 
12231bd12a3bSChuck Silvers 	/* 12241bd12a3bSChuck Silvers 	 * If the read failed, we must free any read ahead/behind pages here. 12251bd12a3bSChuck Silvers 	 * The requested pages are freed by the caller (for sync requests) 12261bd12a3bSChuck Silvers 	 * or by the bp->b_pgiodone callback (for async requests). 12271bd12a3bSChuck Silvers 	 */ 12281bd12a3bSChuck Silvers 	if (error != 0) { 12291bd12a3bSChuck Silvers 		VM_OBJECT_WLOCK(object); 12301bd12a3bSChuck Silvers 		for (i = 0; i < bp->b_pgbefore; i++) 12311bd12a3bSChuck Silvers 			vm_page_free_invalid(bp->b_pages[i]); 12321bd12a3bSChuck Silvers 		for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++) 12331bd12a3bSChuck Silvers 			vm_page_free_invalid(bp->b_pages[i]); 12341bd12a3bSChuck Silvers 		VM_OBJECT_WUNLOCK(object); 12351bd12a3bSChuck Silvers 		return (error); 12361bd12a3bSChuck Silvers 	} 12371bd12a3bSChuck Silvers 
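	/*
	 * Every page of the buffer is visited exactly once below: pages the
	 * read covered completely become fully valid, the page straddling
	 * vnp_size becomes valid only up to EOF, and any bogus_page
	 * placeholders are skipped.
	 */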
12528d17e694SJulian Elischer */ 12530012f373SJeff Roberson vm_page_valid(mt); 1254016a3c93SAlan Cox KASSERT(mt->dirty == 0, 125579f0deb9SGleb Smirnoff ("%s: page %p is dirty", __func__, mt)); 1256016a3c93SAlan Cox KASSERT(!pmap_page_is_mapped(mt), 125779f0deb9SGleb Smirnoff ("%s: page %p is mapped", __func__, mt)); 12588f9110f6SJohn Dyson } else { 12598d17e694SJulian Elischer /* 126042eb4108SAlan Cox * Read did not fill up entire page. 12618d17e694SJulian Elischer * 1262c3dbadc1SChuck Silvers * Currently we do not set the entire page valid, 1263c3dbadc1SChuck Silvers * we just try to clear the piece that we couldn't 1264c3dbadc1SChuck Silvers * read. 12658d17e694SJulian Elischer */ 1266dc874f98SKonstantin Belousov vm_page_set_valid_range(mt, 0, 126754746b67SDmitrij Tejblum object->un_pager.vnp.vnp_size - tfoff); 126842eb4108SAlan Cox KASSERT((mt->dirty & vm_page_bits(0, 1269c3dbadc1SChuck Silvers object->un_pager.vnp.vnp_size - tfoff)) == 0, 1270c3dbadc1SChuck Silvers ("%s: page %p is dirty", __func__, mt)); 12718f9110f6SJohn Dyson } 12728f9110f6SJohn Dyson 1273b0cd2017SGleb Smirnoff if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter) 1274b6c00483SKonstantin Belousov vm_page_readahead_finish(mt); 127503679e23SAlan Cox } 12767f935055SJeff Roberson VM_OBJECT_RUNLOCK(object); 127790effb23SGleb Smirnoff 127890effb23SGleb Smirnoff return (error); 127926f9a767SRodney W. Grimes } 128026f9a767SRodney W. Grimes 1281ce75f2c3SMike Smith /* 1282ce75f2c3SMike Smith * EOPNOTSUPP is no longer legal. For local media VFS's that do not 1283ce75f2c3SMike Smith * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call to 1284ce75f2c3SMike Smith * vnode_pager_generic_putpages() to implement the previous behaviour. 1285ce75f2c3SMike Smith * 1286ce75f2c3SMike Smith * All other FS's should use the bypass to get to the local media 1287ce75f2c3SMike Smith * backing vp's VOP_PUTPAGES. 1288ce75f2c3SMike Smith */ 1289e4542174SMatthew Dillon static void 12907ebba1f8SGleb Smirnoff vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count, 129133cad9e9SKonstantin Belousov int flags, int *rtvals) 1292170db9c6SJohn Dyson { 1293*b8ebd99aSJohn Baldwin int rtval __diagused; 1294170db9c6SJohn Dyson struct vnode *vp; 129586ffbd76SMike Smith int bytes = count * PAGE_SIZE; 1296ad980522SJohn Dyson 12970e3cdf2cSAlan Cox /* 12980e3cdf2cSAlan Cox * Force synchronous operation if we are extremely low on memory 12990e3cdf2cSAlan Cox * to prevent a low-memory deadlock. VOP operations often need to 13000e3cdf2cSAlan Cox * allocate more memory to initiate the I/O ( i.e. do a BMAP 13010e3cdf2cSAlan Cox * operation ). The swapper handles the case by limiting the amount 13020e3cdf2cSAlan Cox * of asynchronous I/O, but that sort of solution doesn't scale well 13030e3cdf2cSAlan Cox * for the vnode pager without a lot of work. 13040e3cdf2cSAlan Cox * 13050e3cdf2cSAlan Cox * Also, the backing vnode's iodone routine may not wake the pageout 13060e3cdf2cSAlan Cox * daemon up. This should be probably be addressed XXX. 
1281ce75f2c3SMike Smith /* 1282ce75f2c3SMike Smith  * EOPNOTSUPP is no longer legal.  For local media VFS's that do not 1283ce75f2c3SMike Smith  * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call to 1284ce75f2c3SMike Smith  * vnode_pager_generic_putpages() to implement the previous behaviour. 1285ce75f2c3SMike Smith  * 1286ce75f2c3SMike Smith  * All other FS's should use the bypass to get to the local media 1287ce75f2c3SMike Smith  * backing vp's VOP_PUTPAGES. 1288ce75f2c3SMike Smith  */ 1289e4542174SMatthew Dillon static void 12907ebba1f8SGleb Smirnoff vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count, 129133cad9e9SKonstantin Belousov     int flags, int *rtvals) 1292170db9c6SJohn Dyson { 1293*b8ebd99aSJohn Baldwin 	int rtval __diagused; 1294170db9c6SJohn Dyson 	struct vnode *vp; 129586ffbd76SMike Smith 	int bytes = count * PAGE_SIZE; 1296ad980522SJohn Dyson 
12970e3cdf2cSAlan Cox 	/* 12980e3cdf2cSAlan Cox 	 * Force synchronous operation if we are extremely low on memory 12990e3cdf2cSAlan Cox 	 * to prevent a low-memory deadlock.  VOP operations often need to 13000e3cdf2cSAlan Cox 	 * allocate more memory to initiate the I/O (i.e., do a BMAP 13010e3cdf2cSAlan Cox 	 * operation).  The swapper handles the case by limiting the amount 13020e3cdf2cSAlan Cox 	 * of asynchronous I/O, but that sort of solution doesn't scale well 13030e3cdf2cSAlan Cox 	 * for the vnode pager without a lot of work. 13040e3cdf2cSAlan Cox 	 * 13050e3cdf2cSAlan Cox 	 * Also, the backing vnode's iodone routine may not wake the pageout 13060e3cdf2cSAlan Cox 	 * daemon up.  This should probably be addressed.  XXX 13070e3cdf2cSAlan Cox 	 */ 13080e3cdf2cSAlan Cox 
1309e2068d0bSJeff Roberson 	if (vm_page_count_min()) 131033cad9e9SKonstantin Belousov 		flags |= VM_PAGER_PUT_SYNC; 13110e3cdf2cSAlan Cox 
13120e3cdf2cSAlan Cox 	/* 13130e3cdf2cSAlan Cox 	 * Call the device-specific putpages function. 13140e3cdf2cSAlan Cox 	 */ 1315170db9c6SJohn Dyson 	vp = object->handle; 131689f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object); 131733cad9e9SKonstantin Belousov 	rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals); 131823955314SAlfred Perlstein 	KASSERT(rtval != EOPNOTSUPP, 131923955314SAlfred Perlstein 	    ("vnode_pager: stale FS putpages\n")); 132089f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object); 1321170db9c6SJohn Dyson } 1322170db9c6SJohn Dyson 
132305877a85SKonstantin Belousov static int 132405877a85SKonstantin Belousov vn_off2bidx(vm_ooffset_t offset) 132505877a85SKonstantin Belousov { 132605877a85SKonstantin Belousov 
132705877a85SKonstantin Belousov 	return ((offset & PAGE_MASK) / DEV_BSIZE); 132805877a85SKonstantin Belousov } 132905877a85SKonstantin Belousov 
133005877a85SKonstantin Belousov static bool 133105877a85SKonstantin Belousov vn_dirty_blk(vm_page_t m, vm_ooffset_t offset) 133205877a85SKonstantin Belousov { 133305877a85SKonstantin Belousov 
133405877a85SKonstantin Belousov 	KASSERT(IDX_TO_OFF(m->pindex) <= offset && 133505877a85SKonstantin Belousov 	    offset < IDX_TO_OFF(m->pindex + 1), 133605877a85SKonstantin Belousov 	    ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex, 133705877a85SKonstantin Belousov 	    (uintmax_t)offset)); 133805877a85SKonstantin Belousov 	return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0); 133905877a85SKonstantin Belousov } 1340ce75f2c3SMike Smith 
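/*
 * A small worked example of the helpers above, assuming PAGE_SIZE is
 * 4096 and DEV_BSIZE is 512: for offset 8192 + 1536 the in-page offset
 * is 1536, so vn_off2bidx() returns 3 and vn_dirty_blk() tests bit 3
 * of the page's dirty mask, i.e. the fourth 512-byte block.
 */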
134126f9a767SRodney W. Grimes /* 1342ce75f2c3SMike Smith  * This is now called from local media FS's to operate against their 13434491ea91SEivind Eklund  * own vnodes if they fail to implement VOP_PUTPAGES. 13442b6b0df7SMatthew Dillon  * 13452b6b0df7SMatthew Dillon  * This is typically called indirectly via the pageout daemon and 1346763df3ecSPedro F. Giffuni  * clustering has already typically occurred, so in general we ask the 13472b6b0df7SMatthew Dillon  * underlying filesystem to write the data out asynchronously rather 13482b6b0df7SMatthew Dillon  * than delayed. 134926f9a767SRodney W. Grimes  */ 1350ce75f2c3SMike Smith int 1351c46b90e9SAlan Cox vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount, 1352c46b90e9SAlan Cox     int flags, int *rtvals) 135326f9a767SRodney W. Grimes { 1354ce75f2c3SMike Smith 	vm_object_t object; 1355c46b90e9SAlan Cox 	vm_page_t m; 135605877a85SKonstantin Belousov 	vm_ooffset_t maxblksz, next_offset, poffset, prev_offset; 1357f6b04d2bSDavid Greenman 	struct uio auio; 1358f6b04d2bSDavid Greenman 	struct iovec aiov; 135905877a85SKonstantin Belousov 	off_t prev_resid, wrsz; 1360e6c44f65SKonstantin Belousov 	int count, error, i, maxsize, ncount, pgoff, ppscheck; 136105877a85SKonstantin Belousov 	bool in_hole; 1362dd498befSPaul Saab 	static struct timeval lastfail; 1363dd498befSPaul Saab 	static int curfail; 136426f9a767SRodney W. Grimes 
1365ce75f2c3SMike Smith 	object = vp->v_object; 1366ce75f2c3SMike Smith 	count = bytecount / PAGE_SIZE; 1367ce75f2c3SMike Smith 
136826f9a767SRodney W. Grimes 	for (i = 0; i < count; i++) 1369031ec8c1SKonstantin Belousov 		rtvals[i] = VM_PAGER_ERROR; 137026f9a767SRodney W. Grimes 
1371c46b90e9SAlan Cox 	if ((int64_t)ma[0]->pindex < 0) { 1372e6c44f65SKonstantin Belousov 		printf("vnode_pager_generic_putpages: " 1373e6c44f65SKonstantin Belousov 		    "attempt to write meta-data 0x%jx(%lx)\n", 1374e6c44f65SKonstantin Belousov 		    (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty); 1375f6b04d2bSDavid Greenman 		rtvals[0] = VM_PAGER_BAD; 1376e6c44f65SKonstantin Belousov 		return (VM_PAGER_BAD); 13770d94caffSDavid Greenman 	} 13780bdb7528SDavid Greenman 
1379f6b04d2bSDavid Greenman 	maxsize = count * PAGE_SIZE; 1380f6b04d2bSDavid Greenman 	ncount = count; 138126f9a767SRodney W. Grimes 
1382c46b90e9SAlan Cox 	poffset = IDX_TO_OFF(ma[0]->pindex); 138300a6f47fSMatthew Dillon 
138400a6f47fSMatthew Dillon 	/* 138500a6f47fSMatthew Dillon 	 * If the page-aligned write is larger than the actual file, we 1386763df3ecSPedro F. Giffuni 	 * have to invalidate pages occurring beyond the file EOF.  However, 138700a6f47fSMatthew Dillon 	 * there is an edge case where a file may not be page-aligned and 138800a6f47fSMatthew Dillon 	 * the last page is partially invalid.  In this case the filesystem 138900a6f47fSMatthew Dillon 	 * may not properly clear the dirty bits for the entire page (which 139000a6f47fSMatthew Dillon 	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d). 1391efec381dSMark Johnston 	 * With the page busied we are free to fix up the dirty bits here. 13923ebeaf59SMatthew Dillon 	 * 13933ebeaf59SMatthew Dillon 	 * We do not under any circumstances truncate the valid bits, as 13943ebeaf59SMatthew Dillon 	 * this will screw up bogus page replacement. 139500a6f47fSMatthew Dillon 	 */ 1396b3d4ab66SKonstantin Belousov 	VM_OBJECT_RLOCK(object); 1397a316d390SJohn Dyson 	if (maxsize + poffset > object->un_pager.vnp.vnp_size) { 139800a6f47fSMatthew Dillon 		if (object->un_pager.vnp.vnp_size > poffset) { 1399a316d390SJohn Dyson 			maxsize = object->un_pager.vnp.vnp_size - poffset; 1400aa8de40aSPoul-Henning Kamp 			ncount = btoc(maxsize); 140100a6f47fSMatthew Dillon 			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) { 1402938cdc42SKonstantin Belousov 				pgoff = roundup2(pgoff, DEV_BSIZE); 1403938cdc42SKonstantin Belousov 
1404c46b90e9SAlan Cox 				/* 14057f935055SJeff Roberson 				 * If the page is busy and the following 1406c46b90e9SAlan Cox 				 * conditions hold, then the page's dirty 1407c46b90e9SAlan Cox 				 * field cannot be concurrently changed by a 1408c46b90e9SAlan Cox 				 * pmap operation. 1409c46b90e9SAlan Cox 				 */ 1410c46b90e9SAlan Cox 				m = ma[ncount - 1]; 1411c7aebda8SAttilio Rao 				vm_page_assert_sbusied(m); 14126031c68dSAlan Cox 				KASSERT(!pmap_page_is_write_mapped(m), 1413c46b90e9SAlan Cox 				    ("vnode_pager_generic_putpages: page %p is not read-only", m)); 1414e6c44f65SKonstantin Belousov 				MPASS(m->dirty != 0); 1415c46b90e9SAlan Cox 				vm_page_clear_dirty(m, pgoff, PAGE_SIZE - 1416c46b90e9SAlan Cox 				    pgoff); 141700a6f47fSMatthew Dillon 			} 141800a6f47fSMatthew Dillon 		} else { 141900a6f47fSMatthew Dillon 			maxsize = 0; 142000a6f47fSMatthew Dillon 			ncount = 0; 142100a6f47fSMatthew Dillon 		} 1422e6c44f65SKonstantin Belousov 		for (i = ncount; i < count; i++) 1423f6b04d2bSDavid Greenman 			rtvals[i] = VM_PAGER_BAD; 1424f6b04d2bSDavid Greenman 	} 14257f935055SJeff Roberson 	VM_OBJECT_RUNLOCK(object); 142626f9a767SRodney W. Grimes 
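	/*
	 * A worked example of the EOF clipping above, assuming PAGE_SIZE is
	 * 4096 and DEV_BSIZE is 512: writing 4 pages at poffset 0 to a file
	 * of size 10000 clips maxsize to 10000 and ncount to 3; pgoff is
	 * 10000 & 4095 = 1808, rounded up to 2048, so the dirty bits for
	 * bytes [2048, 4096) of the last valid page are cleared and
	 * rtvals[3] is set to VM_PAGER_BAD.
	 */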
1427f6b04d2bSDavid Greenman 	auio.uio_iov = &aiov; 1428f6b04d2bSDavid Greenman 	auio.uio_segflg = UIO_NOCOPY; 1429f6b04d2bSDavid Greenman 	auio.uio_rw = UIO_WRITE; 1430e6c44f65SKonstantin Belousov 	auio.uio_td = NULL; 143105877a85SKonstantin Belousov 	maxblksz = roundup2(poffset + maxsize, DEV_BSIZE); 143205877a85SKonstantin Belousov 
143305877a85SKonstantin Belousov 	for (prev_offset = poffset; prev_offset < maxblksz;) { 143405877a85SKonstantin Belousov 		/* Skip clean blocks. */ 143505877a85SKonstantin Belousov 		for (in_hole = true; in_hole && prev_offset < maxblksz;) { 143605877a85SKonstantin Belousov 			m = ma[OFF_TO_IDX(prev_offset - poffset)]; 143705877a85SKonstantin Belousov 			for (i = vn_off2bidx(prev_offset); 143805877a85SKonstantin Belousov 			    i < sizeof(vm_page_bits_t) * NBBY && 143905877a85SKonstantin Belousov 			    prev_offset < maxblksz; i++) { 144005877a85SKonstantin Belousov 				if (vn_dirty_blk(m, prev_offset)) { 144105877a85SKonstantin Belousov 					in_hole = false; 144205877a85SKonstantin Belousov 					break; 144305877a85SKonstantin Belousov 				} 144405877a85SKonstantin Belousov 				prev_offset += DEV_BSIZE; 144505877a85SKonstantin Belousov 			} 144605877a85SKonstantin Belousov 		} 144705877a85SKonstantin Belousov 		if (in_hole) 144805877a85SKonstantin Belousov 			goto write_done; 144905877a85SKonstantin Belousov 
145005877a85SKonstantin Belousov 		/* Find longest run of dirty blocks. */ 145105877a85SKonstantin Belousov 		for (next_offset = prev_offset; next_offset < maxblksz;) { 145205877a85SKonstantin Belousov 			m = ma[OFF_TO_IDX(next_offset - poffset)]; 145305877a85SKonstantin Belousov 			for (i = vn_off2bidx(next_offset); 145405877a85SKonstantin Belousov 			    i < sizeof(vm_page_bits_t) * NBBY && 145505877a85SKonstantin Belousov 			    next_offset < maxblksz; i++) { 145605877a85SKonstantin Belousov 				if (!vn_dirty_blk(m, next_offset)) 145705877a85SKonstantin Belousov 					goto start_write; 145805877a85SKonstantin Belousov 				next_offset += DEV_BSIZE; 145905877a85SKonstantin Belousov 			} 146005877a85SKonstantin Belousov 		} 146105877a85SKonstantin Belousov start_write: 146205877a85SKonstantin Belousov 		if (next_offset > poffset + maxsize) 146305877a85SKonstantin Belousov 			next_offset = poffset + maxsize; 146405877a85SKonstantin Belousov 
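		/*
		 * For illustration, assuming DEV_BSIZE is 512: if a
		 * contiguous run of dirty 512-byte blocks spans a page
		 * boundary, the first loop parks prev_offset at the run's
		 * first dirty block and the second loop stops next_offset at
		 * the first clean block past it, so a single VOP_WRITE below
		 * covers the whole run regardless of page boundaries.
		 */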
146505877a85SKonstantin Belousov 		/* 146605877a85SKonstantin Belousov 		 * Getting here requires finding a dirty block in the 146705877a85SKonstantin Belousov 		 * 'skip clean blocks' loop. 146805877a85SKonstantin Belousov 		 */ 146905877a85SKonstantin Belousov 		MPASS(prev_offset < next_offset); 147005877a85SKonstantin Belousov 
147105877a85SKonstantin Belousov 		aiov.iov_base = NULL; 147205877a85SKonstantin Belousov 		auio.uio_iovcnt = 1; 147305877a85SKonstantin Belousov 		auio.uio_offset = prev_offset; 147405877a85SKonstantin Belousov 		prev_resid = auio.uio_resid = aiov.iov_len = next_offset - 147505877a85SKonstantin Belousov 		    prev_offset; 147605877a85SKonstantin Belousov 		error = VOP_WRITE(vp, &auio, 147705877a85SKonstantin Belousov 		    vnode_pager_putpages_ioflags(flags), curthread->td_ucred); 147805877a85SKonstantin Belousov 
147905877a85SKonstantin Belousov 		wrsz = prev_resid - auio.uio_resid; 148005877a85SKonstantin Belousov 		if (wrsz == 0) { 148105877a85SKonstantin Belousov 			if (ppsratecheck(&lastfail, &curfail, 1) != 0) { 148205877a85SKonstantin Belousov 				vn_printf(vp, "vnode_pager_putpages: " 148305877a85SKonstantin Belousov 				    "zero-length write at %ju resid %zd\n", 148405877a85SKonstantin Belousov 				    auio.uio_offset, auio.uio_resid); 148505877a85SKonstantin Belousov 			} 148605877a85SKonstantin Belousov 			break; 148705877a85SKonstantin Belousov 		} 148805877a85SKonstantin Belousov 
148905877a85SKonstantin Belousov 		/* Adjust the starting offset for next iteration. */ 149005877a85SKonstantin Belousov 		prev_offset += wrsz; 149105877a85SKonstantin Belousov 		MPASS(auio.uio_offset == prev_offset); 1492f6b04d2bSDavid Greenman 
14933dbb0ca6SKonstantin Belousov 		ppscheck = 0; 149405877a85SKonstantin Belousov 		if (error != 0 && (ppscheck = ppsratecheck(&lastfail, 149505877a85SKonstantin Belousov 		    &curfail, 1)) != 0) 149605877a85SKonstantin Belousov 			vn_printf(vp, "vnode_pager_putpages: I/O error %d\n", 149705877a85SKonstantin Belousov 			    error); 1498e6c44f65SKonstantin Belousov 		if (auio.uio_resid != 0 && (ppscheck != 0 || 1499e6c44f65SKonstantin Belousov 		    ppsratecheck(&lastfail, &curfail, 1) != 0)) 150005877a85SKonstantin Belousov 			vn_printf(vp, "vnode_pager_putpages: residual I/O %zd " 150105877a85SKonstantin Belousov 			    "at %ju\n", auio.uio_resid, 150205877a85SKonstantin Belousov 			    (uintmax_t)ma[0]->pindex); 150305877a85SKonstantin Belousov 		if (error != 0 || auio.uio_resid != 0) 150405877a85SKonstantin Belousov 			break; 150505877a85SKonstantin Belousov 	} 150605877a85SKonstantin Belousov write_done: 150705877a85SKonstantin Belousov 	/* Mark completely processed pages. */ 150805877a85SKonstantin Belousov 	for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++) 150926f9a767SRodney W. Grimes 		rtvals[i] = VM_PAGER_OK; 151005877a85SKonstantin Belousov 	/* Mark partial EOF page. */ 151105877a85SKonstantin Belousov 	if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0) 151205877a85SKonstantin Belousov 		rtvals[i++] = VM_PAGER_OK; 151305877a85SKonstantin Belousov 	/* Unwritten pages in the range; as a free bonus, clean ones are reported OK. */ 151405877a85SKonstantin Belousov 	for (; i < ncount; i++) 151505877a85SKonstantin Belousov 		rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR; 151605877a85SKonstantin Belousov 	VM_CNT_ADD(v_vnodepgsout, i); 151705877a85SKonstantin Belousov 	VM_CNT_INC(v_vnodeout); 1518e6c44f65SKonstantin Belousov 	return (rtvals[0]); 151926f9a767SRodney W. Grimes } 1520031ec8c1SKonstantin Belousov 
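/*
 * A usage sketch (hypothetical, mirroring vop_stdputpages()): a
 * local-media filesystem that does not roll its own VOP_PUTPAGES just
 * forwards to the generic implementation above.
 */
#if 0
static int
example_putpages(struct vop_putpages_args *ap)
{

	return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
	    ap->a_count, ap->a_sync, ap->a_rtvals));
}
#endif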
152165b9599aSKonstantin Belousov int 152265b9599aSKonstantin Belousov vnode_pager_putpages_ioflags(int pager_flags) 152365b9599aSKonstantin Belousov { 152465b9599aSKonstantin Belousov 	int ioflags; 152565b9599aSKonstantin Belousov 
152665b9599aSKonstantin Belousov 	/* 152765b9599aSKonstantin Belousov 	 * Pageouts are already clustered, use IO_ASYNC to force a 152865b9599aSKonstantin Belousov 	 * bawrite() rather than a bdwrite() to prevent paging I/O 152965b9599aSKonstantin Belousov 	 * from saturating the buffer cache.  Dummy-up the sequential 153065b9599aSKonstantin Belousov 	 * heuristic to cause large ranges to cluster.  If neither 153165b9599aSKonstantin Belousov 	 * IO_SYNC nor IO_ASYNC is set, the system decides how to 153265b9599aSKonstantin Belousov 	 * cluster. 153365b9599aSKonstantin Belousov 	 */ 153465b9599aSKonstantin Belousov 	ioflags = IO_VMIO; 153565b9599aSKonstantin Belousov 	if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0) 153665b9599aSKonstantin Belousov 		ioflags |= IO_SYNC; 153765b9599aSKonstantin Belousov 	else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0) 153865b9599aSKonstantin Belousov 		ioflags |= IO_ASYNC; 153965b9599aSKonstantin Belousov 	ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL: 0; 154065b9599aSKonstantin Belousov 	ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0; 154165b9599aSKonstantin Belousov 	ioflags |= IO_SEQMAX << IO_SEQSHIFT; 154265b9599aSKonstantin Belousov 	return (ioflags); 154365b9599aSKonstantin Belousov } 154465b9599aSKonstantin Belousov 
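/*
 * For example, a VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL request maps
 * to IO_VMIO | IO_SYNC | IO_INVAL with the sequential heuristic forced
 * to IO_SEQMAX, while a plain asynchronous pageout without
 * VM_PAGER_CLUSTER_OK maps to IO_VMIO | IO_ASYNC.
 */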
1545555b7bb4SKonstantin Belousov /* 1546555b7bb4SKonstantin Belousov  * vnode_pager_undirty_pages(). 1547555b7bb4SKonstantin Belousov  * 1548555b7bb4SKonstantin Belousov  * A helper to mark pages as clean after a pageout that was possibly 1549555b7bb4SKonstantin Belousov  * done with a short write.  The lpos argument specifies the page run 1550555b7bb4SKonstantin Belousov  * length in bytes, and the written argument specifies how many bytes 1551555b7bb4SKonstantin Belousov  * were actually written.  eof is the offset past the last valid byte 1552555b7bb4SKonstantin Belousov  * in the vnode, computed using the absolute file position of the 1553555b7bb4SKonstantin Belousov  * first byte in the run as the base. 1554555b7bb4SKonstantin Belousov  */ 1555031ec8c1SKonstantin Belousov void 1556555b7bb4SKonstantin Belousov vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof, 1557555b7bb4SKonstantin Belousov     int lpos) 1558031ec8c1SKonstantin Belousov { 1559555b7bb4SKonstantin Belousov 	int i, pos, pos_devb; 1560031ec8c1SKonstantin Belousov 
1561555b7bb4SKonstantin Belousov 	if (written == 0 && eof >= lpos) 15629d17da3bSKonstantin Belousov 		return; 1563031ec8c1SKonstantin Belousov 	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) { 1564031ec8c1SKonstantin Belousov 		if (pos < trunc_page(written)) { 1565031ec8c1SKonstantin Belousov 			rtvals[i] = VM_PAGER_OK; 1566031ec8c1SKonstantin Belousov 			vm_page_undirty(ma[i]); 1567031ec8c1SKonstantin Belousov 		} else { 1568031ec8c1SKonstantin Belousov 			/* Partially written page. */ 1569031ec8c1SKonstantin Belousov 			rtvals[i] = VM_PAGER_AGAIN; 1570031ec8c1SKonstantin Belousov 			vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK); 1571031ec8c1SKonstantin Belousov 		} 1572031ec8c1SKonstantin Belousov 	} 1573555b7bb4SKonstantin Belousov 	if (eof >= lpos) /* avoid truncation */ 15747f935055SJeff Roberson 		return; 1575555b7bb4SKonstantin Belousov 	for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) { 1576555b7bb4SKonstantin Belousov 		if (pos != trunc_page(pos)) { 1577555b7bb4SKonstantin Belousov 			/* 1578555b7bb4SKonstantin Belousov 			 * The page contains the last valid byte in 1579555b7bb4SKonstantin Belousov 			 * the vnode, mark the rest of the page as 1580555b7bb4SKonstantin Belousov 			 * clean, potentially making the whole page 1581555b7bb4SKonstantin Belousov 			 * clean. 1582555b7bb4SKonstantin Belousov 			 */ 1583555b7bb4SKonstantin Belousov 			pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE); 1584555b7bb4SKonstantin Belousov 			vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE - 1585555b7bb4SKonstantin Belousov 			    pos_devb); 1586555b7bb4SKonstantin Belousov 
1587555b7bb4SKonstantin Belousov 			/* 1588555b7bb4SKonstantin Belousov 			 * If the page was cleaned, report the pageout 1589555b7bb4SKonstantin Belousov 			 * on it as successful.  msync() no longer 1590555b7bb4SKonstantin Belousov 			 * needs to write out the page, endlessly 1591555b7bb4SKonstantin Belousov 			 * creating write requests and dirty buffers. 1592555b7bb4SKonstantin Belousov 			 */ 1593555b7bb4SKonstantin Belousov 			if (ma[i]->dirty == 0) 1594555b7bb4SKonstantin Belousov 				rtvals[i] = VM_PAGER_OK; 1595555b7bb4SKonstantin Belousov 
1596555b7bb4SKonstantin Belousov 			pos = round_page(pos); 1597555b7bb4SKonstantin Belousov 		} else { 1598555b7bb4SKonstantin Belousov 			/* vm_pageout_flush() clears dirty */ 1599555b7bb4SKonstantin Belousov 			rtvals[i] = VM_PAGER_BAD; 1600555b7bb4SKonstantin Belousov 			pos += PAGE_SIZE; 1601555b7bb4SKonstantin Belousov 		} 1602555b7bb4SKonstantin Belousov 	} 1603031ec8c1SKonstantin Belousov } 160484110e7eSKonstantin Belousov 
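/*
 * A worked example, assuming PAGE_SIZE is 4096 and DEV_BSIZE is 512:
 * for a three-page run (lpos 12288) with written 5000 and eof 9000,
 * page 0 is undirtied and reported VM_PAGER_OK, page 1 has only its
 * written head cleaned and stays VM_PAGER_AGAIN, and page 2, holding
 * EOF at byte 808, has bytes from 1024 on marked clean so that a page
 * dirtied only beyond EOF cannot make msync() write forever.
 */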
%d", 1629b47f6241SJohn Baldwin __func__, vp, vp->v_writecount); 163084110e7eSKonstantin Belousov } 163189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 163284110e7eSKonstantin Belousov } 163384110e7eSKonstantin Belousov 1634fe7bcbafSKyle Evans static void 163584110e7eSKonstantin Belousov vnode_pager_release_writecount(vm_object_t object, vm_offset_t start, 163684110e7eSKonstantin Belousov vm_offset_t end) 163784110e7eSKonstantin Belousov { 163884110e7eSKonstantin Belousov struct vnode *vp; 163984110e7eSKonstantin Belousov struct mount *mp; 164084110e7eSKonstantin Belousov vm_offset_t inc; 164184110e7eSKonstantin Belousov 164289f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 164384110e7eSKonstantin Belousov 164484110e7eSKonstantin Belousov /* 164584110e7eSKonstantin Belousov * First, recheck the object type to account for the race when 164684110e7eSKonstantin Belousov * the vnode is reclaimed. 164784110e7eSKonstantin Belousov */ 164884110e7eSKonstantin Belousov if (object->type != OBJT_VNODE) { 164989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 165084110e7eSKonstantin Belousov return; 165184110e7eSKonstantin Belousov } 165284110e7eSKonstantin Belousov 165384110e7eSKonstantin Belousov /* 165484110e7eSKonstantin Belousov * Optimize for the case when writemappings is not going to 165584110e7eSKonstantin Belousov * zero. 165684110e7eSKonstantin Belousov */ 165784110e7eSKonstantin Belousov inc = end - start; 165884110e7eSKonstantin Belousov if (object->un_pager.vnp.writemappings != inc) { 165984110e7eSKonstantin Belousov object->un_pager.vnp.writemappings -= inc; 166089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 166184110e7eSKonstantin Belousov return; 166284110e7eSKonstantin Belousov } 166384110e7eSKonstantin Belousov 166484110e7eSKonstantin Belousov vp = object->handle; 166584110e7eSKonstantin Belousov vhold(vp); 166689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 166784110e7eSKonstantin Belousov mp = NULL; 166884110e7eSKonstantin Belousov vn_start_write(vp, &mp, V_WAIT); 166978022527SKonstantin Belousov vn_lock(vp, LK_SHARED | LK_RETRY); 167084110e7eSKonstantin Belousov 167184110e7eSKonstantin Belousov /* 167284110e7eSKonstantin Belousov * Decrement the object's writemappings, by swapping the start 167384110e7eSKonstantin Belousov * and end arguments for vnode_pager_update_writecount(). If 167484110e7eSKonstantin Belousov * there was not a race with vnode reclaimation, then the 167584110e7eSKonstantin Belousov * vnode's v_writecount is decremented. 167684110e7eSKonstantin Belousov */ 167784110e7eSKonstantin Belousov vnode_pager_update_writecount(object, end, start); 1678b249ce48SMateusz Guzik VOP_UNLOCK(vp); 167984110e7eSKonstantin Belousov vdrop(vp); 168084110e7eSKonstantin Belousov if (mp != NULL) 168184110e7eSKonstantin Belousov vn_finished_write(mp); 168284110e7eSKonstantin Belousov } 1683192112b7SKonstantin Belousov 1684192112b7SKonstantin Belousov static void 1685192112b7SKonstantin Belousov vnode_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp) 1686192112b7SKonstantin Belousov { 1687192112b7SKonstantin Belousov *vpp = object->handle; 1688192112b7SKonstantin Belousov } 1689