/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */
/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);
static void vnode_pager_update_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);

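/*
 * Pager operations vector for vnode-backed VM objects.  The VM system
 * dispatches to these handlers through the generic vm_pager_*() entry
 * points whenever an object of type OBJT_VNODE is paged.
 */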
struct pagerops vnodepagerops = {
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_getpages_async = vnode_pager_getpages_async,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
	.pgo_update_writecount = vnode_pager_update_writecount,
	.pgo_release_writecount = vnode_pager_release_writecount,
};

static struct domainset *vnode_domainset = NULL;

SYSCTL_PROC(_debug, OID_AUTO, vnode_domainset,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RW, &vnode_domainset, 0,
    sysctl_handle_domainset, "A", "Default vnode NUMA policy");

static int nvnpbufs;
SYSCTL_INT(_vm, OID_AUTO, vnode_pbufs, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &nvnpbufs, 0, "number of physical buffers allocated for vnode pager");

static uma_zone_t vnode_pbuf_zone;

static void
vnode_pager_init(void *dummy)
{

#ifdef __LP64__
	nvnpbufs = nswbuf * 2;
#else
	nvnpbufs = nswbuf / 2;
#endif
	TUNABLE_INT_FETCH("vm.vnode_pbufs", &nvnpbufs);
	vnode_pbuf_zone = pbuf_zsecond_create("vnpbuf", nvnpbufs);
}
SYSINIT(vnode_pager, SI_SUB_CPU, SI_ORDER_ANY, vnode_pager_init, NULL);

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;
	bool last;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	object = vp->v_object;
	if (object != NULL)
		return (0);

	if (size == 0) {
		if (vn_isdisk(vp, NULL)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.  We still have
	 * to serialize with vnode_pager_dealloc() for the last
	 * potential reference.
	 */
	VM_OBJECT_RLOCK(object);
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);
	if (last)
		vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL || obj->handle != vp)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	MPASS(obj->type == OBJT_VNODE);
	umtx_shm_object_terminated(obj);
	if (obj->ref_count == 0) {
		KASSERT((obj->flags & OBJ_DEAD) == 0,
		    ("vnode_destroy_vobject: Terminating dead object"));
		vm_object_set_flag(obj, OBJ_DEAD);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(obj);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(obj);
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *)handle;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
	VNPASS(vp->v_usecount > 0, vp);
retry:
	object = vp->v_object;

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;
		object->domain.dr_policy = vnode_domainset;
		object->handle = handle;
		if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_set_flag(object, OBJ_SIZEVNLOCK);
			VM_OBJECT_WUNLOCK(object);
		}
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were allocating.
			 */
			VI_UNLOCK(vp);
			VM_OBJECT_WLOCK(object);
			KASSERT(object->ref_count == 1,
			    ("leaked ref %p %d", object, object->ref_count));
			object->type = OBJT_DEAD;
			refcount_init(&object->ref_count, 0);
			VM_OBJECT_WUNLOCK(object);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
		vrefact(vp);
	} else {
		vm_object_reference(object);
#if VM_NRESERVLEVEL > 0
		if ((object->flags & OBJ_COLORED) == 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_color(object, 0);
			VM_OBJECT_WUNLOCK(object);
		}
#endif
	}
	return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

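	/*
	 * Detach the object from its vnode: clear the handle, drop any
	 * write/text counts the mappings held, and release the vnode
	 * reference that the association kept.
	 */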
	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VI_LOCK(vp);

	/*
	 * vm_map_entry_set_vnode_text() cannot reach this vnode by
	 * following object->handle.  Clear all text references now.
	 * This also clears the transient references from
	 * kern_execve(), which is fine because dead_vnodeops uses nop
	 * for VOP_UNSET_TEXT().
	 */
	if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);
	VM_OBJECT_WUNLOCK(object);
	if (refs > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	uintptr_t lockstate;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || VN_IS_DOOMED(vp))
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;

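	/*
	 * Scale between file system blocks and pages: with large blocks
	 * one block spans several pages (pagesperblock), with small
	 * blocks one page spans several blocks (blocksperpage).  The
	 * block number passed to VOP_BMAP() and the before/after counts
	 * it returns are converted accordingly below.
	 */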
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	lockstate = VM_OBJECT_DROP(object);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VM_OBJECT_PICKUP(object, lockstate);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			/*
			 * The BMAP vop can report a partial block in the
			 * 'after', but must not report blocks after EOF.
			 * Assert the latter, and truncate 'after' in case
			 * of the former.
			 */
			KASSERT((reqblock + *after) * pagesperblock <
			    roundup2(object->size, pagesperblock),
			    ("%s: reqblock %jd after %d size %ju", __func__,
			    (intmax_t)reqblock, *after,
			    (uintmax_t)object->size));
			*after *= pagesperblock;
			*after += pagesperblock - (poff + 1);
			if (pindex + *after >= object->size)
				*after = object->size - 1 - pindex;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
#ifdef DEBUG_VFS_LOCKS
	{
		struct mount *mp;

		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0)
			assert_vop_elocked(vp,
			    "vnode_pager_setsize and not locked vnode");
	}
#endif
	VM_OBJECT_WLOCK(object);
	if (object->type == OBJT_DEAD) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	KASSERT(object->type == OBJT_VNODE,
	    ("not vnode-backed object %p", object));
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if (!(nsize & PAGE_MASK))
			goto out;
		m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
		if (m == NULL)
			goto out;
		if (!vm_page_none_valid(m)) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * Update the valid bits to reflect the blocks that
			 * have been zeroed.  Some of these valid bits may
			 * have already been set.
			 */
			vm_page_set_valid_range(m, base, size);

			/*
			 * Round "base" to the next block boundary so that the
			 * dirty bit for a partially zeroed block is not
			 * cleared.
			 */
			base = roundup2(base, DEV_BSIZE);

			/*
			 * Clear out partial-page dirty bits.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
		}
		vm_page_xunbusy(m);
	}
out:
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (address < 0)
		return -1;

	if (VN_IS_DOOMED(vp))
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (VN_IS_DOOMED(vp))
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

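	/*
	 * Map the page with an sf_buf so the per-block reads below can
	 * address it directly, even on machines without a direct map.
	 */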
	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			atomic_add_long(&runningbufspace, bp->b_runningbufspace);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			uma_zfree(vnode_pbuf_zone, bp);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		vm_page_bits_set(m, &m->valid, bits);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		vm_page_valid(m);
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	struct vnode *vp;
	int rtval;

	/* Handle is stable with paging in progress. */
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	return rtval;
}

static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages_async not implemented\n"));
	return (rtval);
}

/*
 * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
 * local filesystems, where partially valid pages can only occur at
 * the end of file.
 */
int
vnode_pager_local_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

int
vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg));
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	struct bufobj *bo;
	struct buf *bp;
	off_t foff;
#ifdef INVARIANTS
	off_t blkno0;
#endif
	int bsize, pagesperblock;
	int error, before, after, rbehind, rahead, poff, i;
	int bytecount, secmask;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("%s does not support devices", __func__));

	if (VN_IS_DOOMED(vp))
		return (VM_PAGER_BAD);

	object = vp->v_object;
	foff = IDX_TO_OFF(m[0]->pindex);
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;

	KASSERT(foff < object->un_pager.vnp.vnp_size,
	    ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
	KASSERT(count <= nitems(bp->b_pages),
	    ("%s: requested %d pages", __func__, count));

	/*
	 * The last page has valid blocks.  Invalid part can only
	 * exist at the end of file, and the page is made fully valid
	 * by zeroing in vm_pager_get_pages().
	 */
	if (!vm_page_none_valid(m[count - 1]) && --count == 0) {
		if (iodone != NULL)
			iodone(arg, m, 1, 0);
		return (VM_PAGER_OK);
	}

	bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use the old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
	if (error == EOPNOTSUPP) {
		uma_zfree(vnode_pbuf_zone, bp);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_old(object, m[i]);
			if (error)
				break;
		}
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		return (VM_PAGER_ERROR);
	}

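	/*
	 * At this point bp->b_blkno is the disk address (in DEV_BSIZE
	 * units) of the file system block containing foff, and 'before'/
	 * 'after' count the contiguous file system blocks around it;
	 * both are rescaled to page units below.
	 */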
	/*
	 * If the file system supports BMAP, but blocksize is smaller
	 * than a page size, then use special small filesystem code.
	 */
	if (pagesperblock == 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_smlfs(object, m[i]);
			if (error)
				break;
		}
		return (error);
	}

	/*
	 * A sparse file can be encountered only for a single page request,
	 * which may not be preceded by call to vm_pager_haspage().
	 */
	if (bp->b_blkno == -1) {
		KASSERT(count == 1,
		    ("%s: array[%d] request to a sparse file %p", __func__,
		    count, vp));
		uma_zfree(vnode_pbuf_zone, bp);
		pmap_zero_page(m[0]);
		KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
		    __func__, m[0]));
		vm_page_valid(m[0]);
		return (VM_PAGER_OK);
	}

#ifdef INVARIANTS
	blkno0 = bp->b_blkno;
#endif
	bp->b_blkno += (foff % bsize) / DEV_BSIZE;

	/* Recalculate blocks available after/before to pages. */
	poff = (foff % bsize) / PAGE_SIZE;
	before *= pagesperblock;
	before += poff;
	after *= pagesperblock;
	after += pagesperblock - (poff + 1);
	if (m[0]->pindex + after >= object->size)
		after = object->size - 1 - m[0]->pindex;
	KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
	    __func__, count, after + 1));
	after -= count - 1;

	/* Trim requested rbehind/rahead to possible values. */
	rbehind = a_rbehind ? *a_rbehind : 0;
	rahead = a_rahead ? *a_rahead : 0;
	rbehind = min(rbehind, before);
	rbehind = min(rbehind, m[0]->pindex);
	rahead = min(rahead, after);
	rahead = min(rahead, object->size - m[count - 1]->pindex);
	/*
	 * Check that the total amount of pages fits into buf.  Trim rbehind
	 * and rahead evenly if not.
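	 * For example, with a typical 32-entry b_pages[] array (128kB
	 * MAXPHYS, 4kB pages), a request for count == 8 pages with
	 * rbehind == 20 and rahead == 20 overflows the buffer, so the
	 * excess is shaved off rbehind and rahead in proportion to
	 * their sizes.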
	 */
	if (rbehind + rahead + count > nitems(bp->b_pages)) {
		int trim, sum;

		trim = rbehind + rahead + count - nitems(bp->b_pages) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Roundup rbehind trim to block size. */
			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	KASSERT(rbehind + rahead + count <= nitems(bp->b_pages),
	    ("%s: behind %d ahead %d count %d", __func__,
	    rbehind, rahead, count));

	/*
	 * Fill in the bp->b_pages[] array with requested and optional
	 * read behind or read ahead pages.  Read behind pages are looked
	 * up in a backward direction, down to a first cached page.  Same
	 * for read ahead pages, but there is no need to shift the array
	 * in case of encountering a cached page.
	 */
	i = bp->b_npages = 0;
	if (rbehind) {
		vm_pindex_t startpindex, tpindex;
		vm_page_t p;

		VM_OBJECT_WLOCK(object);
		startpindex = m[0]->pindex - rbehind;
		if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
		    p->pindex >= startpindex)
			startpindex = p->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
		for (tpindex = m[0]->pindex - 1;
		    tpindex >= startpindex && tpindex < m[0]->pindex;
		    tpindex--, i++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL) {
				/* Shift the array. */
				for (int j = 0; j < i; j++)
					bp->b_pages[j] = bp->b_pages[j +
					    tpindex + 1 - startpindex];
				break;
			}
			bp->b_pages[tpindex - startpindex] = p;
		}

		bp->b_pgbefore = i;
		bp->b_npages += i;
		bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
	} else
		bp->b_pgbefore = 0;

	/* Requested pages. */
	for (int j = 0; j < count; j++, i++)
		bp->b_pages[i] = m[j];
	bp->b_npages += count;

	if (rahead) {
		vm_pindex_t endpindex, tpindex;
		vm_page_t p;

		if (!VM_OBJECT_WOWNED(object))
			VM_OBJECT_WLOCK(object);
		endpindex = m[count - 1]->pindex + rahead + 1;
		if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
		    p->pindex < endpindex)
			endpindex = p->pindex;
		if (endpindex > object->size)
			endpindex = object->size;

		for (tpindex = m[count - 1]->pindex + 1;
		    tpindex < endpindex; i++, tpindex++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			bp->b_pages[i] = p;
		}

		bp->b_pgafter = i - bp->b_npages;
		bp->b_npages = i;
	} else
		bp->b_pgafter = 0;

	if (VM_OBJECT_WOWNED(object))
		VM_OBJECT_WUNLOCK(object);

	/* Report back actual behind/ahead read. */
	if (a_rbehind)
		*a_rbehind = bp->b_pgbefore;
	if (a_rahead)
		*a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
	KASSERT(bp->b_npages <= nitems(bp->b_pages),
	    ("%s: buf %p overflowed", __func__, bp));
	for (int j = 1, prev = 0; j < bp->b_npages; j++) {
		if (bp->b_pages[j] == bogus_page)
			continue;
		KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
		    j - prev, ("%s: pages array not consecutive, bp %p",
		    __func__, bp));
		prev = j;
	}
#endif

	/*
	 * Recalculate first offset and bytecount with regards to read behind.
	 * Truncate bytecount to vnode real size and round up physical size
	 * for real devices.
	 */
	foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	bytecount = bp->b_npages << PAGE_SHIFT;
	if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
		bytecount = object->un_pager.vnp.vnp_size - foff;
	secmask = bo->bo_bsize - 1;
	KASSERT(secmask < PAGE_SIZE && secmask > 0,
	    ("%s: sector size %d too large", __func__, secmask + 1));
	bytecount = (bytecount + secmask) & ~secmask;

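	/*
	 * E.g. with 512 byte sectors (secmask == 511), a 4000 byte tail
	 * read rounds up to 4096 bytes so the final sector transfer is
	 * complete.
	 */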
	/*
	 * And map the pages to be read into the kva, if the filesystem
	 * requires mapped buffers.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
	    unmapped_buf_allowed) {
		bp->b_data = unmapped_buf;
		bp->b_offset = 0;
	} else {
		bp->b_data = bp->b_kvabase;
		pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	}

	/* Build a minimal buffer header. */
	bp->b_iocmd = BIO_READ;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	pbgetbo(bo, bp);
	bp->b_vp = vp;
	bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount;
	bp->b_iooffset = dbtob(bp->b_blkno);
	KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
	    (blkno0 - bp->b_blkno) * DEV_BSIZE +
	    IDX_TO_OFF(m[0]->pindex) % bsize,
	    ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
	    "blkno0 %ju b_blkno %ju", bsize,
	    (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
	    (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));

	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, bp->b_npages);

	if (iodone != NULL) { /* async */
		bp->b_pgiodone = iodone;
		bp->b_caller1 = arg;
		bp->b_iodone = vnode_pager_generic_getpages_done_async;
		bp->b_flags |= B_ASYNC;
		BUF_KERNPROC(bp);
		bstrategy(bp);
		return (VM_PAGER_OK);
	} else {
		bp->b_iodone = bdone;
		bstrategy(bp);
		bwait(bp, PVM, "vnread");
		error = vnode_pager_generic_getpages_done(bp);
		for (i = 0; i < bp->b_npages; i++)
			bp->b_pages[i] = NULL;
		bp->b_vp = NULL;
		pbrelbo(bp);
		uma_zfree(vnode_pbuf_zone, bp);
		return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
VM_PAGER_ERROR : VM_PAGER_OK); 109090effb23SGleb Smirnoff } 1091b0cd2017SGleb Smirnoff } 109290effb23SGleb Smirnoff 109390effb23SGleb Smirnoff static void 109490effb23SGleb Smirnoff vnode_pager_generic_getpages_done_async(struct buf *bp) 109590effb23SGleb Smirnoff { 109690effb23SGleb Smirnoff int error; 109790effb23SGleb Smirnoff 109890effb23SGleb Smirnoff error = vnode_pager_generic_getpages_done(bp); 1099b0cd2017SGleb Smirnoff /* Run the iodone upon the requested range. */ 1100b0cd2017SGleb Smirnoff bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore, 1101b0cd2017SGleb Smirnoff bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error); 110290effb23SGleb Smirnoff for (int i = 0; i < bp->b_npages; i++) 110390effb23SGleb Smirnoff bp->b_pages[i] = NULL; 110490effb23SGleb Smirnoff bp->b_vp = NULL; 110590effb23SGleb Smirnoff pbrelbo(bp); 1106756a5412SGleb Smirnoff uma_zfree(vnode_pbuf_zone, bp); 110790effb23SGleb Smirnoff } 110890effb23SGleb Smirnoff 110990effb23SGleb Smirnoff static int 111090effb23SGleb Smirnoff vnode_pager_generic_getpages_done(struct buf *bp) 111190effb23SGleb Smirnoff { 111290effb23SGleb Smirnoff vm_object_t object; 111390effb23SGleb Smirnoff off_t tfoff, nextoff; 111490effb23SGleb Smirnoff int i, error; 111590effb23SGleb Smirnoff 111690effb23SGleb Smirnoff error = (bp->b_ioflags & BIO_ERROR) != 0 ? EIO : 0; 111790effb23SGleb Smirnoff object = bp->b_vp->v_object; 111890effb23SGleb Smirnoff 111990effb23SGleb Smirnoff if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) { 1120fade8dd7SJeff Roberson if (!buf_mapped(bp)) { 1121fade8dd7SJeff Roberson bp->b_data = bp->b_kvabase; 1122fade8dd7SJeff Roberson pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, 112390effb23SGleb Smirnoff bp->b_npages); 112490effb23SGleb Smirnoff } 1125fade8dd7SJeff Roberson bzero(bp->b_data + bp->b_bcount, 112690effb23SGleb Smirnoff PAGE_SIZE * bp->b_npages - bp->b_bcount); 112790effb23SGleb Smirnoff } 1128fade8dd7SJeff Roberson if (buf_mapped(bp)) { 1129fade8dd7SJeff Roberson pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 1130fade8dd7SJeff Roberson bp->b_data = unmapped_buf; 113190effb23SGleb Smirnoff } 113226f9a767SRodney W. Grimes 11337f935055SJeff Roberson /* Read lock to protect size. */ 11347f935055SJeff Roberson VM_OBJECT_RLOCK(object); 113590effb23SGleb Smirnoff for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex); 113690effb23SGleb Smirnoff i < bp->b_npages; i++, tfoff = nextoff) { 11378f9110f6SJohn Dyson vm_page_t mt; 11388f9110f6SJohn Dyson 11398f9110f6SJohn Dyson nextoff = tfoff + PAGE_SIZE; 114090effb23SGleb Smirnoff mt = bp->b_pages[i]; 11412f81c92eSMark Johnston if (mt == bogus_page) 11422f81c92eSMark Johnston continue; 11438f9110f6SJohn Dyson 114454746b67SDmitrij Tejblum if (nextoff <= object->un_pager.vnp.vnp_size) { 11458d17e694SJulian Elischer /* 11468d17e694SJulian Elischer * Read filled up entire page. 11478d17e694SJulian Elischer */ 11480012f373SJeff Roberson vm_page_valid(mt); 1149016a3c93SAlan Cox KASSERT(mt->dirty == 0, 115079f0deb9SGleb Smirnoff ("%s: page %p is dirty", __func__, mt)); 1151016a3c93SAlan Cox KASSERT(!pmap_page_is_mapped(mt), 115279f0deb9SGleb Smirnoff ("%s: page %p is mapped", __func__, mt)); 11538f9110f6SJohn Dyson } else { 11548d17e694SJulian Elischer /* 115542eb4108SAlan Cox * Read did not fill up entire page. 11568d17e694SJulian Elischer * 11578d17e694SJulian Elischer * Currently we do not set the entire page valid, 11588d17e694SJulian Elischer * we just try to clear the piece that we couldn't 11598d17e694SJulian Elischer * read. 
11608d17e694SJulian Elischer */ 1161dc874f98SKonstantin Belousov vm_page_set_valid_range(mt, 0, 116254746b67SDmitrij Tejblum object->un_pager.vnp.vnp_size - tfoff); 116342eb4108SAlan Cox KASSERT((mt->dirty & vm_page_bits(0, 116442eb4108SAlan Cox object->un_pager.vnp.vnp_size - tfoff)) == 0, 116579f0deb9SGleb Smirnoff ("%s: page %p is dirty", __func__, mt)); 11668f9110f6SJohn Dyson } 11678f9110f6SJohn Dyson 1168b0cd2017SGleb Smirnoff if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter) 1169b6c00483SKonstantin Belousov vm_page_readahead_finish(mt); 117003679e23SAlan Cox } 11717f935055SJeff Roberson VM_OBJECT_RUNLOCK(object); 117290effb23SGleb Smirnoff if (error != 0) 117390effb23SGleb Smirnoff printf("%s: I/O read error %d\n", __func__, error); 117490effb23SGleb Smirnoff 117590effb23SGleb Smirnoff return (error); 117626f9a767SRodney W. Grimes } 117726f9a767SRodney W. Grimes 1178ce75f2c3SMike Smith /* 1179ce75f2c3SMike Smith * EOPNOTSUPP is no longer legal. For local media VFS's that do not 1180ce75f2c3SMike Smith * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call 1181ce75f2c3SMike Smith * vnode_pager_generic_putpages() to implement the previous behaviour. 1182ce75f2c3SMike Smith * 1183ce75f2c3SMike Smith * All other FS's should use the bypass to get to the local media 1184ce75f2c3SMike Smith * backing vp's VOP_PUTPAGES. 1185ce75f2c3SMike Smith */ 1186e4542174SMatthew Dillon static void 11877ebba1f8SGleb Smirnoff vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count, 118833cad9e9SKonstantin Belousov int flags, int *rtvals) 1189170db9c6SJohn Dyson { 1190170db9c6SJohn Dyson int rtval; 1191170db9c6SJohn Dyson struct vnode *vp; 119286ffbd76SMike Smith int bytes = count * PAGE_SIZE; 1193ad980522SJohn Dyson 11940e3cdf2cSAlan Cox /* 11950e3cdf2cSAlan Cox * Force synchronous operation if we are extremely low on memory 11960e3cdf2cSAlan Cox * to prevent a low-memory deadlock. VOP operations often need to 11970e3cdf2cSAlan Cox * allocate more memory to initiate the I/O ( i.e. do a BMAP 11980e3cdf2cSAlan Cox * operation ). The swapper handles the case by limiting the amount 11990e3cdf2cSAlan Cox * of asynchronous I/O, but that sort of solution doesn't scale well 12000e3cdf2cSAlan Cox * for the vnode pager without a lot of work. 12010e3cdf2cSAlan Cox * 12020e3cdf2cSAlan Cox * Also, the backing vnode's iodone routine may not wake the pageout 12030e3cdf2cSAlan Cox * daemon up. This should probably be addressed XXX.
12040e3cdf2cSAlan Cox */ 12050e3cdf2cSAlan Cox 1206e2068d0bSJeff Roberson if (vm_page_count_min()) 120733cad9e9SKonstantin Belousov flags |= VM_PAGER_PUT_SYNC; 12080e3cdf2cSAlan Cox 12090e3cdf2cSAlan Cox /* 12100e3cdf2cSAlan Cox * Call device-specific putpages function 12110e3cdf2cSAlan Cox */ 1212170db9c6SJohn Dyson vp = object->handle; 121389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 121433cad9e9SKonstantin Belousov rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals); 121523955314SAlfred Perlstein KASSERT(rtval != EOPNOTSUPP, 121623955314SAlfred Perlstein ("vnode_pager: stale FS putpages\n")); 121789f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 1218170db9c6SJohn Dyson } 1219170db9c6SJohn Dyson 122005877a85SKonstantin Belousov static int 122105877a85SKonstantin Belousov vn_off2bidx(vm_ooffset_t offset) 122205877a85SKonstantin Belousov { 122305877a85SKonstantin Belousov 122405877a85SKonstantin Belousov return ((offset & PAGE_MASK) / DEV_BSIZE); 122505877a85SKonstantin Belousov } 122605877a85SKonstantin Belousov 122705877a85SKonstantin Belousov static bool 122805877a85SKonstantin Belousov vn_dirty_blk(vm_page_t m, vm_ooffset_t offset) 122905877a85SKonstantin Belousov { 123005877a85SKonstantin Belousov 123105877a85SKonstantin Belousov KASSERT(IDX_TO_OFF(m->pindex) <= offset && 123205877a85SKonstantin Belousov offset < IDX_TO_OFF(m->pindex + 1), 123305877a85SKonstantin Belousov ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex, 123405877a85SKonstantin Belousov (uintmax_t)offset)); 123505877a85SKonstantin Belousov return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0); 123605877a85SKonstantin Belousov } 1237ce75f2c3SMike Smith 123826f9a767SRodney W. Grimes /* 1239ce75f2c3SMike Smith * This is now called from local media FS's to operate against their 12404491ea91SEivind Eklund * own vnodes if they fail to implement VOP_PUTPAGES. 12412b6b0df7SMatthew Dillon * 12422b6b0df7SMatthew Dillon * This is typically called indirectly via the pageout daemon and 1243763df3ecSPedro F. Giffuni * clustering has typically already occurred, so in general we ask the 12442b6b0df7SMatthew Dillon * underlying filesystem to write the data out asynchronously rather 12452b6b0df7SMatthew Dillon * than delayed. 124626f9a767SRodney W. Grimes */ 1247ce75f2c3SMike Smith int 1248c46b90e9SAlan Cox vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount, 1249c46b90e9SAlan Cox int flags, int *rtvals) 125026f9a767SRodney W. Grimes { 1251ce75f2c3SMike Smith vm_object_t object; 1252c46b90e9SAlan Cox vm_page_t m; 125305877a85SKonstantin Belousov vm_ooffset_t maxblksz, next_offset, poffset, prev_offset; 1254f6b04d2bSDavid Greenman struct uio auio; 1255f6b04d2bSDavid Greenman struct iovec aiov; 125605877a85SKonstantin Belousov off_t prev_resid, wrsz; 1257e6c44f65SKonstantin Belousov int count, error, i, maxsize, ncount, pgoff, ppscheck; 125805877a85SKonstantin Belousov bool in_hole; 1259dd498befSPaul Saab static struct timeval lastfail; 1260dd498befSPaul Saab static int curfail; 126126f9a767SRodney W. Grimes 1262ce75f2c3SMike Smith object = vp->v_object; 1263ce75f2c3SMike Smith count = bytecount / PAGE_SIZE; 1264ce75f2c3SMike Smith 126526f9a767SRodney W. Grimes for (i = 0; i < count; i++) 1266031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_ERROR;
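/*
 * Illustrative note, added commentary rather than original source:
 * vn_off2bidx() above views a page as an array of DEV_BSIZE blocks;
 * with DEV_BSIZE == 512 and PAGE_SIZE == 4096, byte offsets 0..511
 * within a page map to block 0, 512..1023 to block 1, and so on up
 * to block 7, and vn_dirty_blk() tests the corresponding bit of
 * m->dirty.  rtvals[] is preset to VM_PAGER_ERROR so that any page
 * not explicitly marked before return is reported as failed.
 */
126726f9a767SRodney W.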
Grimes 1268c46b90e9SAlan Cox if ((int64_t)ma[0]->pindex < 0) { 1269e6c44f65SKonstantin Belousov printf("vnode_pager_generic_putpages: " 1270e6c44f65SKonstantin Belousov "attempt to write meta-data 0x%jx(%lx)\n", 1271e6c44f65SKonstantin Belousov (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty); 1272f6b04d2bSDavid Greenman rtvals[0] = VM_PAGER_BAD; 1273e6c44f65SKonstantin Belousov return (VM_PAGER_BAD); 12740d94caffSDavid Greenman } 12750bdb7528SDavid Greenman 1276f6b04d2bSDavid Greenman maxsize = count * PAGE_SIZE; 1277f6b04d2bSDavid Greenman ncount = count; 127826f9a767SRodney W. Grimes 1279c46b90e9SAlan Cox poffset = IDX_TO_OFF(ma[0]->pindex); 128000a6f47fSMatthew Dillon 128100a6f47fSMatthew Dillon /* 128200a6f47fSMatthew Dillon * If the page-aligned write is larger than the actual file we 1283763df3ecSPedro F. Giffuni * have to invalidate pages occurring beyond the file EOF. However, 128400a6f47fSMatthew Dillon * there is an edge case where a file may not be page-aligned and 128500a6f47fSMatthew Dillon * the last page is partially invalid. In this case the filesystem 128600a6f47fSMatthew Dillon * may not properly clear the dirty bits for the entire page (which 128700a6f47fSMatthew Dillon * could be VM_PAGE_BITS_ALL due to the page having been mmap()d). 128800a6f47fSMatthew Dillon * With the page locked we are free to fix up the dirty bits here. 12893ebeaf59SMatthew Dillon * 12903ebeaf59SMatthew Dillon * We do not under any circumstances truncate the valid bits, as 12913ebeaf59SMatthew Dillon * this will screw up bogus page replacement. 129200a6f47fSMatthew Dillon */ 1293b3d4ab66SKonstantin Belousov VM_OBJECT_RLOCK(object); 1294a316d390SJohn Dyson if (maxsize + poffset > object->un_pager.vnp.vnp_size) { 129500a6f47fSMatthew Dillon if (object->un_pager.vnp.vnp_size > poffset) { 1296a316d390SJohn Dyson maxsize = object->un_pager.vnp.vnp_size - poffset; 1297aa8de40aSPoul-Henning Kamp ncount = btoc(maxsize); 129800a6f47fSMatthew Dillon if ((pgoff = (int)maxsize & PAGE_MASK) != 0) { 1299938cdc42SKonstantin Belousov pgoff = roundup2(pgoff, DEV_BSIZE); 1300938cdc42SKonstantin Belousov 1301c46b90e9SAlan Cox /* 13027f935055SJeff Roberson * If the page is busy and the following 1303c46b90e9SAlan Cox * conditions hold, then the page's dirty 1304c46b90e9SAlan Cox * field cannot be concurrently changed by a 1305c46b90e9SAlan Cox * pmap operation. 1306c46b90e9SAlan Cox */ 1307c46b90e9SAlan Cox m = ma[ncount - 1]; 1308c7aebda8SAttilio Rao vm_page_assert_sbusied(m); 13096031c68dSAlan Cox KASSERT(!pmap_page_is_write_mapped(m), 1310c46b90e9SAlan Cox ("vnode_pager_generic_putpages: page %p is not read-only", m)); 1311e6c44f65SKonstantin Belousov MPASS(m->dirty != 0); 1312c46b90e9SAlan Cox vm_page_clear_dirty(m, pgoff, PAGE_SIZE - 1313c46b90e9SAlan Cox pgoff); 131400a6f47fSMatthew Dillon } 131500a6f47fSMatthew Dillon } else { 131600a6f47fSMatthew Dillon maxsize = 0; 131700a6f47fSMatthew Dillon ncount = 0; 131800a6f47fSMatthew Dillon } 1319e6c44f65SKonstantin Belousov for (i = ncount; i < count; i++) 1320f6b04d2bSDavid Greenman rtvals[i] = VM_PAGER_BAD; 1321f6b04d2bSDavid Greenman } 13227f935055SJeff Roberson VM_OBJECT_RUNLOCK(object);
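/*
 * Worked example of the EOF clipping above, with hypothetical numbers
 * added for illustration: with PAGE_SIZE == 4096, DEV_BSIZE == 512,
 * poffset == 0, and vnp_size == 4660, maxsize becomes 4660 and ncount
 * becomes btoc(4660) == 2.  pgoff == (4660 & 4095) == 564 is rounded
 * up to 1024, so vm_page_clear_dirty() clears the dirty bits for
 * bytes 1024..4095 of the last page, and all pages past ncount are
 * reported as VM_PAGER_BAD.
 */
132326f9a767SRodney W.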
Grimes 1324f6b04d2bSDavid Greenman auio.uio_iov = &aiov; 1325f6b04d2bSDavid Greenman auio.uio_segflg = UIO_NOCOPY; 1326f6b04d2bSDavid Greenman auio.uio_rw = UIO_WRITE; 1327e6c44f65SKonstantin Belousov auio.uio_td = NULL; 132805877a85SKonstantin Belousov maxblksz = roundup2(poffset + maxsize, DEV_BSIZE); 132905877a85SKonstantin Belousov 133005877a85SKonstantin Belousov for (prev_offset = poffset; prev_offset < maxblksz;) { 133105877a85SKonstantin Belousov /* Skip clean blocks. */ 133205877a85SKonstantin Belousov for (in_hole = true; in_hole && prev_offset < maxblksz;) { 133305877a85SKonstantin Belousov m = ma[OFF_TO_IDX(prev_offset - poffset)]; 133405877a85SKonstantin Belousov for (i = vn_off2bidx(prev_offset); 133505877a85SKonstantin Belousov i < sizeof(vm_page_bits_t) * NBBY && 133605877a85SKonstantin Belousov prev_offset < maxblksz; i++) { 133705877a85SKonstantin Belousov if (vn_dirty_blk(m, prev_offset)) { 133805877a85SKonstantin Belousov in_hole = false; 133905877a85SKonstantin Belousov break; 134005877a85SKonstantin Belousov } 134105877a85SKonstantin Belousov prev_offset += DEV_BSIZE; 134205877a85SKonstantin Belousov } 134305877a85SKonstantin Belousov } 134405877a85SKonstantin Belousov if (in_hole) 134505877a85SKonstantin Belousov goto write_done; 134605877a85SKonstantin Belousov 134705877a85SKonstantin Belousov /* Find longest run of dirty blocks. */ 134805877a85SKonstantin Belousov for (next_offset = prev_offset; next_offset < maxblksz;) { 134905877a85SKonstantin Belousov m = ma[OFF_TO_IDX(next_offset - poffset)]; 135005877a85SKonstantin Belousov for (i = vn_off2bidx(next_offset); 135105877a85SKonstantin Belousov i < sizeof(vm_page_bits_t) * NBBY && 135205877a85SKonstantin Belousov next_offset < maxblksz; i++) { 135305877a85SKonstantin Belousov if (!vn_dirty_blk(m, next_offset)) 135405877a85SKonstantin Belousov goto start_write; 135505877a85SKonstantin Belousov next_offset += DEV_BSIZE; 135605877a85SKonstantin Belousov } 135705877a85SKonstantin Belousov } 135805877a85SKonstantin Belousov start_write: 135905877a85SKonstantin Belousov if (next_offset > poffset + maxsize) 136005877a85SKonstantin Belousov next_offset = poffset + maxsize; 136105877a85SKonstantin Belousov 136205877a85SKonstantin Belousov /* 136305877a85SKonstantin Belousov * Getting here requires finding a dirty block in the 136405877a85SKonstantin Belousov * 'skip clean blocks' loop. 
136505877a85SKonstantin Belousov */ 136605877a85SKonstantin Belousov MPASS(prev_offset < next_offset); 136705877a85SKonstantin Belousov 136805877a85SKonstantin Belousov aiov.iov_base = NULL; 136905877a85SKonstantin Belousov auio.uio_iovcnt = 1; 137005877a85SKonstantin Belousov auio.uio_offset = prev_offset; 137105877a85SKonstantin Belousov prev_resid = auio.uio_resid = aiov.iov_len = next_offset - 137205877a85SKonstantin Belousov prev_offset; 137305877a85SKonstantin Belousov error = VOP_WRITE(vp, &auio, 137405877a85SKonstantin Belousov vnode_pager_putpages_ioflags(flags), curthread->td_ucred); 137505877a85SKonstantin Belousov 137605877a85SKonstantin Belousov wrsz = prev_resid - auio.uio_resid; 137705877a85SKonstantin Belousov if (wrsz == 0) { 137805877a85SKonstantin Belousov if (ppsratecheck(&lastfail, &curfail, 1) != 0) { 137905877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: " 138005877a85SKonstantin Belousov "zero-length write at %ju resid %zd\n", 138105877a85SKonstantin Belousov auio.uio_offset, auio.uio_resid); 138205877a85SKonstantin Belousov } 138305877a85SKonstantin Belousov break; 138405877a85SKonstantin Belousov } 138505877a85SKonstantin Belousov 138605877a85SKonstantin Belousov /* Adjust the starting offset for next iteration. */ 138705877a85SKonstantin Belousov prev_offset += wrsz; 138805877a85SKonstantin Belousov MPASS(auio.uio_offset == prev_offset); 1389f6b04d2bSDavid Greenman 13903dbb0ca6SKonstantin Belousov ppscheck = 0; 139105877a85SKonstantin Belousov if (error != 0 && (ppscheck = ppsratecheck(&lastfail, 139205877a85SKonstantin Belousov &curfail, 1)) != 0) 139305877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: I/O error %d\n", 139405877a85SKonstantin Belousov error); 1395e6c44f65SKonstantin Belousov if (auio.uio_resid != 0 && (ppscheck != 0 || 1396e6c44f65SKonstantin Belousov ppsratecheck(&lastfail, &curfail, 1) != 0)) 139705877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: residual I/O %zd " 139805877a85SKonstantin Belousov "at %ju\n", auio.uio_resid, 139905877a85SKonstantin Belousov (uintmax_t)ma[0]->pindex); 140005877a85SKonstantin Belousov if (error != 0 || auio.uio_resid != 0) 140105877a85SKonstantin Belousov break; 140205877a85SKonstantin Belousov } 140305877a85SKonstantin Belousov write_done: 140405877a85SKonstantin Belousov /* Mark completely processed pages. */ 140505877a85SKonstantin Belousov for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++) 140626f9a767SRodney W. Grimes rtvals[i] = VM_PAGER_OK; 140705877a85SKonstantin Belousov /* Mark partial EOF page. */ 140805877a85SKonstantin Belousov if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0) 140905877a85SKonstantin Belousov rtvals[i++] = VM_PAGER_OK; 141005877a85SKonstantin Belousov /* Unwritten pages in the range: as a bonus, report success for those already clean. */ 141105877a85SKonstantin Belousov for (; i < ncount; i++) 141205877a85SKonstantin Belousov rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR; 141305877a85SKonstantin Belousov VM_CNT_ADD(v_vnodepgsout, i); 141405877a85SKonstantin Belousov VM_CNT_INC(v_vnodeout); 1415e6c44f65SKonstantin Belousov return (rtvals[0]);
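/*
 * Note added for clarity, not part of the original source: only
 * rtvals[0] is returned; callers that care about individual pages
 * must inspect the whole rtvals[] array, where each slot holds
 * VM_PAGER_OK, VM_PAGER_BAD, or VM_PAGER_ERROR as filled in above.
 */
141626f9a767SRodney W.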
Grimes } 1417031ec8c1SKonstantin Belousov 141865b9599aSKonstantin Belousov int 141965b9599aSKonstantin Belousov vnode_pager_putpages_ioflags(int pager_flags) 142065b9599aSKonstantin Belousov { 142165b9599aSKonstantin Belousov int ioflags; 142265b9599aSKonstantin Belousov 142365b9599aSKonstantin Belousov /* 142465b9599aSKonstantin Belousov * Pageouts are already clustered; use IO_ASYNC to force a 142565b9599aSKonstantin Belousov * bawrite() rather than a bdwrite() to prevent paging I/O 142665b9599aSKonstantin Belousov * from saturating the buffer cache. Dummy-up the sequential 142765b9599aSKonstantin Belousov * heuristic to cause large ranges to cluster. If neither 142865b9599aSKonstantin Belousov * IO_SYNC nor IO_ASYNC is set, the system decides how to 142965b9599aSKonstantin Belousov * cluster. 143065b9599aSKonstantin Belousov */ 143165b9599aSKonstantin Belousov ioflags = IO_VMIO; 143265b9599aSKonstantin Belousov if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0) 143365b9599aSKonstantin Belousov ioflags |= IO_SYNC; 143465b9599aSKonstantin Belousov else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0) 143565b9599aSKonstantin Belousov ioflags |= IO_ASYNC; 143665b9599aSKonstantin Belousov ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL: 0; 143765b9599aSKonstantin Belousov ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0; 143865b9599aSKonstantin Belousov ioflags |= IO_SEQMAX << IO_SEQSHIFT; 143965b9599aSKonstantin Belousov return (ioflags); 144065b9599aSKonstantin Belousov } 144165b9599aSKonstantin Belousov 1442555b7bb4SKonstantin Belousov /* 1443555b7bb4SKonstantin Belousov * vnode_pager_undirty_pages(). 1444555b7bb4SKonstantin Belousov * 1445555b7bb4SKonstantin Belousov * A helper to mark pages as clean after a pageout that was possibly 1446555b7bb4SKonstantin Belousov * done with a short write. The lpos argument specifies the page run 1447555b7bb4SKonstantin Belousov * length in bytes, and the written argument specifies how many bytes 1448555b7bb4SKonstantin Belousov * were actually written. eof is the offset just past the last valid 1449555b7bb4SKonstantin Belousov * byte in the vnode, measured from the absolute file position of the 1450555b7bb4SKonstantin Belousov * first byte in the run. 1451555b7bb4SKonstantin Belousov */ 1452031ec8c1SKonstantin Belousov void 1453555b7bb4SKonstantin Belousov vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof, 1454555b7bb4SKonstantin Belousov int lpos) 1455031ec8c1SKonstantin Belousov { 14569d17da3bSKonstantin Belousov vm_object_t obj; 1457555b7bb4SKonstantin Belousov int i, pos, pos_devb; 1458031ec8c1SKonstantin Belousov 1459555b7bb4SKonstantin Belousov if (written == 0 && eof >= lpos) 14609d17da3bSKonstantin Belousov return; 14619d17da3bSKonstantin Belousov obj = ma[0]->object; 1462031ec8c1SKonstantin Belousov for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) { 1463031ec8c1SKonstantin Belousov if (pos < trunc_page(written)) { 1464031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_OK; 1465031ec8c1SKonstantin Belousov vm_page_undirty(ma[i]); 1466031ec8c1SKonstantin Belousov } else { 1467031ec8c1SKonstantin Belousov /* Partially written page.
*/ 1468031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_AGAIN; 1469031ec8c1SKonstantin Belousov vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK); 1470031ec8c1SKonstantin Belousov } 1471031ec8c1SKonstantin Belousov } 1472555b7bb4SKonstantin Belousov if (eof >= lpos) /* avoid truncation */ 14737f935055SJeff Roberson return; 1474555b7bb4SKonstantin Belousov for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) { 1475555b7bb4SKonstantin Belousov if (pos != trunc_page(pos)) { 1476555b7bb4SKonstantin Belousov /* 1477555b7bb4SKonstantin Belousov * The page contains the last valid byte in 1478555b7bb4SKonstantin Belousov * the vnode; mark the rest of the page as 1479555b7bb4SKonstantin Belousov * clean, potentially making the whole page 1480555b7bb4SKonstantin Belousov * clean. 1481555b7bb4SKonstantin Belousov */ 1482555b7bb4SKonstantin Belousov pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE); 1483555b7bb4SKonstantin Belousov vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE - 1484555b7bb4SKonstantin Belousov pos_devb); 1485555b7bb4SKonstantin Belousov 1486555b7bb4SKonstantin Belousov /* 1487555b7bb4SKonstantin Belousov * If the page was cleaned, report the pageout 1488555b7bb4SKonstantin Belousov * on it as successful; msync() then no longer 1489555b7bb4SKonstantin Belousov * needs to write out the page, endlessly 1490555b7bb4SKonstantin Belousov * creating write requests and dirty buffers. 1491555b7bb4SKonstantin Belousov */ 1492555b7bb4SKonstantin Belousov if (ma[i]->dirty == 0) 1493555b7bb4SKonstantin Belousov rtvals[i] = VM_PAGER_OK; 1494555b7bb4SKonstantin Belousov 1495555b7bb4SKonstantin Belousov pos = round_page(pos); 1496555b7bb4SKonstantin Belousov } else { 1497555b7bb4SKonstantin Belousov /* vm_pageout_flush() clears dirty */ 1498555b7bb4SKonstantin Belousov rtvals[i] = VM_PAGER_BAD; 1499555b7bb4SKonstantin Belousov pos += PAGE_SIZE; 1500555b7bb4SKonstantin Belousov } 1501555b7bb4SKonstantin Belousov } 1502031ec8c1SKonstantin Belousov } 150384110e7eSKonstantin Belousov 1504fe7bcbafSKyle Evans static void 150584110e7eSKonstantin Belousov vnode_pager_update_writecount(vm_object_t object, vm_offset_t start, 150684110e7eSKonstantin Belousov vm_offset_t end) 150784110e7eSKonstantin Belousov { 150884110e7eSKonstantin Belousov struct vnode *vp; 150984110e7eSKonstantin Belousov vm_ooffset_t old_wm; 151084110e7eSKonstantin Belousov 151189f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 151284110e7eSKonstantin Belousov if (object->type != OBJT_VNODE) { 151389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 151484110e7eSKonstantin Belousov return; 151584110e7eSKonstantin Belousov } 151684110e7eSKonstantin Belousov old_wm = object->un_pager.vnp.writemappings; 151784110e7eSKonstantin Belousov object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start; 151884110e7eSKonstantin Belousov vp = object->handle; 151984110e7eSKonstantin Belousov if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) { 152078022527SKonstantin Belousov ASSERT_VOP_LOCKED(vp, "v_writecount inc"); 152178022527SKonstantin Belousov VOP_ADD_WRITECOUNT_CHECKED(vp, 1); 1522b47f6241SJohn Baldwin CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d", 1523b47f6241SJohn Baldwin __func__, vp, vp->v_writecount); 152484110e7eSKonstantin Belousov } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) { 152578022527SKonstantin Belousov ASSERT_VOP_LOCKED(vp, "v_writecount dec"); 152678022527SKonstantin Belousov VOP_ADD_WRITECOUNT_CHECKED(vp, -1); 1527b47f6241SJohn Baldwin CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to
%d", 1528b47f6241SJohn Baldwin __func__, vp, vp->v_writecount); 152984110e7eSKonstantin Belousov } 153089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 153184110e7eSKonstantin Belousov } 153284110e7eSKonstantin Belousov 1533fe7bcbafSKyle Evans static void 153484110e7eSKonstantin Belousov vnode_pager_release_writecount(vm_object_t object, vm_offset_t start, 153584110e7eSKonstantin Belousov vm_offset_t end) 153684110e7eSKonstantin Belousov { 153784110e7eSKonstantin Belousov struct vnode *vp; 153884110e7eSKonstantin Belousov struct mount *mp; 153984110e7eSKonstantin Belousov vm_offset_t inc; 154084110e7eSKonstantin Belousov 154189f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 154284110e7eSKonstantin Belousov 154384110e7eSKonstantin Belousov /* 154484110e7eSKonstantin Belousov * First, recheck the object type to account for the race when 154584110e7eSKonstantin Belousov * the vnode is reclaimed. 154684110e7eSKonstantin Belousov */ 154784110e7eSKonstantin Belousov if (object->type != OBJT_VNODE) { 154889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 154984110e7eSKonstantin Belousov return; 155084110e7eSKonstantin Belousov } 155184110e7eSKonstantin Belousov 155284110e7eSKonstantin Belousov /* 155384110e7eSKonstantin Belousov * Optimize for the case when writemappings is not going to 155484110e7eSKonstantin Belousov * zero. 155584110e7eSKonstantin Belousov */ 155684110e7eSKonstantin Belousov inc = end - start; 155784110e7eSKonstantin Belousov if (object->un_pager.vnp.writemappings != inc) { 155884110e7eSKonstantin Belousov object->un_pager.vnp.writemappings -= inc; 155989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 156084110e7eSKonstantin Belousov return; 156184110e7eSKonstantin Belousov } 156284110e7eSKonstantin Belousov 156384110e7eSKonstantin Belousov vp = object->handle; 156484110e7eSKonstantin Belousov vhold(vp); 156589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 156684110e7eSKonstantin Belousov mp = NULL; 156784110e7eSKonstantin Belousov vn_start_write(vp, &mp, V_WAIT); 156878022527SKonstantin Belousov vn_lock(vp, LK_SHARED | LK_RETRY); 156984110e7eSKonstantin Belousov 157084110e7eSKonstantin Belousov /* 157184110e7eSKonstantin Belousov * Decrement the object's writemappings, by swapping the start 157284110e7eSKonstantin Belousov * and end arguments for vnode_pager_update_writecount(). If 157384110e7eSKonstantin Belousov * there was not a race with vnode reclaimation, then the 157484110e7eSKonstantin Belousov * vnode's v_writecount is decremented. 157584110e7eSKonstantin Belousov */ 157684110e7eSKonstantin Belousov vnode_pager_update_writecount(object, end, start); 1577b249ce48SMateusz Guzik VOP_UNLOCK(vp); 157884110e7eSKonstantin Belousov vdrop(vp); 157984110e7eSKonstantin Belousov if (mp != NULL) 158084110e7eSKonstantin Belousov vn_finished_write(mp); 158184110e7eSKonstantin Belousov } 1582