/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 *  All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */
/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);
static void vnode_pager_update_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
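/*
 * Pager method table.  The VM system reaches the vnode pager only
 * through these pgo_* entry points; the getpages/putpages methods
 * below in turn dispatch to the filesystem through VOP_GETPAGES()
 * and VOP_PUTPAGES().
 */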
struct pagerops vnodepagerops = {
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_getpages_async = vnode_pager_getpages_async,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
	.pgo_update_writecount = vnode_pager_update_writecount,
	.pgo_release_writecount = vnode_pager_release_writecount,
	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
	.pgo_mightbedirty = vm_object_mightbedirty_,
};

static struct domainset *vnode_domainset = NULL;

SYSCTL_PROC(_debug, OID_AUTO, vnode_domainset,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RW, &vnode_domainset, 0,
    sysctl_handle_domainset, "A", "Default vnode NUMA policy");

static int nvnpbufs;
SYSCTL_INT(_vm, OID_AUTO, vnode_pbufs, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &nvnpbufs, 0, "number of physical buffers allocated for vnode pager");

static uma_zone_t vnode_pbuf_zone;

static void
vnode_pager_init(void *dummy)
{

#ifdef __LP64__
	nvnpbufs = nswbuf * 2;
#else
	nvnpbufs = nswbuf / 2;
#endif
	TUNABLE_INT_FETCH("vm.vnode_pbufs", &nvnpbufs);
	vnode_pbuf_zone = pbuf_zsecond_create("vnpbuf", nvnpbufs);
}
SYSINIT(vnode_pager, SI_SUB_CPU, SI_ORDER_ANY, vnode_pager_init, NULL);
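/*
 * nvnpbufs can be overridden at boot, e.g. by setting vm.vnode_pbufs
 * in loader.conf.  The default depends on nswbuf, which is computed
 * during boot; that is why vnode_pager_init() above fetches the
 * tunable manually (CTLFLAG_NOFETCH) instead of letting the SYSCTL
 * registration do it, and why the sysctl is read-only at runtime
 * (CTLFLAG_RDTUN).
 */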
/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;
	bool last;

	if (!vn_isdisk(vp) && vn_canvmio(vp) == FALSE)
		return (0);

	object = vp->v_object;
	if (object != NULL)
		return (0);

	if (size == 0) {
		if (vn_isdisk(vp)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.  We still have
	 * to serialize with vnode_pager_dealloc() for the last
	 * potential reference.
	 */
	VM_OBJECT_RLOCK(object);
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);
	if (last)
		vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL || obj->handle != vp)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	MPASS(obj->type == OBJT_VNODE);
	umtx_shm_object_terminated(obj);
	if (obj->ref_count == 0) {
		KASSERT((obj->flags & OBJ_DEAD) == 0,
		    ("vnode_destroy_vobject: Terminating dead object"));
		vm_object_set_flag(obj, OBJ_DEAD);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(obj);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(obj);
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}
/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *)handle;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
	VNPASS(vp->v_usecount > 0, vp);
retry:
	object = vp->v_object;

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;
		object->domain.dr_policy = vnode_domainset;
		object->handle = handle;
		if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_set_flag(object, OBJ_SIZEVNLOCK);
			VM_OBJECT_WUNLOCK(object);
		}
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were allocating.
			 */
			VI_UNLOCK(vp);
			VM_OBJECT_WLOCK(object);
			KASSERT(object->ref_count == 1,
			    ("leaked ref %p %d", object, object->ref_count));
			object->type = OBJT_DEAD;
			refcount_init(&object->ref_count, 0);
			VM_OBJECT_WUNLOCK(object);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
		vrefact(vp);
	} else {
		vm_object_reference(object);
#if VM_NRESERVLEVEL > 0
		if ((object->flags & OBJ_COLORED) == 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_color(object, 0);
			VM_OBJECT_WUNLOCK(object);
		}
#endif
	}
	return (object);
}
/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VI_LOCK(vp);

	/*
	 * vm_map_entry_set_vnode_text() cannot reach this vnode by
	 * following object->handle.  Clear all text references now.
	 * This also clears the transient references from
	 * kern_execve(), which is fine because dead_vnodeops uses nop
	 * for VOP_UNSET_TEXT().
	 */
	if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);
	VM_OBJECT_WUNLOCK(object);
	if (refs > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	uintptr_t lockstate;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || VN_IS_DOOMED(vp))
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;
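	/*
	 * Worked example of the conversion below, assuming 4K pages:
	 * with an f_iosize of 32K, pagesperblock is 8 and page index
	 * 21 lies in block 2; with a hypothetical 512-byte f_iosize,
	 * blocksperpage is 8 and the same page starts at block 168.
	 */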
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	lockstate = VM_OBJECT_DROP(object);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VM_OBJECT_PICKUP(object, lockstate);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			/*
			 * The BMAP vop can report a partial block in the
			 * 'after', but must not report blocks after EOF.
			 * Assert the latter, and truncate 'after' in case
			 * of the former.
			 */
			KASSERT((reqblock + *after) * pagesperblock <
			    roundup2(object->size, pagesperblock),
			    ("%s: reqblock %jd after %d size %ju", __func__,
			    (intmax_t)reqblock, *after,
			    (uintmax_t)object->size));
			*after *= pagesperblock;
			*after += pagesperblock - (poff + 1);
			if (pindex + *after >= object->size)
				*after = object->size - 1 - pindex;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
#ifdef DEBUG_VFS_LOCKS
	{
		struct mount *mp;

		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0)
			assert_vop_elocked(vp,
			    "vnode_pager_setsize and not locked vnode");
	}
#endif
	VM_OBJECT_WLOCK(object);
	if (object->type == OBJT_DEAD) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	KASSERT(object->type == OBJT_VNODE,
	    ("not vnode-backed object %p", object));
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if (!(nsize & PAGE_MASK))
			goto out;
		m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
		if (m == NULL)
			goto out;
		if (!vm_page_none_valid(m)) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * Update the valid bits to reflect the blocks that
			 * have been zeroed.  Some of these valid bits may
			 * have already been set.
			 */
			vm_page_set_valid_range(m, base, size);

			/*
			 * Round "base" to the next block boundary so that the
			 * dirty bit for a partially zeroed block is not
			 * cleared.
			 */
			base = roundup2(base, DEV_BSIZE);

			/*
			 * Clear out partial-page dirty bits.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
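			/*
			 * Example, with 4K pages and 512-byte blocks:
			 * for a new size ending at offset 0x523 of its
			 * last page, bytes 0x523-0xfff are zeroed and
			 * marked valid, then base is rounded up to
			 * 0x600 so that the dirty bit of the partially
			 * zeroed block 0x400-0x5ff survives.
			 */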
		}
		vm_page_xunbusy(m);
	}
out:
#if defined(__powerpc__) && !defined(__powerpc64__)
	object->un_pager.vnp.vnp_size = nsize;
#else
	atomic_store_64(&object->un_pager.vnp.vnp_size, nsize);
#endif
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (VN_IS_DOOMED(vp))
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}
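/*
 * Worked example for the 'run' conversion in vnode_pager_addr(),
 * assuming 4K pages and a 32K f_iosize: address 0x9000 gives
 * vblock 1 and voffset 0x1000.  If VOP_BMAP() reports 2 further
 * contiguous blocks, *run becomes (2 + 1) * 8 - 1 = 23 pages
 * readable contiguously starting at the page containing 'address'.
 */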
/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (VN_IS_DOOMED(vp))
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			atomic_add_long(&runningbufspace, bp->b_runningbufspace);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0) {
				KASSERT(bp->b_error != 0,
				    ("%s: buf error but b_error == 0\n", __func__));
				error = bp->b_error;
			}

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			uma_zfree(vnode_pbuf_zone, bp);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		vm_page_bits_set(m, &m->valid, bits);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}
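/*
 * A sketch of the no-VOP_BMAP() path follows: when the filesystem
 * returns EOPNOTSUPP for VOP_BMAP(), vnode_pager_generic_getpages()
 * falls back to the routine below, which maps the page with an
 * sf_buf and fills it with a plain synchronous VOP_READ(), one page
 * at a time.
 */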
/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		vm_page_valid(m);
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */
/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call to vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	struct vnode *vp;
	int rtval;

	/* Handle is stable with paging in progress. */
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	return rtval;
}

static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages_async not implemented\n"));
	return (rtval);
}

/*
 * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
 * local filesystems, where partially valid pages can only occur at
 * the end of file.
 */
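/*
 * A local filesystem opts into this implementation by installing the
 * two functions below as its VOP_GETPAGES and VOP_GETPAGES_ASYNC
 * methods in its vop vector.
 */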
int
vnode_pager_local_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

int
vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
	if (error != 0 && ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	struct bufobj *bo;
	struct buf *bp;
	off_t foff;
#ifdef INVARIANTS
	off_t blkno0;
#endif
	int bsize, pagesperblock;
	int error, before, after, rbehind, rahead, poff, i;
	int bytecount, secmask;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("%s does not support devices", __func__));

	if (VN_IS_DOOMED(vp))
		return (VM_PAGER_BAD);

	object = vp->v_object;
	foff = IDX_TO_OFF(m[0]->pindex);
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;

	KASSERT(foff < object->un_pager.vnp.vnp_size,
	    ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
	KASSERT(count <= atop(maxphys),
	    ("%s: requested %d pages", __func__, count));

	/*
	 * The last page has valid blocks.  Invalid part can only
	 * exist at the end of file, and the page is made fully valid
	 * by zeroing in vm_pager_get_pages().
	 */
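	/*
	 * Note the side effect in the test below: a valid trailing
	 * page is dropped from the request by decrementing 'count',
	 * and a request that shrinks to zero pages is completed
	 * immediately.
	 */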
	if (!vm_page_none_valid(m[count - 1]) && --count == 0) {
		if (iodone != NULL)
			iodone(arg, m, 1, 0);
		return (VM_PAGER_OK);
	}

	bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);
	MPASS((bp->b_flags & B_MAXPHYS) != 0);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
	if (error == EOPNOTSUPP) {
		uma_zfree(vnode_pbuf_zone, bp);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_old(object, m[i]);
			if (error)
				break;
		}
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		return (VM_PAGER_ERROR);
	}

	/*
	 * If the file system supports BMAP, but blocksize is smaller
	 * than a page size, then use special small filesystem code.
	 */
	if (pagesperblock == 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_smlfs(object, m[i]);
			if (error)
				break;
		}
		return (error);
	}

	/*
	 * A sparse file can be encountered only for a single page request,
	 * which may not be preceded by call to vm_pager_haspage().
	 */
	if (bp->b_blkno == -1) {
		KASSERT(count == 1,
		    ("%s: array[%d] request to a sparse file %p", __func__,
		    count, vp));
		uma_zfree(vnode_pbuf_zone, bp);
		pmap_zero_page(m[0]);
		KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
		    __func__, m[0]));
		vm_page_valid(m[0]);
		return (VM_PAGER_OK);
	}

#ifdef INVARIANTS
	blkno0 = bp->b_blkno;
#endif
	bp->b_blkno += (foff % bsize) / DEV_BSIZE;

	/* Recalculate blocks available after/before to pages. */
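	/*
	 * Example, assuming 4K pages and a 32K block (pagesperblock == 8):
	 * if the request starts at the third page of its block
	 * (poff == 2) and VOP_BMAP() returned before == 1 and
	 * after == 1, the contiguous range becomes before == 10 and
	 * after == 13 pages around the requested page.
	 */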
	poff = (foff % bsize) / PAGE_SIZE;
	before *= pagesperblock;
	before += poff;
	after *= pagesperblock;
	after += pagesperblock - (poff + 1);
	if (m[0]->pindex + after >= object->size)
		after = object->size - 1 - m[0]->pindex;
	KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
	    __func__, count, after + 1));
	after -= count - 1;

	/* Trim requested rbehind/rahead to possible values. */
	rbehind = a_rbehind ? *a_rbehind : 0;
	rahead = a_rahead ? *a_rahead : 0;
	rbehind = min(rbehind, before);
	rbehind = min(rbehind, m[0]->pindex);
	rahead = min(rahead, after);
	rahead = min(rahead, object->size - m[count - 1]->pindex);
	/*
	 * Check that total amount of pages fit into buf.  Trim rbehind and
	 * rahead evenly if not.
	 */
	if (rbehind + rahead + count > atop(maxphys)) {
		int trim, sum;

		trim = rbehind + rahead + count - atop(maxphys) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Roundup rbehind trim to block size. */
			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	KASSERT(rbehind + rahead + count <= atop(maxphys),
	    ("%s: behind %d ahead %d count %d maxphys %lu", __func__,
	    rbehind, rahead, count, maxphys));

	/*
	 * Fill in the bp->b_pages[] array with requested and optional
	 * read behind or read ahead pages.  Read behind pages are looked
	 * up in a backward direction, down to a first cached page.  Same
	 * for read ahead pages, but there is no need to shift the array
	 * in case of encountering a cached page.
	 */
	i = bp->b_npages = 0;
	if (rbehind) {
		vm_pindex_t startpindex, tpindex;
		vm_page_t p;

		VM_OBJECT_WLOCK(object);
		startpindex = m[0]->pindex - rbehind;
		if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
		    p->pindex >= startpindex)
			startpindex = p->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
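		/*
		 * The 'tpindex < m[0]->pindex' clause in the loop below
		 * catches the wrap past zero, which the comparison with
		 * startpindex alone would miss when startpindex is 0.
		 */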
		for (tpindex = m[0]->pindex - 1;
		    tpindex >= startpindex && tpindex < m[0]->pindex;
		    tpindex--, i++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL) {
				/* Shift the array. */
				for (int j = 0; j < i; j++)
					bp->b_pages[j] = bp->b_pages[j +
					    tpindex + 1 - startpindex];
				break;
			}
			bp->b_pages[tpindex - startpindex] = p;
		}

		bp->b_pgbefore = i;
		bp->b_npages += i;
		bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
	} else
		bp->b_pgbefore = 0;

	/* Requested pages. */
	for (int j = 0; j < count; j++, i++)
		bp->b_pages[i] = m[j];
	bp->b_npages += count;

	if (rahead) {
		vm_pindex_t endpindex, tpindex;
		vm_page_t p;

		if (!VM_OBJECT_WOWNED(object))
			VM_OBJECT_WLOCK(object);
		endpindex = m[count - 1]->pindex + rahead + 1;
		if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
		    p->pindex < endpindex)
			endpindex = p->pindex;
		if (endpindex > object->size)
			endpindex = object->size;

		for (tpindex = m[count - 1]->pindex + 1;
		    tpindex < endpindex; i++, tpindex++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			bp->b_pages[i] = p;
		}

		bp->b_pgafter = i - bp->b_npages;
		bp->b_npages = i;
	} else
		bp->b_pgafter = 0;

	if (VM_OBJECT_WOWNED(object))
		VM_OBJECT_WUNLOCK(object);

	/* Report back actual behind/ahead read. */
	if (a_rbehind)
		*a_rbehind = bp->b_pgbefore;
	if (a_rahead)
		*a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
	KASSERT(bp->b_npages <= atop(maxphys),
	    ("%s: buf %p overflowed", __func__, bp));
	for (int j = 1, prev = 0; j < bp->b_npages; j++) {
		if (bp->b_pages[j] == bogus_page)
			continue;
		KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
		    j - prev, ("%s: pages array not consecutive, bp %p",
		    __func__, bp));
		prev = j;
	}
#endif

	/*
	 * Recalculate first offset and bytecount with regard to read behind.
	 * Truncate bytecount to vnode real size and round up physical size
	 * for real devices.
	 */
	foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	bytecount = bp->b_npages << PAGE_SHIFT;
	if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
		bytecount = object->un_pager.vnp.vnp_size - foff;
	secmask = bo->bo_bsize - 1;
	KASSERT(secmask < PAGE_SIZE && secmask > 0,
	    ("%s: sector size %d too large", __func__, secmask + 1));
	bytecount = (bytecount + secmask) & ~secmask;

	/*
	 * And map the pages to be read into the kva, if the filesystem
	 * requires mapped buffers.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
	    unmapped_buf_allowed) {
		bp->b_data = unmapped_buf;
		bp->b_offset = 0;
	} else {
		bp->b_data = bp->b_kvabase;
		pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	}
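	/*
	 * In the unmapped case above, the pages travel to the driver
	 * in bp->b_pages[] only, so no kernel virtual address space is
	 * consumed for the transfer.
	 */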
	/* Build a minimal buffer header. */
	bp->b_iocmd = BIO_READ;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	pbgetbo(bo, bp);
	bp->b_vp = vp;
	bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount;
	bp->b_iooffset = dbtob(bp->b_blkno);
	KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
	    (blkno0 - bp->b_blkno) * DEV_BSIZE +
	    IDX_TO_OFF(m[0]->pindex) % bsize,
	    ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
	    "blkno0 %ju b_blkno %ju", bsize,
	    (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
	    (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));

	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, bp->b_npages);

	if (iodone != NULL) { /* async */
		bp->b_pgiodone = iodone;
		bp->b_caller1 = arg;
		bp->b_iodone = vnode_pager_generic_getpages_done_async;
		bp->b_flags |= B_ASYNC;
		BUF_KERNPROC(bp);
		bstrategy(bp);
		return (VM_PAGER_OK);
	} else {
		bp->b_iodone = bdone;
		bstrategy(bp);
		bwait(bp, PVM, "vnread");
		error = vnode_pager_generic_getpages_done(bp);
		for (i = 0; i < bp->b_npages; i++)
			bp->b_pages[i] = NULL;
		bp->b_vp = NULL;
		pbrelbo(bp);
		uma_zfree(vnode_pbuf_zone, bp);
		return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
	}
}

static void
vnode_pager_generic_getpages_done_async(struct buf *bp)
{
	int error;

	error = vnode_pager_generic_getpages_done(bp);
	/* Run the iodone upon the requested range. */
*/ 1110b0cd2017SGleb Smirnoff bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore, 1111b0cd2017SGleb Smirnoff bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error); 111290effb23SGleb Smirnoff for (int i = 0; i < bp->b_npages; i++) 111390effb23SGleb Smirnoff bp->b_pages[i] = NULL; 111490effb23SGleb Smirnoff bp->b_vp = NULL; 111590effb23SGleb Smirnoff pbrelbo(bp); 1116756a5412SGleb Smirnoff uma_zfree(vnode_pbuf_zone, bp); 111790effb23SGleb Smirnoff } 111890effb23SGleb Smirnoff 111990effb23SGleb Smirnoff static int 112090effb23SGleb Smirnoff vnode_pager_generic_getpages_done(struct buf *bp) 112190effb23SGleb Smirnoff { 112290effb23SGleb Smirnoff vm_object_t object; 112390effb23SGleb Smirnoff off_t tfoff, nextoff; 112490effb23SGleb Smirnoff int i, error; 112590effb23SGleb Smirnoff 1126cafbf0c6SWarner Losh KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0, 1127cafbf0c6SWarner Losh ("%s: buf error but b_error == 0\n", __func__)); 1128cafbf0c6SWarner Losh error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0; 112990effb23SGleb Smirnoff object = bp->b_vp->v_object; 113090effb23SGleb Smirnoff 113190effb23SGleb Smirnoff if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) { 1132fade8dd7SJeff Roberson if (!buf_mapped(bp)) { 1133fade8dd7SJeff Roberson bp->b_data = bp->b_kvabase; 1134fade8dd7SJeff Roberson pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, 113590effb23SGleb Smirnoff bp->b_npages); 113690effb23SGleb Smirnoff } 1137fade8dd7SJeff Roberson bzero(bp->b_data + bp->b_bcount, 113890effb23SGleb Smirnoff PAGE_SIZE * bp->b_npages - bp->b_bcount); 113990effb23SGleb Smirnoff } 1140fade8dd7SJeff Roberson if (buf_mapped(bp)) { 1141fade8dd7SJeff Roberson pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 1142fade8dd7SJeff Roberson bp->b_data = unmapped_buf; 114390effb23SGleb Smirnoff } 114426f9a767SRodney W. Grimes 11451bd12a3bSChuck Silvers /* 11461bd12a3bSChuck Silvers * If the read failed, we must free any read ahead/behind pages here. 11471bd12a3bSChuck Silvers * The requested pages are freed by the caller (for sync requests) 11481bd12a3bSChuck Silvers * or by the bp->b_pgiodone callback (for async requests). 11491bd12a3bSChuck Silvers */ 11501bd12a3bSChuck Silvers if (error != 0) { 11511bd12a3bSChuck Silvers VM_OBJECT_WLOCK(object); 11521bd12a3bSChuck Silvers for (i = 0; i < bp->b_pgbefore; i++) 11531bd12a3bSChuck Silvers vm_page_free_invalid(bp->b_pages[i]); 11541bd12a3bSChuck Silvers for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++) 11551bd12a3bSChuck Silvers vm_page_free_invalid(bp->b_pages[i]); 11561bd12a3bSChuck Silvers VM_OBJECT_WUNLOCK(object); 11571bd12a3bSChuck Silvers return (error); 11581bd12a3bSChuck Silvers } 11591bd12a3bSChuck Silvers 11607f935055SJeff Roberson /* Read lock to protect size. */ 11617f935055SJeff Roberson VM_OBJECT_RLOCK(object); 116290effb23SGleb Smirnoff for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex); 116390effb23SGleb Smirnoff i < bp->b_npages; i++, tfoff = nextoff) { 11648f9110f6SJohn Dyson vm_page_t mt; 11658f9110f6SJohn Dyson 11668f9110f6SJohn Dyson nextoff = tfoff + PAGE_SIZE; 116790effb23SGleb Smirnoff mt = bp->b_pages[i]; 11682f81c92eSMark Johnston if (mt == bogus_page) 11692f81c92eSMark Johnston continue; 11708f9110f6SJohn Dyson 117154746b67SDmitrij Tejblum if (nextoff <= object->un_pager.vnp.vnp_size) { 11728d17e694SJulian Elischer /* 11738d17e694SJulian Elischer * Read filled up entire page. 
11748d17e694SJulian Elischer 				 */ 11750012f373SJeff Roberson 				vm_page_valid(mt); 1176016a3c93SAlan Cox 				KASSERT(mt->dirty == 0, 117779f0deb9SGleb Smirnoff 				    ("%s: page %p is dirty", __func__, mt)); 1178016a3c93SAlan Cox 				KASSERT(!pmap_page_is_mapped(mt), 117979f0deb9SGleb Smirnoff 				    ("%s: page %p is mapped", __func__, mt)); 11808f9110f6SJohn Dyson 			} else { 11818d17e694SJulian Elischer 				/* 118242eb4108SAlan Cox 				 * Read did not fill up entire page. 11838d17e694SJulian Elischer 				 * 1184c3dbadc1SChuck Silvers 				 * Currently we do not set the entire page valid; 1185c3dbadc1SChuck Silvers 				 * we just try to clear the piece that we couldn't 1186c3dbadc1SChuck Silvers 				 * read. 11878d17e694SJulian Elischer 				 */ 1188dc874f98SKonstantin Belousov 				vm_page_set_valid_range(mt, 0, 118954746b67SDmitrij Tejblum 				    object->un_pager.vnp.vnp_size - tfoff); 119042eb4108SAlan Cox 				KASSERT((mt->dirty & vm_page_bits(0, 1191c3dbadc1SChuck Silvers 				    object->un_pager.vnp.vnp_size - tfoff)) == 0, 1192c3dbadc1SChuck Silvers 				    ("%s: page %p is dirty", __func__, mt)); 11938f9110f6SJohn Dyson 			} 11948f9110f6SJohn Dyson 1195b0cd2017SGleb Smirnoff 		if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter) 1196b6c00483SKonstantin Belousov 			vm_page_readahead_finish(mt); 119703679e23SAlan Cox 	} 11987f935055SJeff Roberson 	VM_OBJECT_RUNLOCK(object); 119990effb23SGleb Smirnoff 120090effb23SGleb Smirnoff 	return (error); 120126f9a767SRodney W. Grimes } 120226f9a767SRodney W. Grimes 1203ce75f2c3SMike Smith /* 1204ce75f2c3SMike Smith  * EOPNOTSUPP is no longer legal.  For local media VFS's that do not 1205ce75f2c3SMike Smith  * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call 1206ce75f2c3SMike Smith  * vnode_pager_generic_putpages() to implement the previous behaviour. 1207ce75f2c3SMike Smith  * 1208ce75f2c3SMike Smith  * All other FS's should use the bypass to get to the local media 1209ce75f2c3SMike Smith  * backing vp's VOP_PUTPAGES. 1210ce75f2c3SMike Smith  */
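/* Illustrative sketch (not part of the original file): a local media filesystem with no special putpages handling can implement the VOP by delegating to the generic code, in the spirit of vop_stdputpages(); the hypothetical xxxfs_putpages() below uses the standard vop_putpages_args fields: static int xxxfs_putpages(struct vop_putpages_args *ap) { return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync, ap->a_rtvals)); } */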
1211e4542174SMatthew Dillon static void 12127ebba1f8SGleb Smirnoff vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count, 121333cad9e9SKonstantin Belousov     int flags, int *rtvals) 1214170db9c6SJohn Dyson { 1215170db9c6SJohn Dyson 	int rtval; 1216170db9c6SJohn Dyson 	struct vnode *vp; 121786ffbd76SMike Smith 	int bytes = count * PAGE_SIZE; 1218ad980522SJohn Dyson 12190e3cdf2cSAlan Cox 	/* 12200e3cdf2cSAlan Cox 	 * Force synchronous operation if we are extremely low on memory 12210e3cdf2cSAlan Cox 	 * to prevent a low-memory deadlock.  VOP operations often need to 12220e3cdf2cSAlan Cox 	 * allocate more memory to initiate the I/O (i.e., do a BMAP 12230e3cdf2cSAlan Cox 	 * operation).  The swapper handles the case by limiting the amount 12240e3cdf2cSAlan Cox 	 * of asynchronous I/O, but that sort of solution doesn't scale well 12250e3cdf2cSAlan Cox 	 * for the vnode pager without a lot of work. 12260e3cdf2cSAlan Cox 	 * 12270e3cdf2cSAlan Cox 	 * Also, the backing vnode's iodone routine may not wake the pageout 12280e3cdf2cSAlan Cox 	 * daemon up.  This should probably be addressed XXX. 12290e3cdf2cSAlan Cox 	 */ 12300e3cdf2cSAlan Cox 1231e2068d0bSJeff Roberson 	if (vm_page_count_min()) 123233cad9e9SKonstantin Belousov 		flags |= VM_PAGER_PUT_SYNC; 12330e3cdf2cSAlan Cox 12340e3cdf2cSAlan Cox 	/* 12350e3cdf2cSAlan Cox 	 * Call the device-specific putpages function. 12360e3cdf2cSAlan Cox 	 */ 1237170db9c6SJohn Dyson 	vp = object->handle; 123889f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object); 123933cad9e9SKonstantin Belousov 	rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals); 124023955314SAlfred Perlstein 	KASSERT(rtval != EOPNOTSUPP, 124123955314SAlfred Perlstein 	    ("vnode_pager: stale FS putpages\n")); 124289f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object); 1243170db9c6SJohn Dyson } 1244170db9c6SJohn Dyson 124505877a85SKonstantin Belousov static int 124605877a85SKonstantin Belousov vn_off2bidx(vm_ooffset_t offset) 124705877a85SKonstantin Belousov { 124805877a85SKonstantin Belousov 124905877a85SKonstantin Belousov 	return ((offset & PAGE_MASK) / DEV_BSIZE); 125005877a85SKonstantin Belousov } 125105877a85SKonstantin Belousov 125205877a85SKonstantin Belousov static bool 125305877a85SKonstantin Belousov vn_dirty_blk(vm_page_t m, vm_ooffset_t offset) 125405877a85SKonstantin Belousov { 125505877a85SKonstantin Belousov 125605877a85SKonstantin Belousov 	KASSERT(IDX_TO_OFF(m->pindex) <= offset && 125705877a85SKonstantin Belousov 	    offset < IDX_TO_OFF(m->pindex + 1), 125805877a85SKonstantin Belousov 	    ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex, 125905877a85SKonstantin Belousov 	    (uintmax_t)offset)); 126005877a85SKonstantin Belousov 	return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0); 126105877a85SKonstantin Belousov } 1262ce75f2c3SMike Smith
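/* Note (illustrative, assuming the common PAGE_SIZE = 4096, DEV_BSIZE = 512 configuration): each page carries one dirty bit per 512-byte block, eight in use per page, and vn_off2bidx() selects the bit for a byte offset; e.g., an offset 0x1600 into a file falls 0x600 bytes into its page, so vn_dirty_blk() tests dirty bit 0x600 / 512 = 3. */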
126326f9a767SRodney W. Grimes /* 1264ce75f2c3SMike Smith  * This is now called from local media FS's to operate against their 12654491ea91SEivind Eklund  * own vnodes if they fail to implement VOP_PUTPAGES. 12662b6b0df7SMatthew Dillon  * 12672b6b0df7SMatthew Dillon  * This is typically called indirectly via the pageout daemon, and 1268763df3ecSPedro F. Giffuni  * clustering has usually already occurred, so in general we ask the 12692b6b0df7SMatthew Dillon  * underlying filesystem to write the data out asynchronously rather 12702b6b0df7SMatthew Dillon  * than delayed. 127126f9a767SRodney W. Grimes  */ 1272ce75f2c3SMike Smith int 1273c46b90e9SAlan Cox vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount, 1274c46b90e9SAlan Cox     int flags, int *rtvals) 127526f9a767SRodney W. Grimes { 1276ce75f2c3SMike Smith 	vm_object_t object; 1277c46b90e9SAlan Cox 	vm_page_t m; 127805877a85SKonstantin Belousov 	vm_ooffset_t maxblksz, next_offset, poffset, prev_offset; 1279f6b04d2bSDavid Greenman 	struct uio auio; 1280f6b04d2bSDavid Greenman 	struct iovec aiov; 128105877a85SKonstantin Belousov 	off_t prev_resid, wrsz; 1282e6c44f65SKonstantin Belousov 	int count, error, i, maxsize, ncount, pgoff, ppscheck; 128305877a85SKonstantin Belousov 	bool in_hole; 1284dd498befSPaul Saab 	static struct timeval lastfail; 1285dd498befSPaul Saab 	static int curfail; 128626f9a767SRodney W. Grimes 1287ce75f2c3SMike Smith 	object = vp->v_object; 1288ce75f2c3SMike Smith 	count = bytecount / PAGE_SIZE; 1289ce75f2c3SMike Smith 129026f9a767SRodney W. Grimes 	for (i = 0; i < count; i++) 1291031ec8c1SKonstantin Belousov 		rtvals[i] = VM_PAGER_ERROR; 129226f9a767SRodney W. Grimes 1293c46b90e9SAlan Cox 	if ((int64_t)ma[0]->pindex < 0) { 1294e6c44f65SKonstantin Belousov 		printf("vnode_pager_generic_putpages: " 1295e6c44f65SKonstantin Belousov 		    "attempt to write meta-data 0x%jx(%lx)\n", 1296e6c44f65SKonstantin Belousov 		    (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty); 1297f6b04d2bSDavid Greenman 		rtvals[0] = VM_PAGER_BAD; 1298e6c44f65SKonstantin Belousov 		return (VM_PAGER_BAD); 12990d94caffSDavid Greenman 	} 13000bdb7528SDavid Greenman 1301f6b04d2bSDavid Greenman 	maxsize = count * PAGE_SIZE; 1302f6b04d2bSDavid Greenman 	ncount = count; 130326f9a767SRodney W. Grimes 1304c46b90e9SAlan Cox 	poffset = IDX_TO_OFF(ma[0]->pindex); 130500a6f47fSMatthew Dillon 130600a6f47fSMatthew Dillon 	/* 130700a6f47fSMatthew Dillon 	 * If the page-aligned write is larger than the actual file, we 1308763df3ecSPedro F. Giffuni 	 * have to invalidate pages occurring beyond the file EOF.  However, 130900a6f47fSMatthew Dillon 	 * there is an edge case where a file may not be page-aligned and 131000a6f47fSMatthew Dillon 	 * the last page is then partially invalid.  In this case the filesystem 131100a6f47fSMatthew Dillon 	 * may not properly clear the dirty bits for the entire page (which 131200a6f47fSMatthew Dillon 	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d). 1313efec381dSMark Johnston 	 * With the page busied we are free to fix up the dirty bits here. 13143ebeaf59SMatthew Dillon 	 * 13153ebeaf59SMatthew Dillon 	 * We do not under any circumstances truncate the valid bits, as 13163ebeaf59SMatthew Dillon 	 * this will screw up bogus page replacement. 131700a6f47fSMatthew Dillon 	 */ 1318b3d4ab66SKonstantin Belousov 	VM_OBJECT_RLOCK(object); 1319a316d390SJohn Dyson 	if (maxsize + poffset > object->un_pager.vnp.vnp_size) { 132000a6f47fSMatthew Dillon 		if (object->un_pager.vnp.vnp_size > poffset) { 1321a316d390SJohn Dyson 			maxsize = object->un_pager.vnp.vnp_size - poffset; 1322aa8de40aSPoul-Henning Kamp 			ncount = btoc(maxsize); 132300a6f47fSMatthew Dillon 			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) { 1324938cdc42SKonstantin Belousov 				pgoff = roundup2(pgoff, DEV_BSIZE); 1325938cdc42SKonstantin Belousov 1326c46b90e9SAlan Cox 				/* 13277f935055SJeff Roberson 				 * If the page is busy and the following 1328c46b90e9SAlan Cox 				 * conditions hold, then the page's dirty 1329c46b90e9SAlan Cox 				 * field cannot be concurrently changed by a 1330c46b90e9SAlan Cox 				 * pmap operation. 1331c46b90e9SAlan Cox 				 */ 1332c46b90e9SAlan Cox 				m = ma[ncount - 1]; 1333c7aebda8SAttilio Rao 				vm_page_assert_sbusied(m); 13346031c68dSAlan Cox 				KASSERT(!pmap_page_is_write_mapped(m), 1335c46b90e9SAlan Cox 		("vnode_pager_generic_putpages: page %p is not read-only", m)); 1336e6c44f65SKonstantin Belousov 				MPASS(m->dirty != 0); 1337c46b90e9SAlan Cox 				vm_page_clear_dirty(m, pgoff, PAGE_SIZE - 1338c46b90e9SAlan Cox 				    pgoff); 133900a6f47fSMatthew Dillon 			} 134000a6f47fSMatthew Dillon 		} else { 134100a6f47fSMatthew Dillon 			maxsize = 0; 134200a6f47fSMatthew Dillon 			ncount = 0; 134300a6f47fSMatthew Dillon 		} 1344e6c44f65SKonstantin Belousov 		for (i = ncount; i < count; i++) 1345f6b04d2bSDavid Greenman 			rtvals[i] = VM_PAGER_BAD; 1346f6b04d2bSDavid Greenman 	} 13477f935055SJeff Roberson 	VM_OBJECT_RUNLOCK(object); 134826f9a767SRodney W. Grimes
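/* Worked example (illustrative numbers, mirroring the getpages example above): putting two pages at poffset = 8192 into a file with vnp_size = 10000 truncates maxsize to 1808 and ncount to 1; pgoff = 1808 is rounded up to 2048, the dirty bits for bytes [2048, 4096) of the last valid page are cleared, and the page entirely past EOF is reported VM_PAGER_BAD. */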
1349f6b04d2bSDavid Greenman 	auio.uio_iov = &aiov; 1350f6b04d2bSDavid Greenman 	auio.uio_segflg = UIO_NOCOPY; 1351f6b04d2bSDavid Greenman 	auio.uio_rw = UIO_WRITE; 1352e6c44f65SKonstantin Belousov 	auio.uio_td = NULL; 135305877a85SKonstantin Belousov 	maxblksz = roundup2(poffset + maxsize, DEV_BSIZE); 135405877a85SKonstantin Belousov 135505877a85SKonstantin Belousov 	for (prev_offset = poffset; prev_offset < maxblksz;) { 135605877a85SKonstantin Belousov 		/* Skip clean blocks. */ 135705877a85SKonstantin Belousov 		for (in_hole = true; in_hole && prev_offset < maxblksz;) { 135805877a85SKonstantin Belousov 			m = ma[OFF_TO_IDX(prev_offset - poffset)]; 135905877a85SKonstantin Belousov 			for (i = vn_off2bidx(prev_offset); 136005877a85SKonstantin Belousov 			    i < sizeof(vm_page_bits_t) * NBBY && 136105877a85SKonstantin Belousov 			    prev_offset < maxblksz; i++) { 136205877a85SKonstantin Belousov 				if (vn_dirty_blk(m, prev_offset)) { 136305877a85SKonstantin Belousov 					in_hole = false; 136405877a85SKonstantin Belousov 					break; 136505877a85SKonstantin Belousov 				} 136605877a85SKonstantin Belousov 				prev_offset += DEV_BSIZE; 136705877a85SKonstantin Belousov 			} 136805877a85SKonstantin Belousov 		} 136905877a85SKonstantin Belousov 		if (in_hole) 137005877a85SKonstantin Belousov 			goto write_done; 137105877a85SKonstantin Belousov 137205877a85SKonstantin Belousov 		/* Find longest run of dirty blocks. */ 137305877a85SKonstantin Belousov 		for (next_offset = prev_offset; next_offset < maxblksz;) { 137405877a85SKonstantin Belousov 			m = ma[OFF_TO_IDX(next_offset - poffset)]; 137505877a85SKonstantin Belousov 			for (i = vn_off2bidx(next_offset); 137605877a85SKonstantin Belousov 			    i < sizeof(vm_page_bits_t) * NBBY && 137705877a85SKonstantin Belousov 			    next_offset < maxblksz; i++) { 137805877a85SKonstantin Belousov 				if (!vn_dirty_blk(m, next_offset)) 137905877a85SKonstantin Belousov 					goto start_write; 138005877a85SKonstantin Belousov 				next_offset += DEV_BSIZE; 138105877a85SKonstantin Belousov 			} 138205877a85SKonstantin Belousov 		} 138305877a85SKonstantin Belousov start_write: 138405877a85SKonstantin Belousov 		if (next_offset > poffset + maxsize) 138505877a85SKonstantin Belousov 			next_offset = poffset + maxsize; 138605877a85SKonstantin Belousov 138705877a85SKonstantin Belousov 		/* 138805877a85SKonstantin Belousov 		 * Getting here requires finding a dirty block in the 138905877a85SKonstantin Belousov 		 * 'skip clean blocks' loop.
139005877a85SKonstantin Belousov 		 */ 139105877a85SKonstantin Belousov 		MPASS(prev_offset < next_offset); 139205877a85SKonstantin Belousov 139305877a85SKonstantin Belousov 		aiov.iov_base = NULL; 139405877a85SKonstantin Belousov 		auio.uio_iovcnt = 1; 139505877a85SKonstantin Belousov 		auio.uio_offset = prev_offset; 139605877a85SKonstantin Belousov 		prev_resid = auio.uio_resid = aiov.iov_len = next_offset - 139705877a85SKonstantin Belousov 		    prev_offset; 139805877a85SKonstantin Belousov 		error = VOP_WRITE(vp, &auio, 139905877a85SKonstantin Belousov 		    vnode_pager_putpages_ioflags(flags), curthread->td_ucred); 140005877a85SKonstantin Belousov 140105877a85SKonstantin Belousov 		wrsz = prev_resid - auio.uio_resid; 140205877a85SKonstantin Belousov 		if (wrsz == 0) { 140305877a85SKonstantin Belousov 			if (ppsratecheck(&lastfail, &curfail, 1) != 0) { 140405877a85SKonstantin Belousov 				vn_printf(vp, "vnode_pager_putpages: " 140505877a85SKonstantin Belousov 				    "zero-length write at %ju resid %zd\n", 140605877a85SKonstantin Belousov 				    auio.uio_offset, auio.uio_resid); 140705877a85SKonstantin Belousov 			} 140805877a85SKonstantin Belousov 			break; 140905877a85SKonstantin Belousov 		} 141005877a85SKonstantin Belousov 141105877a85SKonstantin Belousov 		/* Adjust the starting offset for the next iteration. */ 141205877a85SKonstantin Belousov 		prev_offset += wrsz; 141305877a85SKonstantin Belousov 		MPASS(auio.uio_offset == prev_offset); 1414f6b04d2bSDavid Greenman 14153dbb0ca6SKonstantin Belousov 		ppscheck = 0; 141605877a85SKonstantin Belousov 		if (error != 0 && (ppscheck = ppsratecheck(&lastfail, 141705877a85SKonstantin Belousov 		    &curfail, 1)) != 0) 141805877a85SKonstantin Belousov 			vn_printf(vp, "vnode_pager_putpages: I/O error %d\n", 141905877a85SKonstantin Belousov 			    error); 1420e6c44f65SKonstantin Belousov 		if (auio.uio_resid != 0 && (ppscheck != 0 || 1421e6c44f65SKonstantin Belousov 		    ppsratecheck(&lastfail, &curfail, 1) != 0)) 142205877a85SKonstantin Belousov 			vn_printf(vp, "vnode_pager_putpages: residual I/O %zd " 142305877a85SKonstantin Belousov 			    "at %ju\n", auio.uio_resid, 142405877a85SKonstantin Belousov 			    (uintmax_t)ma[0]->pindex); 142505877a85SKonstantin Belousov 		if (error != 0 || auio.uio_resid != 0) 142605877a85SKonstantin Belousov 			break; 142705877a85SKonstantin Belousov 	} 142805877a85SKonstantin Belousov write_done: 142905877a85SKonstantin Belousov 	/* Mark completely processed pages. */ 143005877a85SKonstantin Belousov 	for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++) 143126f9a767SRodney W. Grimes 		rtvals[i] = VM_PAGER_OK; 143205877a85SKonstantin Belousov 	/* Mark the partial EOF page. */ 143305877a85SKonstantin Belousov 	if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0) 143405877a85SKonstantin Belousov 		rtvals[i++] = VM_PAGER_OK; 143505877a85SKonstantin Belousov 	/* Unwritten pages in the range are a free bonus: report them OK if they are already clean. */ 143605877a85SKonstantin Belousov 	for (; i < ncount; i++) 143705877a85SKonstantin Belousov 		rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR; 143805877a85SKonstantin Belousov 	VM_CNT_ADD(v_vnodepgsout, i); 143905877a85SKonstantin Belousov 	VM_CNT_INC(v_vnodeout); 1440e6c44f65SKonstantin Belousov 	return (rtvals[0]); 144126f9a767SRodney W. Grimes }
1442031ec8c1SKonstantin Belousov 144365b9599aSKonstantin Belousov int 144465b9599aSKonstantin Belousov vnode_pager_putpages_ioflags(int pager_flags) 144565b9599aSKonstantin Belousov { 144665b9599aSKonstantin Belousov 	int ioflags; 144765b9599aSKonstantin Belousov 144865b9599aSKonstantin Belousov 	/* 144965b9599aSKonstantin Belousov 	 * Pageouts are already clustered; use IO_ASYNC to force a 145065b9599aSKonstantin Belousov 	 * bawrite() rather than a bdwrite() to prevent paging I/O 145165b9599aSKonstantin Belousov 	 * from saturating the buffer cache.  Dummy-up the sequential 145265b9599aSKonstantin Belousov 	 * heuristic to cause large ranges to cluster.  If neither 145365b9599aSKonstantin Belousov 	 * IO_SYNC nor IO_ASYNC is set, the system decides how to 145465b9599aSKonstantin Belousov 	 * cluster. 145565b9599aSKonstantin Belousov 	 */ 145665b9599aSKonstantin Belousov 	ioflags = IO_VMIO; 145765b9599aSKonstantin Belousov 	if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0) 145865b9599aSKonstantin Belousov 		ioflags |= IO_SYNC; 145965b9599aSKonstantin Belousov 	else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0) 146065b9599aSKonstantin Belousov 		ioflags |= IO_ASYNC; 146165b9599aSKonstantin Belousov 	ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL : 0; 146265b9599aSKonstantin Belousov 	ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0; 146365b9599aSKonstantin Belousov 	ioflags |= IO_SEQMAX << IO_SEQSHIFT; 146465b9599aSKonstantin Belousov 	return (ioflags); 146565b9599aSKonstantin Belousov } 146665b9599aSKonstantin Belousov
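/* Worked example (derived from the code above): a pageout issued with pager_flags == VM_PAGER_PUT_INVAL alone selects IO_SYNC rather than IO_ASYNC, adds IO_INVAL, and thus returns IO_VMIO | IO_SYNC | IO_INVAL | (IO_SEQMAX << IO_SEQSHIFT). */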
1467555b7bb4SKonstantin Belousov /* 1468555b7bb4SKonstantin Belousov  * vnode_pager_undirty_pages(). 1469555b7bb4SKonstantin Belousov  * 1470555b7bb4SKonstantin Belousov  * A helper to mark pages as clean after a pageout that was possibly 1471555b7bb4SKonstantin Belousov  * done with a short write.  The lpos argument specifies the page run 1472555b7bb4SKonstantin Belousov  * length in bytes, and the written argument specifies how many bytes 1473555b7bb4SKonstantin Belousov  * were actually written.  eof is the offset past the last valid byte 1474555b7bb4SKonstantin Belousov  * in the vnode, computed using the absolute file position of the first 1475555b7bb4SKonstantin Belousov  * byte in the run as its base. 1476555b7bb4SKonstantin Belousov  */ 1477031ec8c1SKonstantin Belousov void 1478555b7bb4SKonstantin Belousov vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof, 1479555b7bb4SKonstantin Belousov     int lpos) 1480031ec8c1SKonstantin Belousov { 1481555b7bb4SKonstantin Belousov 	int i, pos, pos_devb; 1482031ec8c1SKonstantin Belousov 1483555b7bb4SKonstantin Belousov 	if (written == 0 && eof >= lpos) 14849d17da3bSKonstantin Belousov 		return; 1485031ec8c1SKonstantin Belousov 	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) { 1486031ec8c1SKonstantin Belousov 		if (pos < trunc_page(written)) { 1487031ec8c1SKonstantin Belousov 			rtvals[i] = VM_PAGER_OK; 1488031ec8c1SKonstantin Belousov 			vm_page_undirty(ma[i]); 1489031ec8c1SKonstantin Belousov 		} else { 1490031ec8c1SKonstantin Belousov 			/* Partially written page. */ 1491031ec8c1SKonstantin Belousov 			rtvals[i] = VM_PAGER_AGAIN; 1492031ec8c1SKonstantin Belousov 			vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK); 1493031ec8c1SKonstantin Belousov 		} 1494031ec8c1SKonstantin Belousov 	} 1495555b7bb4SKonstantin Belousov 	if (eof >= lpos) /* avoid truncation */ 14967f935055SJeff Roberson 		return; 1497555b7bb4SKonstantin Belousov 	for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) { 1498555b7bb4SKonstantin Belousov 		if (pos != trunc_page(pos)) { 1499555b7bb4SKonstantin Belousov 			/* 1500555b7bb4SKonstantin Belousov 			 * The page contains the last valid byte in 1501555b7bb4SKonstantin Belousov 			 * the vnode; mark the rest of the page as 1502555b7bb4SKonstantin Belousov 			 * clean, potentially making the whole page 1503555b7bb4SKonstantin Belousov 			 * clean. 1504555b7bb4SKonstantin Belousov 			 */ 1505555b7bb4SKonstantin Belousov 			pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE); 1506555b7bb4SKonstantin Belousov 			vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE - 1507555b7bb4SKonstantin Belousov 			    pos_devb); 1508555b7bb4SKonstantin Belousov 1509555b7bb4SKonstantin Belousov 			/* 1510555b7bb4SKonstantin Belousov 			 * If the page was cleaned, report the pageout 1511555b7bb4SKonstantin Belousov 			 * on it as successful, so that msync() no longer 1512555b7bb4SKonstantin Belousov 			 * needs to write out the page, endlessly 1513555b7bb4SKonstantin Belousov 			 * creating write requests and dirty buffers. 1514555b7bb4SKonstantin Belousov 			 */ 1515555b7bb4SKonstantin Belousov 			if (ma[i]->dirty == 0) 1516555b7bb4SKonstantin Belousov 				rtvals[i] = VM_PAGER_OK; 1517555b7bb4SKonstantin Belousov 1518555b7bb4SKonstantin Belousov 			pos = round_page(pos); 1519555b7bb4SKonstantin Belousov 		} else { 1520555b7bb4SKonstantin Belousov 			/* vm_pageout_flush() clears dirty */ 1521555b7bb4SKonstantin Belousov 			rtvals[i] = VM_PAGER_BAD; 1522555b7bb4SKonstantin Belousov 			pos += PAGE_SIZE; 1523555b7bb4SKonstantin Belousov 		} 1524555b7bb4SKonstantin Belousov 	} 1525031ec8c1SKonstantin Belousov } 152684110e7eSKonstantin Belousov
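/* Worked example (illustrative, assuming 4 KB pages): for a two-page run (lpos = 8192) of which only written = 5000 bytes reached the vnode, with eof beyond the run, page 0 is undirtied and reported VM_PAGER_OK, while page 1 is reported VM_PAGER_AGAIN with only the blocks covering its first written & PAGE_MASK = 904 bytes marked clean, so a later pageout retries the remainder. */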
1527fe7bcbafSKyle Evans static void 152884110e7eSKonstantin Belousov vnode_pager_update_writecount(vm_object_t object, vm_offset_t start, 152984110e7eSKonstantin Belousov     vm_offset_t end) 153084110e7eSKonstantin Belousov { 153184110e7eSKonstantin Belousov 	struct vnode *vp; 153284110e7eSKonstantin Belousov 	vm_ooffset_t old_wm; 153384110e7eSKonstantin Belousov 153489f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object); 153584110e7eSKonstantin Belousov 	if (object->type != OBJT_VNODE) { 153689f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object); 153784110e7eSKonstantin Belousov 		return; 153884110e7eSKonstantin Belousov 	} 153984110e7eSKonstantin Belousov 	old_wm = object->un_pager.vnp.writemappings; 154084110e7eSKonstantin Belousov 	object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start; 154184110e7eSKonstantin Belousov 	vp = object->handle; 154284110e7eSKonstantin Belousov 	if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) { 154378022527SKonstantin Belousov 		ASSERT_VOP_LOCKED(vp, "v_writecount inc"); 154478022527SKonstantin Belousov 		VOP_ADD_WRITECOUNT_CHECKED(vp, 1); 1545b47f6241SJohn Baldwin 		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d", 1546b47f6241SJohn Baldwin 		    __func__, vp, vp->v_writecount); 154784110e7eSKonstantin Belousov 	} else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) { 154878022527SKonstantin Belousov 		ASSERT_VOP_LOCKED(vp, "v_writecount dec"); 154978022527SKonstantin Belousov 		VOP_ADD_WRITECOUNT_CHECKED(vp, -1); 1550b47f6241SJohn Baldwin 		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d", 1551b47f6241SJohn Baldwin 		    __func__, vp, vp->v_writecount); 155284110e7eSKonstantin Belousov 	} 155389f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object); 155484110e7eSKonstantin Belousov } 155584110e7eSKonstantin Belousov 1556fe7bcbafSKyle Evans static void 155784110e7eSKonstantin Belousov vnode_pager_release_writecount(vm_object_t object, vm_offset_t start, 155884110e7eSKonstantin Belousov     vm_offset_t end) 155984110e7eSKonstantin Belousov { 156084110e7eSKonstantin Belousov 	struct vnode *vp; 156184110e7eSKonstantin Belousov 	struct mount *mp; 156284110e7eSKonstantin Belousov 	vm_offset_t inc; 156384110e7eSKonstantin Belousov 156489f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object); 156584110e7eSKonstantin Belousov 156684110e7eSKonstantin Belousov 	/* 156784110e7eSKonstantin Belousov 	 * First, recheck the object type to account for the race when 156884110e7eSKonstantin Belousov 	 * the vnode is reclaimed. 156984110e7eSKonstantin Belousov 	 */ 157084110e7eSKonstantin Belousov 	if (object->type != OBJT_VNODE) { 157189f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object); 157284110e7eSKonstantin Belousov 		return; 157384110e7eSKonstantin Belousov 	} 157484110e7eSKonstantin Belousov 157584110e7eSKonstantin Belousov 	/* 157684110e7eSKonstantin Belousov 	 * Optimize for the case when writemappings does not go to 157784110e7eSKonstantin Belousov 	 * zero. 157884110e7eSKonstantin Belousov 	 */ 157984110e7eSKonstantin Belousov 	inc = end - start; 158084110e7eSKonstantin Belousov 	if (object->un_pager.vnp.writemappings != inc) { 158184110e7eSKonstantin Belousov 		object->un_pager.vnp.writemappings -= inc; 158289f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object); 158384110e7eSKonstantin Belousov 		return; 158484110e7eSKonstantin Belousov 	} 158584110e7eSKonstantin Belousov 158684110e7eSKonstantin Belousov 	vp = object->handle; 158784110e7eSKonstantin Belousov 	vhold(vp); 158889f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object); 158984110e7eSKonstantin Belousov 	mp = NULL; 159084110e7eSKonstantin Belousov 	vn_start_write(vp, &mp, V_WAIT); 159178022527SKonstantin Belousov 	vn_lock(vp, LK_SHARED | LK_RETRY); 159284110e7eSKonstantin Belousov 159384110e7eSKonstantin Belousov 	/* 159484110e7eSKonstantin Belousov 	 * Decrement the object's writemappings by swapping the start 159584110e7eSKonstantin Belousov 	 * and end arguments for vnode_pager_update_writecount().  If 159684110e7eSKonstantin Belousov 	 * there was not a race with vnode reclamation, then the 159784110e7eSKonstantin Belousov 	 * vnode's v_writecount is decremented. 159884110e7eSKonstantin Belousov 	 */ 159984110e7eSKonstantin Belousov 	vnode_pager_update_writecount(object, end, start); 1600b249ce48SMateusz Guzik 	VOP_UNLOCK(vp); 160184110e7eSKonstantin Belousov 	vdrop(vp); 160284110e7eSKonstantin Belousov 	if (mp != NULL) 160384110e7eSKonstantin Belousov 		vn_finished_write(mp); 160484110e7eSKonstantin Belousov } 1605
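/* Note (summary derived from the code above, added for clarity): the two functions above implement the vnode write-count protocol for writable mappings. vnode_pager_update_writecount() is called as mappings are created or extended, and only the 0 -> nonzero transition of writemappings bumps v_writecount; vnode_pager_release_writecount() removes one mapping's contribution, and only the final transition back to zero drops v_writecount, performed with the vnode held and locked and with mount write suspension accounted for via vn_start_write(). */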