/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>
#include <sys/user.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);
static void vnode_pager_update_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_getvp(vm_object_t, struct vnode **, bool *);

const struct pagerops vnodepagerops = {
	.pgo_kvme_type = KVME_TYPE_VNODE,
	.pgo_alloc = vnode_pager_alloc,
	.pgo_dealloc = vnode_pager_dealloc,
	.pgo_getpages = vnode_pager_getpages,
	.pgo_getpages_async = vnode_pager_getpages_async,
	.pgo_putpages = vnode_pager_putpages,
	.pgo_haspage = vnode_pager_haspage,
	.pgo_update_writecount = vnode_pager_update_writecount,
	.pgo_release_writecount = vnode_pager_release_writecount,
	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
	.pgo_mightbedirty = vm_object_mightbedirty_,
	.pgo_getvp = vnode_pager_getvp,
};

static struct domainset *vnode_domainset = NULL;

SYSCTL_PROC(_debug, OID_AUTO, vnode_domainset,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RW, &vnode_domainset, 0,
    sysctl_handle_domainset, "A", "Default vnode NUMA policy");

static int nvnpbufs;
SYSCTL_INT(_vm, OID_AUTO, vnode_pbufs, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &nvnpbufs, 0, "number of physical buffers allocated for vnode pager");

static uma_zone_t vnode_pbuf_zone;

static void
vnode_pager_init(void *dummy)
{

#ifdef __LP64__
	nvnpbufs = nswbuf * 2;
#else
	nvnpbufs = nswbuf / 2;
#endif
	TUNABLE_INT_FETCH("vm.vnode_pbufs", &nvnpbufs);
	vnode_pbuf_zone = pbuf_zsecond_create("vnpbuf", nvnpbufs);
}
SYSINIT(vnode_pager, SI_SUB_CPU, SI_ORDER_ANY, vnode_pager_init, NULL);
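
/*
 * Illustrative sizing note (numbers assumed, not taken from the code
 * above): with a stock nswbuf of, say, 256, the defaults above reserve
 * 512 vnode pbufs on LP64 platforms and 128 elsewhere; either default
 * may be overridden at boot via the "vm.vnode_pbufs" tunable.
 */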

/* Create the VM system backing object for this vnode */
static int
vnode_create_vobject_any(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size;
	bool last;

	object = vp->v_object;
	if (object != NULL)
		return (0);

	if (isize == VNODE_NO_SIZE) {
		if (vn_getsize_locked(vp, &size, td->td_ucred) != 0)
			return (0);
	} else {
		size = isize;
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.  We still have
	 * to serialize with vnode_pager_dealloc() for the last
	 * potential reference.
	 */
	VM_OBJECT_RLOCK(object);
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);
	if (last)
		vrele(vp);

	VNASSERT(vp->v_object != NULL, vp, ("%s: NULL object", __func__));

	return (0);
}

int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	VNASSERT(!vn_isdisk(vp), vp, ("%s: disk vnode", __func__));
	VNASSERT(isize == VNODE_NO_SIZE || isize >= 0, vp,
	    ("%s: invalid size (%jd)", __func__, (intmax_t)isize));

	if (!vn_canvmio(vp))
		return (0);

	return (vnode_create_vobject_any(vp, isize, td));
}

int
vnode_create_disk_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	VNASSERT(isize > 0, vp, ("%s: invalid size (%jd)", __func__,
	    (intmax_t)isize));

	return (vnode_create_vobject_any(vp, isize, td));
}

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL || obj->handle != vp)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	MPASS(obj->type == OBJT_VNODE);
	umtx_shm_object_terminated(obj);
	if (obj->ref_count == 0) {
		KASSERT((obj->flags & OBJ_DEAD) == 0,
		    ("vnode_destroy_vobject: Terminating dead object"));
		vm_object_set_flag(obj, OBJ_DEAD);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(obj);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(obj);
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *)handle;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
	VNPASS(vp->v_usecount > 0, vp);
retry:
	object = vp->v_object;

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;
		object->domain.dr_policy = vnode_domainset;
		object->handle = handle;
		if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_set_flag(object, OBJ_SIZEVNLOCK);
			VM_OBJECT_WUNLOCK(object);
		}
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were allocating.
			 */
			VI_UNLOCK(vp);
			VM_OBJECT_WLOCK(object);
			KASSERT(object->ref_count == 1,
			    ("leaked ref %p %d", object, object->ref_count));
			object->type = OBJT_DEAD;
			refcount_init(&object->ref_count, 0);
			VM_OBJECT_WUNLOCK(object);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
		vrefact(vp);
	} else {
		vm_object_reference(object);
#if VM_NRESERVLEVEL > 0
		if ((object->flags & OBJ_COLORED) == 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_color(object, 0);
			VM_OBJECT_WUNLOCK(object);
		}
#endif
	}
	return (object);
}
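
/*
 * Note on the retry loop above (editorial summary, not new behavior):
 * this is an optimistic allocate-then-recheck pattern.  Two threads may
 * both pass the "object == NULL" check and allocate; the loser then
 * observes vp->v_object != NULL under the vnode interlock, marks its own
 * object dead, destroys it, and retries, taking the reference path the
 * second time around.
 */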

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VI_LOCK(vp);

	/*
	 * vm_map_entry_set_vnode_text() cannot reach this vnode by
	 * following object->handle.  Clear all text references now.
	 * This also clears the transient references from
	 * kern_execve(), which is fine because dead_vnodeops uses nop
	 * for VOP_UNSET_TEXT().
	 */
	if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);
	VM_OBJECT_WUNLOCK(object);
	if (refs > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	uintptr_t lockstate;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || VN_IS_DOOMED(vp))
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	lockstate = VM_OBJECT_DROP(object);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VM_OBJECT_PICKUP(object, lockstate);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			/*
			 * The BMAP vop can report a partial block in the
			 * 'after', but must not report blocks after EOF.
			 * Assert the latter, and truncate 'after' in case
			 * of the former.
			 */
			KASSERT((reqblock + *after) * pagesperblock <
			    roundup2(object->size, pagesperblock),
			    ("%s: reqblock %jd after %d size %ju", __func__,
			    (intmax_t )reqblock, *after,
			    (uintmax_t )object->size));
			*after *= pagesperblock;
			*after += pagesperblock - (poff + 1);
			if (pindex + *after >= object->size)
				*after = object->size - 1 - pindex;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}
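
/*
 * Illustrative example of the block/page conversion above (numbers
 * assumed, not tied to any particular filesystem): with a 32KB
 * filesystem block and 4KB pages, pagesperblock is 8.  For pindex 21,
 * reqblock is 2 and poff is 5, so a VOP_BMAP() answer of before = 1,
 * after = 1 becomes *before = 1 * 8 + 5 = 13 pages and
 * *after = 1 * 8 + (8 - 6) = 10 pages, clipped against object->size.
 */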

/*
 * Internal routine clearing partial-page content
 */
static void
vnode_pager_subpage_purge(struct vm_page *m, int base, int end)
{
	int size;

	KASSERT(end > base && end <= PAGE_SIZE,
	    ("%s: start %d end %d", __func__, base, end));
	size = end - base;

	/*
	 * Clear out partial-page garbage in case
	 * the page has been mapped.
	 */
	pmap_zero_page_area(m, base, size);

	/*
	 * Update the valid bits to reflect the blocks
	 * that have been zeroed.  Some of these valid
	 * bits may have already been set.
	 */
	vm_page_set_valid_range(m, base, size);

	/*
	 * Round up "base" to the next block boundary so
	 * that the dirty bit for a partially zeroed
	 * block is not cleared.
	 */
	base = roundup2(base, DEV_BSIZE);
	end = rounddown2(end, DEV_BSIZE);

	if (end > base) {
		/*
		 * Clear out partial-page dirty bits.
		 *
		 * note that we do not clear out the
		 * valid bits.  This would prevent
		 * bogus_page replacement from working
		 * properly.
		 */
		vm_page_clear_dirty(m, base, end - base);
	}
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
#ifdef DEBUG_VFS_LOCKS
	{
		struct mount *mp;

		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0)
			assert_vop_elocked(vp,
			    "vnode_pager_setsize and not locked vnode");
	}
#endif
	VM_OBJECT_WLOCK(object);
	if (object->type == OBJT_DEAD) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	KASSERT(object->type == OBJT_VNODE,
	    ("not vnode-backed object %p", object));
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if (!(nsize & PAGE_MASK))
			goto out;
		m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
		if (m == NULL)
			goto out;
		if (!vm_page_none_valid(m))
			vnode_pager_subpage_purge(m, (int)nsize & PAGE_MASK,
			    PAGE_SIZE);
		vm_page_xunbusy(m);
	}
out:
#if defined(__powerpc__) && !defined(__powerpc64__)
	object->un_pager.vnp.vnp_size = nsize;
#else
	atomic_store_64(&object->un_pager.vnp.vnp_size, nsize);
#endif
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
}

/*
 * Lets the VM system know about the purged range for a file.  We toss away
 * any cached pages in the associated object that are affected by the purge
 * operation.  Partial-page areas that are not aligned to page boundaries
 * will be zeroed, and dirty blocks (in DEV_BSIZE units) within a page will
 * not be flushed.
 */
void
vnode_pager_purge_range(struct vnode *vp, vm_ooffset_t start, vm_ooffset_t end)
{
	struct vm_page *m;
	struct vm_object *object;
	vm_pindex_t pi, pistart, piend;
	bool same_page;
	int base, pend;

	ASSERT_VOP_LOCKED(vp, "vnode_pager_purge_range");

	object = vp->v_object;
	pi = start + PAGE_MASK < start ? OBJ_MAX_SIZE :
	    OFF_TO_IDX(start + PAGE_MASK);
	pistart = OFF_TO_IDX(start);
	piend = end == 0 ? OBJ_MAX_SIZE : OFF_TO_IDX(end);
	same_page = pistart == piend;
	if ((end != 0 && end <= start) || object == NULL)
		return;

	VM_OBJECT_WLOCK(object);

	if (pi < piend)
		vm_object_page_remove(object, pi, piend, 0);

	if ((start & PAGE_MASK) != 0) {
		base = (int)start & PAGE_MASK;
		pend = same_page ? (int)end & PAGE_MASK : PAGE_SIZE;
		m = vm_page_grab(object, pistart, VM_ALLOC_NOCREAT);
		if (m != NULL) {
			if (!vm_page_none_valid(m))
				vnode_pager_subpage_purge(m, base, pend);
			vm_page_xunbusy(m);
		}
		if (same_page)
			goto out;
	}
	if ((end & PAGE_MASK) != 0) {
		base = same_page ? (int)start & PAGE_MASK : 0;
		pend = (int)end & PAGE_MASK;
		m = vm_page_grab(object, piend, VM_ALLOC_NOCREAT);
		if (m != NULL) {
			if (!vm_page_none_valid(m))
				vnode_pager_subpage_purge(m, base, pend);
			vm_page_xunbusy(m);
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
}
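
/*
 * Illustrative usage sketch (hypothetical caller, not part of this
 * file): a filesystem deallocating the byte range [start, end) of a
 * vnode, e.g. while punching a hole, would purge the page cache first:
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vnode_pager_purge_range(vp, start, end);
 *	... free the underlying blocks ...
 *	VOP_UNLOCK(vp);
 */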

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (VN_IS_DOOMED(vp))
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}
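
/*
 * Illustrative example of the arithmetic above (numbers assumed): with
 * bsize = 16KB, 4KB pages and address = 0x6800, vblock is 1 and voffset
 * is 0x2800.  If VOP_BMAP() maps block 1 to disk block B with *run = 2
 * contiguous blocks following, the result is *rtaddress = B +
 * 0x2800 / DEV_BSIZE and *run = (2 + 1) * 4 - 2 = 10 contiguous pages
 * starting at the page containing the requested address.
 */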

static void
vnode_pager_input_bdone(struct buf *bp)
{
	runningbufwakeup(bp);
	bdone(bp);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (VN_IS_DOOMED(vp))
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = vnode_pager_input_bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			(void)runningbufclaim(bp, bp->b_bufsize);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0) {
				KASSERT(bp->b_error != 0,
				    ("%s: buf error but b_error == 0\n",
				    __func__));
				error = bp->b_error;
			}

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			uma_zfree(vnode_pbuf_zone, bp);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		vm_page_bits_set(m, &m->valid, bits);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		vm_page_valid(m);
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	struct vnode *vp;
	int rtval;

	/* Handle is stable with paging in progress. */
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	return rtval;
}

static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages_async not implemented\n"));
	return (rtval);
}

/*
 * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
 * local filesystems, where partially valid pages can only occur at
 * the end of file.
 */
int
vnode_pager_local_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

int
vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
	if (error != 0 && ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	struct bufobj *bo;
	struct buf *bp;
	off_t foff;
#ifdef INVARIANTS
	off_t blkno0;
#endif
	int bsize, pagesperblock;
	int error, before, after, rbehind, rahead, poff, i;
	int bytecount, secmask;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("%s does not support devices", __func__));

	if (VN_IS_DOOMED(vp))
		return (VM_PAGER_BAD);

	object = vp->v_object;
	foff = IDX_TO_OFF(m[0]->pindex);
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;

	KASSERT(foff < object->un_pager.vnp.vnp_size,
	    ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
	KASSERT(count <= atop(maxphys),
	    ("%s: requested %d pages", __func__, count));

	/*
	 * The last page has valid blocks.  Invalid part can only
	 * exist at the end of file, and the page is made fully valid
	 * by zeroing in vm_pager_get_pages().
	 */
	if (!vm_page_none_valid(m[count - 1]) && --count == 0) {
		if (iodone != NULL)
			iodone(arg, m, 1, 0);
		return (VM_PAGER_OK);
	}

	bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);
	MPASS((bp->b_flags & B_MAXPHYS) != 0);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use the old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
	if (error == EOPNOTSUPP) {
		uma_zfree(vnode_pbuf_zone, bp);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_old(object, m[i]);
			if (error)
				break;
		}
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		return (VM_PAGER_ERROR);
	}

	/*
	 * If the file system supports BMAP, but blocksize is smaller
	 * than a page size, then use special small filesystem code.
	 */
	if (pagesperblock == 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_smlfs(object, m[i]);
			if (error)
				break;
		}
		return (error);
	}

	/*
	 * A sparse file can be encountered only for a single page request,
	 * which may not be preceded by a call to vm_pager_haspage().
	 */
	if (bp->b_blkno == -1) {
		KASSERT(count == 1,
		    ("%s: array[%d] request to a sparse file %p", __func__,
		    count, vp));
		uma_zfree(vnode_pbuf_zone, bp);
		pmap_zero_page(m[0]);
		KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
		    __func__, m[0]));
		vm_page_valid(m[0]);
		return (VM_PAGER_OK);
	}

#ifdef INVARIANTS
	blkno0 = bp->b_blkno;
#endif
	bp->b_blkno += (foff % bsize) / DEV_BSIZE;

	/* Recalculate blocks available after/before to pages. */
	poff = (foff % bsize) / PAGE_SIZE;
	before *= pagesperblock;
	before += poff;
	after *= pagesperblock;
	after += pagesperblock - (poff + 1);
	if (m[0]->pindex + after >= object->size)
		after = object->size - 1 - m[0]->pindex;
	KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
	    __func__, count, after + 1));
	after -= count - 1;

	/* Trim requested rbehind/rahead to possible values. */
	rbehind = a_rbehind ? *a_rbehind : 0;
	rahead = a_rahead ? *a_rahead : 0;
	rbehind = min(rbehind, before);
	rbehind = min(rbehind, m[0]->pindex);
	rahead = min(rahead, after);
	rahead = min(rahead, object->size - m[count - 1]->pindex);
	/*
	 * Check that total amount of pages fit into buf.  Trim rbehind and
	 * rahead evenly if not.
	 */
	if (rbehind + rahead + count > atop(maxphys)) {
		int trim, sum;

		trim = rbehind + rahead + count - atop(maxphys) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Roundup rbehind trim to block size. */
			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	KASSERT(rbehind + rahead + count <= atop(maxphys),
	    ("%s: behind %d ahead %d count %d maxphys %lu", __func__,
	    rbehind, rahead, count, maxphys));
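
	/*
	 * Illustrative trim example (numbers assumed, and assuming the
	 * rbehind != before branch above): if atop(maxphys) were 32
	 * pages and the caller asked for count = 8 with rbehind = 30
	 * and rahead = 10 after the min() clamps, then trim =
	 * 48 - 32 + 1 = 17 and sum = 40, so 17 * 30 / 40 = 12 pages
	 * come off rbehind and 17 * 10 / 40 = 4 off rahead, leaving
	 * 18 + 6 + 8 = 32 pages, within the buffer.
	 */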

	/*
	 * Fill in the bp->b_pages[] array with requested and optional
	 * read behind or read ahead pages.  Read behind pages are looked
	 * up in a backward direction, down to a first cached page.  Same
	 * for read ahead pages, but there is no need to shift the array
	 * in case of encountering a cached page.
	 */
	i = bp->b_npages = 0;
	if (rbehind) {
		vm_pindex_t startpindex, tpindex;
		vm_page_t p;

		VM_OBJECT_WLOCK(object);
		startpindex = m[0]->pindex - rbehind;
		if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
		    p->pindex >= startpindex)
			startpindex = p->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
		for (tpindex = m[0]->pindex - 1;
		    tpindex >= startpindex && tpindex < m[0]->pindex;
		    tpindex--, i++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL) {
				/* Shift the array. */
				for (int j = 0; j < i; j++)
					bp->b_pages[j] = bp->b_pages[j +
					    tpindex + 1 - startpindex];
				break;
			}
			bp->b_pages[tpindex - startpindex] = p;
		}

		bp->b_pgbefore = i;
		bp->b_npages += i;
		bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
	} else
		bp->b_pgbefore = 0;

	/* Requested pages. */
	for (int j = 0; j < count; j++, i++)
		bp->b_pages[i] = m[j];
	bp->b_npages += count;

	if (rahead) {
		vm_pindex_t endpindex, tpindex;
		vm_page_t p;

		if (!VM_OBJECT_WOWNED(object))
			VM_OBJECT_WLOCK(object);
		endpindex = m[count - 1]->pindex + rahead + 1;
		if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
		    p->pindex < endpindex)
			endpindex = p->pindex;
		if (endpindex > object->size)
			endpindex = object->size;

		for (tpindex = m[count - 1]->pindex + 1;
		    tpindex < endpindex; i++, tpindex++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			bp->b_pages[i] = p;
		}

		bp->b_pgafter = i - bp->b_npages;
		bp->b_npages = i;
	} else
		bp->b_pgafter = 0;

	if (VM_OBJECT_WOWNED(object))
		VM_OBJECT_WUNLOCK(object);

	/* Report back actual behind/ahead read. */
	if (a_rbehind)
		*a_rbehind = bp->b_pgbefore;
	if (a_rahead)
		*a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
	KASSERT(bp->b_npages <= atop(maxphys),
	    ("%s: buf %p overflowed", __func__, bp));
	for (int j = 1, prev = 0; j < bp->b_npages; j++) {
		if (bp->b_pages[j] == bogus_page)
			continue;
		KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
		    j - prev, ("%s: pages array not consecutive, bp %p",
		    __func__, bp));
		prev = j;
	}
#endif

	/*
	 * Recalculate first offset and bytecount with regards to read behind.
	 * Truncate bytecount to vnode real size and round up physical size
	 * for real devices.
	 */
	foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	bytecount = bp->b_npages << PAGE_SHIFT;
	if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
		bytecount = object->un_pager.vnp.vnp_size - foff;
	secmask = bo->bo_bsize - 1;
	KASSERT(secmask < PAGE_SIZE && secmask > 0,
	    ("%s: sector size %d too large", __func__, secmask + 1));
	bytecount = (bytecount + secmask) & ~secmask;
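
	/*
	 * e.g. (illustrative): with 512-byte sectors, secmask is 0x1ff,
	 * so a bytecount of 10000 truncated at EOF rounds up to 10240,
	 * the next sector multiple; the tail past EOF is zeroed on
	 * completion.
	 */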
1129b0cd2017SGleb Smirnoff * Truncate bytecount to the vnode's real size and round up the physical 1130b0cd2017SGleb Smirnoff * size for real devices. 113126f9a767SRodney W. Grimes */ 1132b0cd2017SGleb Smirnoff foff = IDX_TO_OFF(bp->b_pages[0]->pindex); 1133b0cd2017SGleb Smirnoff bytecount = bp->b_npages << PAGE_SHIFT; 1134b0cd2017SGleb Smirnoff if ((foff + bytecount) > object->un_pager.vnp.vnp_size) 1135b0cd2017SGleb Smirnoff bytecount = object->un_pager.vnp.vnp_size - foff; 1136eac91e32SKonstantin Belousov secmask = bo->bo_bsize - 1; 11376229cc50SPoul-Henning Kamp KASSERT(secmask < PAGE_SIZE && secmask > 0, 1138b0cd2017SGleb Smirnoff ("%s: sector size %d too large", __func__, secmask + 1)); 1139b0cd2017SGleb Smirnoff bytecount = (bytecount + secmask) & ~secmask; 114026f9a767SRodney W. Grimes 114126f9a767SRodney W. Grimes /* 1142b0cd2017SGleb Smirnoff * And map the pages to be read into the kva if the filesystem 11436ce697dcSKonstantin Belousov * requires mapped buffers. 114426f9a767SRodney W. Grimes */ 11452a5eef69SGleb Smirnoff if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 && 11466ce697dcSKonstantin Belousov unmapped_buf_allowed) { 11476ce697dcSKonstantin Belousov bp->b_data = unmapped_buf; 11486ce697dcSKonstantin Belousov bp->b_offset = 0; 1149fade8dd7SJeff Roberson } else { 1150fade8dd7SJeff Roberson bp->b_data = bp->b_kvabase; 1151b0cd2017SGleb Smirnoff pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages); 1152fade8dd7SJeff Roberson } 115326f9a767SRodney W. Grimes 1154b0cd2017SGleb Smirnoff /* Build a minimal buffer header. */ 115521144e3bSPoul-Henning Kamp bp->b_iocmd = BIO_READ; 1156bd78ceceSJohn Baldwin KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred")); 1157bd78ceceSJohn Baldwin KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred")); 1158a854ed98SJohn Baldwin bp->b_rcred = crhold(curthread->td_ucred); 1159a854ed98SJohn Baldwin bp->b_wcred = crhold(curthread->td_ucred); 11609c83534dSPoul-Henning Kamp pbgetbo(bo, bp); 11611faacf5dSKirk McKusick bp->b_vp = vp; 1162*4efe531cSMark Johnston bp->b_bcount = bp->b_bufsize = bytecount; 11632c18019fSPoul-Henning Kamp bp->b_iooffset = dbtob(bp->b_blkno); 1164e48b82bdSGleb Smirnoff KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) == 1165e48b82bdSGleb Smirnoff (blkno0 - bp->b_blkno) * DEV_BSIZE + 1166e48b82bdSGleb Smirnoff IDX_TO_OFF(m[0]->pindex) % bsize, 1167e48b82bdSGleb Smirnoff ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju " 1168e48b82bdSGleb Smirnoff "blkno0 %ju b_blkno %ju", bsize, 1169e48b82bdSGleb Smirnoff (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex, 1170e48b82bdSGleb Smirnoff (uintmax_t)blkno0, (uintmax_t)bp->b_blkno)); 117190effb23SGleb Smirnoff 1172*4efe531cSMark Johnston (void)runningbufclaim(bp, bp->b_bufsize); 1173*4efe531cSMark Johnston 117483c9dea1SGleb Smirnoff VM_CNT_INC(v_vnodein); 117583c9dea1SGleb Smirnoff VM_CNT_ADD(v_vnodepgsin, bp->b_npages); 1176b0cd2017SGleb Smirnoff 117790effb23SGleb Smirnoff if (iodone != NULL) { /* async */ 1178b0cd2017SGleb Smirnoff bp->b_pgiodone = iodone; 117990effb23SGleb Smirnoff bp->b_caller1 = arg; 118090effb23SGleb Smirnoff bp->b_iodone = vnode_pager_generic_getpages_done_async; 118190effb23SGleb Smirnoff bp->b_flags |= B_ASYNC; 118290effb23SGleb Smirnoff BUF_KERNPROC(bp); 1183b792bebeSPoul-Henning Kamp bstrategy(bp); 1184b0cd2017SGleb Smirnoff return (VM_PAGER_OK); 118590effb23SGleb Smirnoff } else { 118690effb23SGleb Smirnoff bp->b_iodone = bdone; 118790effb23SGleb Smirnoff bstrategy(bp); 11886a4b5823SPoul-Henning Kamp bwait(bp, PVM,
"vnread"); 118990effb23SGleb Smirnoff error = vnode_pager_generic_getpages_done(bp); 11901bb5ad63SGleb Smirnoff for (i = 0; i < bp->b_npages; i++) 11916ce697dcSKonstantin Belousov bp->b_pages[i] = NULL; 11921faacf5dSKirk McKusick bp->b_vp = NULL; 11939c83534dSPoul-Henning Kamp pbrelbo(bp); 1194756a5412SGleb Smirnoff uma_zfree(vnode_pbuf_zone, bp); 119590effb23SGleb Smirnoff return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK); 119690effb23SGleb Smirnoff } 1197b0cd2017SGleb Smirnoff } 119890effb23SGleb Smirnoff 119990effb23SGleb Smirnoff static void 120090effb23SGleb Smirnoff vnode_pager_generic_getpages_done_async(struct buf *bp) 120190effb23SGleb Smirnoff { 120290effb23SGleb Smirnoff int error; 120390effb23SGleb Smirnoff 120490effb23SGleb Smirnoff error = vnode_pager_generic_getpages_done(bp); 1205b0cd2017SGleb Smirnoff /* Run the iodone upon the requested range. */ 1206b0cd2017SGleb Smirnoff bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore, 1207b0cd2017SGleb Smirnoff bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error); 120890effb23SGleb Smirnoff for (int i = 0; i < bp->b_npages; i++) 120990effb23SGleb Smirnoff bp->b_pages[i] = NULL; 121090effb23SGleb Smirnoff bp->b_vp = NULL; 121190effb23SGleb Smirnoff pbrelbo(bp); 1212756a5412SGleb Smirnoff uma_zfree(vnode_pbuf_zone, bp); 121390effb23SGleb Smirnoff } 121490effb23SGleb Smirnoff 121590effb23SGleb Smirnoff static int 121690effb23SGleb Smirnoff vnode_pager_generic_getpages_done(struct buf *bp) 121790effb23SGleb Smirnoff { 121890effb23SGleb Smirnoff vm_object_t object; 121990effb23SGleb Smirnoff off_t tfoff, nextoff; 122090effb23SGleb Smirnoff int i, error; 122190effb23SGleb Smirnoff 1222cafbf0c6SWarner Losh KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0, 1223cafbf0c6SWarner Losh ("%s: buf error but b_error == 0\n", __func__)); 1224cafbf0c6SWarner Losh error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0; 122590effb23SGleb Smirnoff object = bp->b_vp->v_object; 122690effb23SGleb Smirnoff 122728f957b8SKonstantin Belousov runningbufwakeup(bp); 122828f957b8SKonstantin Belousov 122990effb23SGleb Smirnoff if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) { 1230fade8dd7SJeff Roberson if (!buf_mapped(bp)) { 1231fade8dd7SJeff Roberson bp->b_data = bp->b_kvabase; 1232fade8dd7SJeff Roberson pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, 123390effb23SGleb Smirnoff bp->b_npages); 123490effb23SGleb Smirnoff } 1235fade8dd7SJeff Roberson bzero(bp->b_data + bp->b_bcount, 123690effb23SGleb Smirnoff PAGE_SIZE * bp->b_npages - bp->b_bcount); 123790effb23SGleb Smirnoff } 1238fade8dd7SJeff Roberson if (buf_mapped(bp)) { 1239fade8dd7SJeff Roberson pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 1240fade8dd7SJeff Roberson bp->b_data = unmapped_buf; 124190effb23SGleb Smirnoff } 124226f9a767SRodney W. Grimes 12431bd12a3bSChuck Silvers /* 12441bd12a3bSChuck Silvers * If the read failed, we must free any read ahead/behind pages here. 12451bd12a3bSChuck Silvers * The requested pages are freed by the caller (for sync requests) 12461bd12a3bSChuck Silvers * or by the bp->b_pgiodone callback (for async requests). 
12471bd12a3bSChuck Silvers */ 12481bd12a3bSChuck Silvers if (error != 0) { 12491bd12a3bSChuck Silvers VM_OBJECT_WLOCK(object); 12501bd12a3bSChuck Silvers for (i = 0; i < bp->b_pgbefore; i++) 12511bd12a3bSChuck Silvers vm_page_free_invalid(bp->b_pages[i]); 12521bd12a3bSChuck Silvers for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++) 12531bd12a3bSChuck Silvers vm_page_free_invalid(bp->b_pages[i]); 12541bd12a3bSChuck Silvers VM_OBJECT_WUNLOCK(object); 12551bd12a3bSChuck Silvers return (error); 12561bd12a3bSChuck Silvers } 12571bd12a3bSChuck Silvers 12587f935055SJeff Roberson /* Read lock to protect size. */ 12597f935055SJeff Roberson VM_OBJECT_RLOCK(object); 126090effb23SGleb Smirnoff for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex); 126190effb23SGleb Smirnoff i < bp->b_npages; i++, tfoff = nextoff) { 12628f9110f6SJohn Dyson vm_page_t mt; 12638f9110f6SJohn Dyson 12648f9110f6SJohn Dyson nextoff = tfoff + PAGE_SIZE; 126590effb23SGleb Smirnoff mt = bp->b_pages[i]; 12662f81c92eSMark Johnston if (mt == bogus_page) 12672f81c92eSMark Johnston continue; 12688f9110f6SJohn Dyson 126954746b67SDmitrij Tejblum if (nextoff <= object->un_pager.vnp.vnp_size) { 12708d17e694SJulian Elischer /* 12718d17e694SJulian Elischer * Read filled up the entire page. 12728d17e694SJulian Elischer */ 12730012f373SJeff Roberson vm_page_valid(mt); 1274016a3c93SAlan Cox KASSERT(mt->dirty == 0, 127579f0deb9SGleb Smirnoff ("%s: page %p is dirty", __func__, mt)); 1276016a3c93SAlan Cox KASSERT(!pmap_page_is_mapped(mt), 127779f0deb9SGleb Smirnoff ("%s: page %p is mapped", __func__, mt)); 12788f9110f6SJohn Dyson } else { 12798d17e694SJulian Elischer /* 128042eb4108SAlan Cox * Read did not fill up the entire page. 12818d17e694SJulian Elischer * 1282c3dbadc1SChuck Silvers * Currently we do not set the entire page valid; 1283c3dbadc1SChuck Silvers * we just try to clear the piece that we couldn't 1284c3dbadc1SChuck Silvers * read. 12858d17e694SJulian Elischer */ 1286dc874f98SKonstantin Belousov vm_page_set_valid_range(mt, 0, 128754746b67SDmitrij Tejblum object->un_pager.vnp.vnp_size - tfoff); 128842eb4108SAlan Cox KASSERT((mt->dirty & vm_page_bits(0, 1289c3dbadc1SChuck Silvers object->un_pager.vnp.vnp_size - tfoff)) == 0, 1290c3dbadc1SChuck Silvers ("%s: page %p is dirty", __func__, mt)); 12918f9110f6SJohn Dyson } 12928f9110f6SJohn Dyson 1293b0cd2017SGleb Smirnoff if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter) 1294b6c00483SKonstantin Belousov vm_page_readahead_finish(mt); 129503679e23SAlan Cox } 12967f935055SJeff Roberson VM_OBJECT_RUNLOCK(object); 129790effb23SGleb Smirnoff 129890effb23SGleb Smirnoff return (error); 129926f9a767SRodney W. Grimes } 130026f9a767SRodney W. Grimes 1301ce75f2c3SMike Smith /* 1302ce75f2c3SMike Smith * EOPNOTSUPP is no longer legal. For local media VFS's that do not 1303ce75f2c3SMike Smith * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call 1304ce75f2c3SMike Smith * vnode_pager_generic_putpages() to implement the previous behaviour. 1305ce75f2c3SMike Smith * 1306ce75f2c3SMike Smith * All other FS's should use the bypass to get to the local media 1307ce75f2c3SMike Smith * backing vp's VOP_PUTPAGES.
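 *
 * A local filesystem's VOP_PUTPAGES can therefore be a thin wrapper
 * around this function. A hypothetical sketch ("myfs" is an assumed
 * name; the a_* fields are the standard vop_putpages_args):
 *
 *	static int
 *	myfs_putpages(struct vop_putpages_args *ap)
 *	{
 *		return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_sync, ap->a_rtvals));
 *	}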
1308ce75f2c3SMike Smith */ 1309e4542174SMatthew Dillon static void 13107ebba1f8SGleb Smirnoff vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count, 131133cad9e9SKonstantin Belousov int flags, int *rtvals) 1312170db9c6SJohn Dyson { 1313b8ebd99aSJohn Baldwin int rtval __diagused; 1314170db9c6SJohn Dyson struct vnode *vp; 131586ffbd76SMike Smith int bytes = count * PAGE_SIZE; 1316ad980522SJohn Dyson 13170e3cdf2cSAlan Cox /* 13180e3cdf2cSAlan Cox * Force synchronous operation if we are extremely low on memory 13190e3cdf2cSAlan Cox * to prevent a low-memory deadlock. VOP operations often need to 13200e3cdf2cSAlan Cox * allocate more memory to initiate the I/O ( i.e. do a BMAP 13210e3cdf2cSAlan Cox * operation ). The swapper handles the case by limiting the amount 13220e3cdf2cSAlan Cox * of asynchronous I/O, but that sort of solution doesn't scale well 13230e3cdf2cSAlan Cox * for the vnode pager without a lot of work. 13240e3cdf2cSAlan Cox * 13250e3cdf2cSAlan Cox * Also, the backing vnode's iodone routine may not wake the pageout 13260e3cdf2cSAlan Cox * daemon up. This should be probably be addressed XXX. 13270e3cdf2cSAlan Cox */ 13280e3cdf2cSAlan Cox 1329e2068d0bSJeff Roberson if (vm_page_count_min()) 133033cad9e9SKonstantin Belousov flags |= VM_PAGER_PUT_SYNC; 13310e3cdf2cSAlan Cox 13320e3cdf2cSAlan Cox /* 13330e3cdf2cSAlan Cox * Call device-specific putpages function 13340e3cdf2cSAlan Cox */ 1335170db9c6SJohn Dyson vp = object->handle; 133689f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 133733cad9e9SKonstantin Belousov rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals); 133823955314SAlfred Perlstein KASSERT(rtval != EOPNOTSUPP, 133923955314SAlfred Perlstein ("vnode_pager: stale FS putpages\n")); 134089f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 1341170db9c6SJohn Dyson } 1342170db9c6SJohn Dyson 134305877a85SKonstantin Belousov static int 134405877a85SKonstantin Belousov vn_off2bidx(vm_ooffset_t offset) 134505877a85SKonstantin Belousov { 134605877a85SKonstantin Belousov 134705877a85SKonstantin Belousov return ((offset & PAGE_MASK) / DEV_BSIZE); 134805877a85SKonstantin Belousov } 134905877a85SKonstantin Belousov 135005877a85SKonstantin Belousov static bool 135105877a85SKonstantin Belousov vn_dirty_blk(vm_page_t m, vm_ooffset_t offset) 135205877a85SKonstantin Belousov { 135305877a85SKonstantin Belousov 135405877a85SKonstantin Belousov KASSERT(IDX_TO_OFF(m->pindex) <= offset && 135505877a85SKonstantin Belousov offset < IDX_TO_OFF(m->pindex + 1), 135605877a85SKonstantin Belousov ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex, 135705877a85SKonstantin Belousov (uintmax_t)offset)); 135805877a85SKonstantin Belousov return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0); 135905877a85SKonstantin Belousov } 1360ce75f2c3SMike Smith 136126f9a767SRodney W. Grimes /* 1362ce75f2c3SMike Smith * This is now called from local media FS's to operate against their 13634491ea91SEivind Eklund * own vnodes if they fail to implement VOP_PUTPAGES. 13642b6b0df7SMatthew Dillon * 13652b6b0df7SMatthew Dillon * This is typically called indirectly via the pageout daemon and 1366763df3ecSPedro F. Giffuni * clustering has already typically occurred, so in general we ask the 13672b6b0df7SMatthew Dillon * underlying filesystem to write the data out asynchronously rather 13682b6b0df7SMatthew Dillon * then delayed. 136926f9a767SRodney W. 
Grimes */ 1370ce75f2c3SMike Smith int 1371c46b90e9SAlan Cox vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount, 1372c46b90e9SAlan Cox int flags, int *rtvals) 137326f9a767SRodney W. Grimes { 1374ce75f2c3SMike Smith vm_object_t object; 1375c46b90e9SAlan Cox vm_page_t m; 1376ed1a88a3SKonstantin Belousov vm_ooffset_t max_offset, next_offset, poffset, prev_offset; 1377f6b04d2bSDavid Greenman struct uio auio; 1378f6b04d2bSDavid Greenman struct iovec aiov; 137905877a85SKonstantin Belousov off_t prev_resid, wrsz; 1380e6c44f65SKonstantin Belousov int count, error, i, maxsize, ncount, pgoff, ppscheck; 138105877a85SKonstantin Belousov bool in_hole; 1382dd498befSPaul Saab static struct timeval lastfail; 1383dd498befSPaul Saab static int curfail; 138426f9a767SRodney W. Grimes 1385ce75f2c3SMike Smith object = vp->v_object; 1386ce75f2c3SMike Smith count = bytecount / PAGE_SIZE; 1387ce75f2c3SMike Smith 138826f9a767SRodney W. Grimes for (i = 0; i < count; i++) 1389031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_ERROR; 139026f9a767SRodney W. Grimes 1391c46b90e9SAlan Cox if ((int64_t)ma[0]->pindex < 0) { 1392e6c44f65SKonstantin Belousov printf("vnode_pager_generic_putpages: " 1393e6c44f65SKonstantin Belousov "attempt to write meta-data 0x%jx(%lx)\n", 1394e6c44f65SKonstantin Belousov (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty); 1395f6b04d2bSDavid Greenman rtvals[0] = VM_PAGER_BAD; 1396e6c44f65SKonstantin Belousov return (VM_PAGER_BAD); 13970d94caffSDavid Greenman } 13980bdb7528SDavid Greenman 1399f6b04d2bSDavid Greenman maxsize = count * PAGE_SIZE; 1400f6b04d2bSDavid Greenman ncount = count; 140126f9a767SRodney W. Grimes 1402c46b90e9SAlan Cox poffset = IDX_TO_OFF(ma[0]->pindex); 140300a6f47fSMatthew Dillon 140400a6f47fSMatthew Dillon /* 140500a6f47fSMatthew Dillon * If the page-aligned write is larger than the actual file, we 1406763df3ecSPedro F. Giffuni * have to invalidate pages occurring beyond the file EOF. However, 140700a6f47fSMatthew Dillon * there is an edge case where a file may not be page-aligned and 140800a6f47fSMatthew Dillon * the last page is partially invalid. In this case the filesystem 140900a6f47fSMatthew Dillon * may not properly clear the dirty bits for the entire page (which 141000a6f47fSMatthew Dillon * could be VM_PAGE_BITS_ALL due to the page having been mmap()d). 1411efec381dSMark Johnston * With the page busied we are free to fix up the dirty bits here. 14123ebeaf59SMatthew Dillon * 14133ebeaf59SMatthew Dillon * We do not under any circumstances truncate the valid bits, as 14143ebeaf59SMatthew Dillon * this will screw up bogus page replacement. 141500a6f47fSMatthew Dillon */ 1416b3d4ab66SKonstantin Belousov VM_OBJECT_RLOCK(object); 1417a316d390SJohn Dyson if (maxsize + poffset > object->un_pager.vnp.vnp_size) { 141800a6f47fSMatthew Dillon if (object->un_pager.vnp.vnp_size > poffset) { 1419a316d390SJohn Dyson maxsize = object->un_pager.vnp.vnp_size - poffset; 1420aa8de40aSPoul-Henning Kamp ncount = btoc(maxsize); 142100a6f47fSMatthew Dillon if ((pgoff = (int)maxsize & PAGE_MASK) != 0) { 1422938cdc42SKonstantin Belousov pgoff = roundup2(pgoff, DEV_BSIZE); 1423938cdc42SKonstantin Belousov 1424c46b90e9SAlan Cox /* 14257f935055SJeff Roberson * If the page is busy and the following 1426c46b90e9SAlan Cox * conditions hold, then the page's dirty 1427c46b90e9SAlan Cox * field cannot be concurrently changed by a 1428c46b90e9SAlan Cox * pmap operation.
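 * While the page is sbusied, no new writeable mapping of it can
 * be created, and the assertion below checks that none exists
 * now, so the dirty bits cannot grow under us while we clear the
 * part of the page beyond EOF.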
1429c46b90e9SAlan Cox */ 1430c46b90e9SAlan Cox m = ma[ncount - 1]; 1431c7aebda8SAttilio Rao vm_page_assert_sbusied(m); 14326031c68dSAlan Cox KASSERT(!pmap_page_is_write_mapped(m), 1433c46b90e9SAlan Cox ("vnode_pager_generic_putpages: page %p is not read-only", m)); 1434e6c44f65SKonstantin Belousov MPASS(m->dirty != 0); 1435c46b90e9SAlan Cox vm_page_clear_dirty(m, pgoff, PAGE_SIZE - 1436c46b90e9SAlan Cox pgoff); 143700a6f47fSMatthew Dillon } 143800a6f47fSMatthew Dillon } else { 143900a6f47fSMatthew Dillon maxsize = 0; 144000a6f47fSMatthew Dillon ncount = 0; 144100a6f47fSMatthew Dillon } 1442e6c44f65SKonstantin Belousov for (i = ncount; i < count; i++) 1443f6b04d2bSDavid Greenman rtvals[i] = VM_PAGER_BAD; 1444f6b04d2bSDavid Greenman } 14457f935055SJeff Roberson VM_OBJECT_RUNLOCK(object); 144626f9a767SRodney W. Grimes 1447f6b04d2bSDavid Greenman auio.uio_iov = &aiov; 1448f6b04d2bSDavid Greenman auio.uio_segflg = UIO_NOCOPY; 1449f6b04d2bSDavid Greenman auio.uio_rw = UIO_WRITE; 1450e6c44f65SKonstantin Belousov auio.uio_td = NULL; 1451ed1a88a3SKonstantin Belousov max_offset = roundup2(poffset + maxsize, DEV_BSIZE); 145205877a85SKonstantin Belousov 1453ed1a88a3SKonstantin Belousov for (prev_offset = poffset; prev_offset < max_offset;) { 145405877a85SKonstantin Belousov /* Skip clean blocks. */ 1455ed1a88a3SKonstantin Belousov for (in_hole = true; in_hole && prev_offset < max_offset;) { 145605877a85SKonstantin Belousov m = ma[OFF_TO_IDX(prev_offset - poffset)]; 145705877a85SKonstantin Belousov for (i = vn_off2bidx(prev_offset); 145805877a85SKonstantin Belousov i < sizeof(vm_page_bits_t) * NBBY && 1459ed1a88a3SKonstantin Belousov prev_offset < max_offset; i++) { 146005877a85SKonstantin Belousov if (vn_dirty_blk(m, prev_offset)) { 146105877a85SKonstantin Belousov in_hole = false; 146205877a85SKonstantin Belousov break; 146305877a85SKonstantin Belousov } 146405877a85SKonstantin Belousov prev_offset += DEV_BSIZE; 146505877a85SKonstantin Belousov } 146605877a85SKonstantin Belousov } 146705877a85SKonstantin Belousov if (in_hole) 146805877a85SKonstantin Belousov goto write_done; 146905877a85SKonstantin Belousov 147005877a85SKonstantin Belousov /* Find longest run of dirty blocks. */ 1471ed1a88a3SKonstantin Belousov for (next_offset = prev_offset; next_offset < max_offset;) { 147205877a85SKonstantin Belousov m = ma[OFF_TO_IDX(next_offset - poffset)]; 147305877a85SKonstantin Belousov for (i = vn_off2bidx(next_offset); 147405877a85SKonstantin Belousov i < sizeof(vm_page_bits_t) * NBBY && 1475ed1a88a3SKonstantin Belousov next_offset < max_offset; i++) { 147605877a85SKonstantin Belousov if (!vn_dirty_blk(m, next_offset)) 147705877a85SKonstantin Belousov goto start_write; 147805877a85SKonstantin Belousov next_offset += DEV_BSIZE; 147905877a85SKonstantin Belousov } 148005877a85SKonstantin Belousov } 148105877a85SKonstantin Belousov start_write: 148205877a85SKonstantin Belousov if (next_offset > poffset + maxsize) 148305877a85SKonstantin Belousov next_offset = poffset + maxsize; 1484bdb46c21SKonstantin Belousov if (prev_offset == next_offset) 1485bdb46c21SKonstantin Belousov goto write_done; 148605877a85SKonstantin Belousov 148705877a85SKonstantin Belousov /* 148805877a85SKonstantin Belousov * Getting here requires finding a dirty block in the 148905877a85SKonstantin Belousov * 'skip clean blocks' loop. 
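 *
 * Dirty state is tracked per DEV_BSIZE block (see vn_dirty_blk()):
 * with 4096-byte pages and 512-byte blocks, for example, each page
 * carries eight dirty bits, so the run [prev_offset, next_offset)
 * may begin and end in the middle of a page and may span several
 * pages; it is pushed out below with a single VOP_WRITE. (The
 * sizes are illustrative only.)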
149005877a85SKonstantin Belousov */ 149105877a85SKonstantin Belousov 149205877a85SKonstantin Belousov aiov.iov_base = NULL; 149305877a85SKonstantin Belousov auio.uio_iovcnt = 1; 149405877a85SKonstantin Belousov auio.uio_offset = prev_offset; 149505877a85SKonstantin Belousov prev_resid = auio.uio_resid = aiov.iov_len = next_offset - 149605877a85SKonstantin Belousov prev_offset; 149705877a85SKonstantin Belousov error = VOP_WRITE(vp, &auio, 149805877a85SKonstantin Belousov vnode_pager_putpages_ioflags(flags), curthread->td_ucred); 149905877a85SKonstantin Belousov 150005877a85SKonstantin Belousov wrsz = prev_resid - auio.uio_resid; 150105877a85SKonstantin Belousov if (wrsz == 0) { 150205877a85SKonstantin Belousov if (ppsratecheck(&lastfail, &curfail, 1) != 0) { 150305877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: " 150405877a85SKonstantin Belousov "zero-length write at %ju resid %zd\n", 150505877a85SKonstantin Belousov auio.uio_offset, auio.uio_resid); 150605877a85SKonstantin Belousov } 150705877a85SKonstantin Belousov break; 150805877a85SKonstantin Belousov } 150905877a85SKonstantin Belousov 151005877a85SKonstantin Belousov /* Adjust the starting offset for the next iteration. */ 151105877a85SKonstantin Belousov prev_offset += wrsz; 151205877a85SKonstantin Belousov MPASS(auio.uio_offset == prev_offset); 1513f6b04d2bSDavid Greenman 15143dbb0ca6SKonstantin Belousov ppscheck = 0; 151505877a85SKonstantin Belousov if (error != 0 && (ppscheck = ppsratecheck(&lastfail, 151605877a85SKonstantin Belousov &curfail, 1)) != 0) 151705877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: I/O error %d\n", 151805877a85SKonstantin Belousov error); 1519e6c44f65SKonstantin Belousov if (auio.uio_resid != 0 && (ppscheck != 0 || 1520e6c44f65SKonstantin Belousov ppsratecheck(&lastfail, &curfail, 1) != 0)) 152105877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: residual I/O %zd " 152205877a85SKonstantin Belousov "at %ju\n", auio.uio_resid, 152305877a85SKonstantin Belousov (uintmax_t)ma[0]->pindex); 152405877a85SKonstantin Belousov if (error != 0 || auio.uio_resid != 0) 152505877a85SKonstantin Belousov break; 152605877a85SKonstantin Belousov } 152705877a85SKonstantin Belousov write_done: 152805877a85SKonstantin Belousov /* Mark completely processed pages. */ 152905877a85SKonstantin Belousov for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++) 153026f9a767SRodney W. Grimes rtvals[i] = VM_PAGER_OK; 153105877a85SKonstantin Belousov /* Mark the partial EOF page. */ 153205877a85SKonstantin Belousov if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0) 153305877a85SKonstantin Belousov rtvals[i++] = VM_PAGER_OK; 153405877a85SKonstantin Belousov /* Unwritten pages remain in the range; as a bonus, report clean ones as OK. */ 153505877a85SKonstantin Belousov for (; i < ncount; i++) 153605877a85SKonstantin Belousov rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR; 153705877a85SKonstantin Belousov VM_CNT_ADD(v_vnodepgsout, i); 153805877a85SKonstantin Belousov VM_CNT_INC(v_vnodeout); 1539e6c44f65SKonstantin Belousov return (rtvals[0]); 154026f9a767SRodney W.
Grimes } 1541031ec8c1SKonstantin Belousov 154265b9599aSKonstantin Belousov int 154365b9599aSKonstantin Belousov vnode_pager_putpages_ioflags(int pager_flags) 154465b9599aSKonstantin Belousov { 154565b9599aSKonstantin Belousov int ioflags; 154665b9599aSKonstantin Belousov 154765b9599aSKonstantin Belousov /* 154865b9599aSKonstantin Belousov * Pageouts are already clustered; use IO_ASYNC to force a 154965b9599aSKonstantin Belousov * bawrite() rather than a bdwrite() to prevent paging I/O 155065b9599aSKonstantin Belousov * from saturating the buffer cache. Dummy-up the sequential 155165b9599aSKonstantin Belousov * heuristic to cause large ranges to cluster. If neither 155265b9599aSKonstantin Belousov * IO_SYNC nor IO_ASYNC is set, the system decides how to 155365b9599aSKonstantin Belousov * cluster. 155465b9599aSKonstantin Belousov */ 155565b9599aSKonstantin Belousov ioflags = IO_VMIO; 155665b9599aSKonstantin Belousov if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0) 155765b9599aSKonstantin Belousov ioflags |= IO_SYNC; 155865b9599aSKonstantin Belousov else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0) 155965b9599aSKonstantin Belousov ioflags |= IO_ASYNC; 156065b9599aSKonstantin Belousov ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL: 0; 156165b9599aSKonstantin Belousov ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0; 156265b9599aSKonstantin Belousov ioflags |= IO_SEQMAX << IO_SEQSHIFT; 156365b9599aSKonstantin Belousov return (ioflags); 156465b9599aSKonstantin Belousov } 156565b9599aSKonstantin Belousov 1566555b7bb4SKonstantin Belousov /* 1567555b7bb4SKonstantin Belousov * vnode_pager_undirty_pages(). 1568555b7bb4SKonstantin Belousov * 1569555b7bb4SKonstantin Belousov * A helper to mark pages as clean after a pageout that was possibly 1570555b7bb4SKonstantin Belousov * done with a short write. The lpos argument specifies the page run 1571555b7bb4SKonstantin Belousov * length in bytes, and the written argument specifies how many bytes 1572555b7bb4SKonstantin Belousov * were actually written. eof is the offset just past the last valid 1573555b7bb4SKonstantin Belousov * byte in the vnode, measured from the absolute file position of the 1574555b7bb4SKonstantin Belousov * first byte in the run. 1575555b7bb4SKonstantin Belousov */ 1576031ec8c1SKonstantin Belousov void 1577555b7bb4SKonstantin Belousov vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof, 1578555b7bb4SKonstantin Belousov int lpos) 1579031ec8c1SKonstantin Belousov { 1580555b7bb4SKonstantin Belousov int i, pos, pos_devb; 1581031ec8c1SKonstantin Belousov 1582555b7bb4SKonstantin Belousov if (written == 0 && eof >= lpos) 15839d17da3bSKonstantin Belousov return; 1584031ec8c1SKonstantin Belousov for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) { 1585031ec8c1SKonstantin Belousov if (pos < trunc_page(written)) { 1586031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_OK; 1587031ec8c1SKonstantin Belousov vm_page_undirty(ma[i]); 1588031ec8c1SKonstantin Belousov } else { 1589031ec8c1SKonstantin Belousov /* Partially written page.
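 * Only the first (written & PAGE_MASK) bytes of this page
 * reached the disk; clear the dirty bits for that prefix only
 * and return VM_PAGER_AGAIN so that the remainder is written
 * out again later.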
*/ 1590031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_AGAIN; 1591031ec8c1SKonstantin Belousov vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK); 1592031ec8c1SKonstantin Belousov } 1593031ec8c1SKonstantin Belousov } 1594555b7bb4SKonstantin Belousov if (eof >= lpos) /* avoid truncation */ 15957f935055SJeff Roberson return; 1596555b7bb4SKonstantin Belousov for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) { 1597555b7bb4SKonstantin Belousov if (pos != trunc_page(pos)) { 1598555b7bb4SKonstantin Belousov /* 1599555b7bb4SKonstantin Belousov * The page contains the last valid byte in 1600555b7bb4SKonstantin Belousov * the vnode; mark the rest of the page as 1601555b7bb4SKonstantin Belousov * clean, potentially making the whole page 1602555b7bb4SKonstantin Belousov * clean. 1603555b7bb4SKonstantin Belousov */ 1604555b7bb4SKonstantin Belousov pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE); 1605555b7bb4SKonstantin Belousov vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE - 1606555b7bb4SKonstantin Belousov pos_devb); 1607555b7bb4SKonstantin Belousov 1608555b7bb4SKonstantin Belousov /* 1609555b7bb4SKonstantin Belousov * If the page was cleaned, report the pageout 1610555b7bb4SKonstantin Belousov * on it as successful, so that msync() no 1611555b7bb4SKonstantin Belousov * longer needs to write out the page, endlessly 1612555b7bb4SKonstantin Belousov * creating write requests and dirty buffers. 1613555b7bb4SKonstantin Belousov */ 1614555b7bb4SKonstantin Belousov if (ma[i]->dirty == 0) 1615555b7bb4SKonstantin Belousov rtvals[i] = VM_PAGER_OK; 1616555b7bb4SKonstantin Belousov 1617555b7bb4SKonstantin Belousov pos = round_page(pos); 1618555b7bb4SKonstantin Belousov } else { 1619555b7bb4SKonstantin Belousov /* vm_pageout_flush() clears dirty */ 1620555b7bb4SKonstantin Belousov rtvals[i] = VM_PAGER_BAD; 1621555b7bb4SKonstantin Belousov pos += PAGE_SIZE; 1622555b7bb4SKonstantin Belousov } 1623555b7bb4SKonstantin Belousov } 1624031ec8c1SKonstantin Belousov } 162584110e7eSKonstantin Belousov 1626fe7bcbafSKyle Evans static void 162784110e7eSKonstantin Belousov vnode_pager_update_writecount(vm_object_t object, vm_offset_t start, 162884110e7eSKonstantin Belousov vm_offset_t end) 162984110e7eSKonstantin Belousov { 163084110e7eSKonstantin Belousov struct vnode *vp; 163184110e7eSKonstantin Belousov vm_ooffset_t old_wm; 163284110e7eSKonstantin Belousov 163389f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 163484110e7eSKonstantin Belousov if (object->type != OBJT_VNODE) { 163589f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 163684110e7eSKonstantin Belousov return; 163784110e7eSKonstantin Belousov } 163884110e7eSKonstantin Belousov old_wm = object->un_pager.vnp.writemappings; 163984110e7eSKonstantin Belousov object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start; 164084110e7eSKonstantin Belousov vp = object->handle; 164184110e7eSKonstantin Belousov if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) { 164278022527SKonstantin Belousov ASSERT_VOP_LOCKED(vp, "v_writecount inc"); 164378022527SKonstantin Belousov VOP_ADD_WRITECOUNT_CHECKED(vp, 1); 1644b47f6241SJohn Baldwin CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d", 1645b47f6241SJohn Baldwin __func__, vp, vp->v_writecount); 164684110e7eSKonstantin Belousov } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) { 164778022527SKonstantin Belousov ASSERT_VOP_LOCKED(vp, "v_writecount dec"); 164878022527SKonstantin Belousov VOP_ADD_WRITECOUNT_CHECKED(vp, -1); 1649b47f6241SJohn Baldwin CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to
%d", 1650b47f6241SJohn Baldwin __func__, vp, vp->v_writecount); 165184110e7eSKonstantin Belousov } 165289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 165384110e7eSKonstantin Belousov } 165484110e7eSKonstantin Belousov 1655fe7bcbafSKyle Evans static void 165684110e7eSKonstantin Belousov vnode_pager_release_writecount(vm_object_t object, vm_offset_t start, 165784110e7eSKonstantin Belousov vm_offset_t end) 165884110e7eSKonstantin Belousov { 165984110e7eSKonstantin Belousov struct vnode *vp; 166084110e7eSKonstantin Belousov struct mount *mp; 166184110e7eSKonstantin Belousov vm_offset_t inc; 166284110e7eSKonstantin Belousov 166389f6b863SAttilio Rao VM_OBJECT_WLOCK(object); 166484110e7eSKonstantin Belousov 166584110e7eSKonstantin Belousov /* 166684110e7eSKonstantin Belousov * First, recheck the object type to account for the race when 166784110e7eSKonstantin Belousov * the vnode is reclaimed. 166884110e7eSKonstantin Belousov */ 166984110e7eSKonstantin Belousov if (object->type != OBJT_VNODE) { 167089f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 167184110e7eSKonstantin Belousov return; 167284110e7eSKonstantin Belousov } 167384110e7eSKonstantin Belousov 167484110e7eSKonstantin Belousov /* 167584110e7eSKonstantin Belousov * Optimize for the case when writemappings is not going to 167684110e7eSKonstantin Belousov * zero. 167784110e7eSKonstantin Belousov */ 167884110e7eSKonstantin Belousov inc = end - start; 167984110e7eSKonstantin Belousov if (object->un_pager.vnp.writemappings != inc) { 168084110e7eSKonstantin Belousov object->un_pager.vnp.writemappings -= inc; 168189f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 168284110e7eSKonstantin Belousov return; 168384110e7eSKonstantin Belousov } 168484110e7eSKonstantin Belousov 168584110e7eSKonstantin Belousov vp = object->handle; 168684110e7eSKonstantin Belousov vhold(vp); 168789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object); 168884110e7eSKonstantin Belousov mp = NULL; 168984110e7eSKonstantin Belousov vn_start_write(vp, &mp, V_WAIT); 169078022527SKonstantin Belousov vn_lock(vp, LK_SHARED | LK_RETRY); 169184110e7eSKonstantin Belousov 169284110e7eSKonstantin Belousov /* 169384110e7eSKonstantin Belousov * Decrement the object's writemappings, by swapping the start 169484110e7eSKonstantin Belousov * and end arguments for vnode_pager_update_writecount(). If 169584110e7eSKonstantin Belousov * there was not a race with vnode reclaimation, then the 169684110e7eSKonstantin Belousov * vnode's v_writecount is decremented. 
169784110e7eSKonstantin Belousov */ 169884110e7eSKonstantin Belousov vnode_pager_update_writecount(object, end, start); 1699b249ce48SMateusz Guzik VOP_UNLOCK(vp); 170084110e7eSKonstantin Belousov vdrop(vp); 170184110e7eSKonstantin Belousov if (mp != NULL) 170284110e7eSKonstantin Belousov vn_finished_write(mp); 170384110e7eSKonstantin Belousov } 1704192112b7SKonstantin Belousov 1705192112b7SKonstantin Belousov static void 1706192112b7SKonstantin Belousov vnode_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp) 1707192112b7SKonstantin Belousov { 1708192112b7SKonstantin Belousov *vpp = object->handle; 1709192112b7SKonstantin Belousov } 1710b068bb09SKonstantin Belousov 1711b068bb09SKonstantin Belousov static void 1712b068bb09SKonstantin Belousov vnode_pager_clean1(struct vnode *vp, int sync_flags) 1713b068bb09SKonstantin Belousov { 1714b068bb09SKonstantin Belousov struct vm_object *obj; 1715b068bb09SKonstantin Belousov 1716b068bb09SKonstantin Belousov ASSERT_VOP_LOCKED(vp, "needs lock for writes"); 1717b068bb09SKonstantin Belousov obj = vp->v_object; 1718b068bb09SKonstantin Belousov if (obj == NULL) 1719b068bb09SKonstantin Belousov return; 1720b068bb09SKonstantin Belousov 1721b068bb09SKonstantin Belousov VM_OBJECT_WLOCK(obj); 1722b068bb09SKonstantin Belousov vm_object_page_clean(obj, 0, 0, sync_flags); 1723b068bb09SKonstantin Belousov VM_OBJECT_WUNLOCK(obj); 1724b068bb09SKonstantin Belousov } 1725b068bb09SKonstantin Belousov 1726b068bb09SKonstantin Belousov void 1727b068bb09SKonstantin Belousov vnode_pager_clean_sync(struct vnode *vp) 1728b068bb09SKonstantin Belousov { 1729b068bb09SKonstantin Belousov vnode_pager_clean1(vp, OBJPC_SYNC); 1730b068bb09SKonstantin Belousov } 1731b068bb09SKonstantin Belousov 1732b068bb09SKonstantin Belousov void 1733b068bb09SKonstantin Belousov vnode_pager_clean_async(struct vnode *vp) 1734b068bb09SKonstantin Belousov { 1735b068bb09SKonstantin Belousov vnode_pager_clean1(vp, 0); 1736b068bb09SKonstantin Belousov } 1737
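/*
 * Example usage of the cleaning helpers above (a hypothetical sketch;
 * the caller and locking context are assumed): a caller that needs all
 * dirty pages of a vnode written to disk before proceeding could do
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vnode_pager_clean_sync(vp);
 *	VOP_UNLOCK(vp);
 *
 * while vnode_pager_clean_async() starts the writes without waiting
 * for them to complete.
 */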