/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */
/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static void vnode_pager_init(void);
static vm_offset_t vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);

struct pagerops vnodepagerops = {
	.pgo_init =	vnode_pager_init,
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
};

int vnode_pbuf_freecnt;

static void
vnode_pager_init(void)
{

	vnode_pbuf_freecnt = nswbuf / 2 + 1;
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");

	/*
	 * Prevent race condition when allocating the object. This
	 * can happen with NFS vnodes since the nfsnode isn't locked.
	 */
	VI_LOCK(vp);
	while (vp->v_iflag & VI_OLOCK) {
		vp->v_iflag |= VI_OWANT;
		msleep(vp, VI_MTX(vp), PVM, "vnpobj", 0);
	}
	vp->v_iflag |= VI_OLOCK;
	VI_UNLOCK(vp);

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * And an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		vp->v_object = object;
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	VI_LOCK(vp);
	vp->v_usecount++;
	vp->v_iflag &= ~VI_OLOCK;
	if (vp->v_iflag & VI_OWANT) {
		vp->v_iflag &= ~VI_OWANT;
		wakeup(vp);
	}
	VI_UNLOCK(vp);
	return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	struct vnode *vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_pip_wait(object, "vnpdea");

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_dealloc");
	vp->v_object = NULL;
	vp->v_vflag &= ~(VV_TEXT | VV_OBJBUF);
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL)
		return FALSE;

	VI_LOCK(vp);
	if (vp->v_iflag & VI_DOOMED) {
		VI_UNLOCK(vp);
		return FALSE;
	}
	VI_UNLOCK(vp);
	/*
	 * If filesystem no longer mounted or offset beyond end of file we do
	 * not have the page.
	 */
	if ((vp->v_mount == NULL) ||
	    (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size))
		return FALSE;
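	/*
	 * Illustrative arithmetic for the block/page conversion done
	 * below (assuming PAGE_SIZE == 4096): with an 8K filesystem
	 * block, pagesperblock == 2, so page index 5 lives in
	 * filesystem block 5 / 2 == 2.  With a 1K filesystem block,
	 * pagesperblock is 0 and blocksperpage == 4, so page index 5
	 * starts at filesystem block 5 * 4 == 20.  The *before/*after
	 * run counts reported by VOP_BMAP() are scaled by the same
	 * factors to convert them from filesystem blocks to pages.
	 */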
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	VM_OBJECT_UNLOCK(object);
	mtx_lock(&Giant);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	mtx_unlock(&Giant);
	VM_OBJECT_LOCK(object);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;
			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) >
			    object->un_pager.vnp.vnp_size) {
				numafter =
				    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
				    pindex;
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
	VM_OBJECT_LOCK(object);
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_UNLOCK(object);
		return;
	}
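	/*
	 * Worked example of the shrink handling below (illustrative,
	 * assuming PAGE_SIZE == 4096): if the file shrinks to
	 * nsize == 10000, then nobjsize == 3 pages and any pages at
	 * index 3 and beyond are discarded.  Because 10000 is not
	 * page-aligned, the tail of the last page (base == 10000 & 4095
	 * == 1808, size == 4096 - 1808 == 2288 bytes) is zeroed so no
	 * stale data survives past the new EOF.
	 */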
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk. Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    FALSE);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if ((nsize & PAGE_MASK) &&
		    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
		    m->valid != 0) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * XXX work around SMP data integrity race
			 * by unmapping the page from user processes.
			 * The garbage we just cleared may be mapped
			 * to a user process running on another cpu
			 * and this code is not running through normal
			 * I/O channels which handle SMP issues for
			 * us, so unmap page to synchronize all cpus.
			 *
			 * XXX should vm_pager_unmap_page() have
			 * dealt with this?
			 */
			vm_page_lock_queues();
			pmap_remove_all(m);

			/*
			 * Clear out partial-page dirty bits.  This
			 * has the side effect of setting the valid
			 * bits, but that is ok.  There are a bunch
			 * of places in the VM system where we expected
			 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
			 * case is one of them.  If the page is still
			 * partially dirty, make it fully dirty.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_set_validclean(m, base, size);
			if (m->dirty != 0)
				m->dirty = VM_PAGE_BITS_ALL;
			vm_page_unlock_queues();
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
	VM_OBJECT_UNLOCK(object);
}

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static vm_offset_t
vnode_pager_addr(vp, address, run)
	struct vnode *vp;
	vm_ooffset_t address;
	int *run;
{
	int rtaddress;
	int bsize;
	daddr_t block;
	int err;
	daddr_t vblock;
	int voffset;

	GIANT_REQUIRED;
	if ((int) address < 0)
		return -1;

	if (vp->v_mount == NULL)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, &block, run, NULL);

	if (err || (block == -1))
		rtaddress = -1;
	else {
		rtaddress = block + voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize/PAGE_SIZE;
			*run -= voffset/PAGE_SIZE;
		}
	}

	return rtaddress;
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int i;
	struct vnode *dp, *vp;
	struct buf *bp;
	struct sf_buf *sf;
	int fileaddr;
	vm_offset_t bsize;
	int error = 0;

	GIANT_REQUIRED;

	vp = object->handle;
	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &dp, 0, NULL, NULL);
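	/*
	 * The loop below fills the page one filesystem block at a time
	 * (illustrative, assuming PAGE_SIZE == 4096): with bsize == 1024
	 * it makes up to four passes, skipping sub-blocks that are
	 * already valid, reading mapped blocks through a pbuf, and
	 * zeroing sub-blocks that have no backing store (fileaddr == -1).
	 */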
	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		if (vm_page_bits(i * bsize, bsize) & m->valid)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			fileaddr = vnode_pager_addr(vp, address, NULL);
		}
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetvp(dp, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			runningbufspace += bp->b_runningbufspace;

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			/* we definitely need to be at splvm here */

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;

			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
		} else {
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			vm_page_set_validclean(m, (i * bsize) & PAGE_MASK, bsize);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		}
	}
	sf_buf_free(sf);
	vm_page_lock_queues();
	pmap_clear_modify(m);
	vm_page_unlock_queues();
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;

}


/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_LOCK(object);
	}
	vm_page_lock_queues();
	pmap_clear_modify(m);
	vm_page_undirty(m);
	vm_page_unlock_queues();
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}
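/*
 * Illustrative sketch (an assumption, not code from this file): a
 * local-media filesystem that lacks its own getpages implementation
 * would forward to vnode_pager_generic_getpages() below with a
 * trivial VOP wrapper; "myfs" is a hypothetical filesystem name.
 *
 *	static int
 *	myfs_getpages(struct vop_getpages_args *ap)
 *	{
 *
 *		return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_reqpage));
 *	}
 */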
/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	mtx_lock(&Giant);
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	mtx_unlock(&Giant);
	VM_OBJECT_LOCK(object);
	return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int reqpage;
{
	vm_object_t object;
	vm_offset_t kva;
	off_t foff, tfoff, nextoff;
	int i, j, size, bsize, first, firstaddr;
	struct vnode *dp;
	int runpg;
	int runend;
	struct buf *bp;
	int count;
	int error = 0;

	GIANT_REQUIRED;
	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	if (vp->v_mount == NULL)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) {
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		error = vnode_pager_input_old(object, m[reqpage]);
		VM_OBJECT_UNLOCK(object);
		return (error);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		cnt.v_vnodein++;
		cnt.v_vnodepgsin++;
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */
	VM_OBJECT_LOCK(object);
	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_OK;
	}
	m[reqpage]->valid = 0;
	VM_OBJECT_UNLOCK(object);

	/*
	 * here on direct device I/O
	 */
	firstaddr = -1;

	/*
	 * calculate the run that includes the required page
	 */
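	/*
	 * Illustrative run calculation (assumptions: count == 4,
	 * reqpage == 2): if vnode_pager_addr() reports a run of
	 * runpg == 2 pages starting at m[0], that run ends before the
	 * required page and pages 0-1 are freed.  A second probe at
	 * m[2] that yields runpg >= 2 covers the remaining pages, so
	 * the transfer becomes pages 2-3 with first == 2.
	 */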
	for (first = 0, i = 0; i < count; i = runend) {
		firstaddr = vnode_pager_addr(vp,
			IDX_TO_OFF(m[i]->pindex), &runpg);
		if (firstaddr == -1) {
			VM_OBJECT_LOCK(object);
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %d, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
				    firstaddr, (uintmax_t)(foff >> 32),
				    (uintmax_t)foff,
				    (uintmax_t)
				    (object->un_pager.vnp.vnp_size >> 32),
				    (uintmax_t)object->un_pager.vnp.vnp_size);
			}
			vm_page_lock_queues();
			vm_page_free(m[i]);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			for (j = i; j < runend; j++)
				vm_page_free(m[j]);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
		} else {
			if (runpg < (count - first)) {
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				for (i = first + runpg; i < count; i++)
					vm_page_free(m[i]);
				vm_page_unlock_queues();
				VM_OBJECT_UNLOCK(object);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
		}
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	KASSERT(count > 0, ("zero count"));
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;
	KASSERT(size > 0, ("zero size"));

	/*
	 * round up physical size for real devices.
	 */
	if (dp->v_type == VBLK || dp->v_type == VCHR) {
		int secmask = dp->v_bufobj.bo_bsize - 1;
		KASSERT(secmask < PAGE_SIZE && secmask > 0,
		    ("vnode_pager_generic_getpages: sector size %d too large",
		    secmask + 1));
		size = (size + secmask) & ~secmask;
	}
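	/*
	 * Illustrative arithmetic for the sector rounding just above
	 * (assumption: a device with 512-byte sectors, bo_bsize == 512):
	 * secmask == 511, so a 6000-byte transfer is rounded up to
	 * (6000 + 511) & ~511 == 6144 bytes.
	 */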
	bp = getpbuf(&vnode_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = bdone;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	bp->b_blkno = firstaddr;
	pbgetvp(dp, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	runningbufspace += bp->b_runningbufspace;

	cnt.v_vnodein++;
	cnt.v_vnodepgsin += count;

	/* do the input */
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	bwait(bp, PVM, "vnread");

	if ((bp->b_ioflags & BIO_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	relpbuf(bp, &vnode_pbuf_freecnt);

	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = m[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(mt);	/* should be an assert? XXX */
			pmap_clear_modify(mt);
		} else {
			/*
			 * Read did not fill up entire page.  Since this
			 * is getpages, the page may be mapped, so we have
			 * to zero the invalid portions of the page even
			 * though we aren't setting them valid.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_validclean(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			/* handled by vm_fault now */
			/* vm_page_zero_invalid(mt, FALSE); */
		}

		if (i != reqpage) {

			/*
			 * whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere. (it already is in the object). Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * just in case someone was asking for this page we
			 * now tell them that it is ok to use
			 */
			if (!error) {
				if (mt->flags & PG_WANTED)
					vm_page_activate(mt);
				else
					vm_page_deactivate(mt);
				vm_page_wakeup(mt);
			} else {
				vm_page_free(mt);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
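/*
 * Illustrative sketch (an assumption, not code from this file): the
 * corresponding wrapper for the write side, again using the
 * hypothetical "myfs":
 *
 *	static int
 *	myfs_putpages(struct vop_putpages_args *ap)
 *	{
 *
 *		return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_sync, ap->a_rtvals));
 *	}
 */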
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;
	struct mount *mp;
	int bytes = count * PAGE_SIZE;

	GIANT_REQUIRED;
	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock. VOP operations often need to
	 * allocate more memory to initiate the I/O ( i.e. do a BMAP
	 * operation ).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed.  XXX
	 */
	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		sync |= OBJPC_SYNC;

	/*
	 * Call device-specific putpages function
	 */
	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	if (vp->v_type != VREG)
		mp = NULL;
	(void)vn_start_write(vp, &mp, V_WAIT);
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	vn_finished_write(mp);
	VM_OBJECT_LOCK(object);
}


/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int flags;
	int *rtvals;
{
	int i;
	vm_object_t object;
	int count;
	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;

	GIANT_REQUIRED;
	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int) m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
			(long)m[0]->pindex, (u_long)m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix-up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				vm_page_lock_queues();
				vm_page_clear_dirty(m[ncount - 1], pgoff,
				    PAGE_SIZE - pgoff);
				vm_page_unlock_queues();
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.  Dummy-up the sequential heuristic to cause
	 * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
	 * the system decides how to cluster.
	 */
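	/*
	 * Illustrative flag composition: for a typical asynchronous
	 * pageout (flags == 0), the code below yields
	 * IO_VMIO | IO_ASYNC | (IO_SEQMAX << IO_SEQSHIFT); with
	 * VM_PAGER_PUT_INVAL set it instead yields
	 * IO_VMIO | IO_SYNC | IO_INVAL | (IO_SEQMAX << IO_SEQSHIFT).
	 */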
	ioflags = IO_VMIO;
	if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
		ioflags |= IO_SYNC;
	else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = (struct thread *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
	cnt.v_vnodeout++;
	cnt.v_vnodepgsout += ncount;

	if (error) {
		printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		printf("vnode_pager_putpages: residual I/O %d at %lu\n",
		    auio.uio_resid, (u_long)m[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}

struct vnode *
vnode_pager_lock(vm_object_t first_object)
{
	struct vnode *vp;
	vm_object_t backing_object, object;

	VM_OBJECT_LOCK_ASSERT(first_object, MA_OWNED);
	for (object = first_object; object != NULL; object = backing_object) {
		if (object->type != OBJT_VNODE) {
			if ((backing_object = object->backing_object) != NULL)
				VM_OBJECT_LOCK(backing_object);
			if (object != first_object)
				VM_OBJECT_UNLOCK(object);
			continue;
		}
	retry:
		if (object->flags & OBJ_DEAD) {
			if (object != first_object)
				VM_OBJECT_UNLOCK(object);
			return NULL;
		}
		vp = object->handle;
		VI_LOCK(vp);
		VM_OBJECT_UNLOCK(object);
		if (first_object != object)
			VM_OBJECT_UNLOCK(first_object);
		if (vget(vp, LK_CANRECURSE | LK_INTERLOCK | LK_NOPAUSE |
		    LK_RETRY | LK_SHARED, curthread)) {
			VM_OBJECT_LOCK(first_object);
			if (object != first_object)
				VM_OBJECT_LOCK(object);
			if (object->type != OBJT_VNODE) {
				if (object != first_object)
					VM_OBJECT_UNLOCK(object);
				return NULL;
			}
			printf("vnode_pager_lock: retrying\n");
			goto retry;
		}
		VM_OBJECT_LOCK(first_object);
		return (vp);
	}
	return NULL;
}