/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/sf_buf.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);

struct pagerops vnodepagerops = {
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
};

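/*
 * Free count for the pool of physical buffers (pbufs) used by the vnode
 * pager; getpbuf()/relpbuf() sleep on and adjust this counter.  It is
 * initialized during pager buffer setup, outside this file.
 */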
int vnode_pbuf_freecnt;

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_UNLOCK(object);
			return (0);
		}
		VOP_UNLOCK(vp, 0);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	if (size == 0) {
		if (vn_isdisk(vp, NULL)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.
	 */
	VM_OBJECT_LOCK(object);
	object->ref_count--;
	VM_OBJECT_UNLOCK(object);
	vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_LOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * vclean() may be called twice.  The first time
		 * removes the primary reference to the object,
		 * the second time goes one further and is a
		 * special-case to terminate the object.
		 *
		 * Don't double-terminate the object.
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_UNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_UNLOCK(obj);
	}
	vp->v_object = NULL;
}


/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_LOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		msleep(object, VM_OBJECT_MTX(object), PDROP | PVM, "vadead", 0);
	}

	if (vp->v_usecount == 0)
		panic("vnode_pager_alloc: no vnode reference");

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size.
		 */
		object = vm_object_allocate(OBJT_VNODE, OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping.
			 */
			VI_UNLOCK(vp);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
		VM_OBJECT_UNLOCK(object);
	}
	vref(vp);
	return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(object)
	vm_object_t object;
{
	struct vnode *vp = object->handle;

	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_pip_wait(object, "vnpdea");

	object->handle = NULL;
	object->type = OBJT_DEAD;
	if (object->flags & OBJ_DISCONNECTWNT) {
		vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
		wakeup(object);
	}
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	vp->v_object = NULL;
	vp->v_vflag &= ~VV_TEXT;
}

static boolean_t
vnode_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;
	int vfslocked;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || vp->v_iflag & VI_DOOMED)
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;

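	/*
	 * Map the page index to a filesystem block number.  Depending on
	 * the block size, either several pages share one block
	 * (pagesperblock > 0) or several blocks make up one page
	 * (blocksperpage > 0); the before/after run lengths returned by
	 * VOP_BMAP() below are in blocks and are scaled back to pages.
	 */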
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VFS_UNLOCK_GIANT(vfslocked);
	VM_OBJECT_LOCK(object);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;

			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) >
			    object->un_pager.vnp.vnp_size) {
				numafter =
				    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
				    pindex;
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	vm_ooffset_t nsize;
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
/*	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
	VM_OBJECT_LOCK(object);
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size.
		 */
		VM_OBJECT_UNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    FALSE);
		/*
		 * This gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if ((nsize & PAGE_MASK) &&
		    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
		    m->valid != 0) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * Clear out partial-page dirty bits.  This
			 * has the side effect of setting the valid
			 * bits, but that is ok.  There are a bunch
			 * of places in the VM system where we expect
			 * m->dirty == VM_PAGE_BITS_ALL.  The file EOF
			 * case is one of them.  If the page is still
			 * partially dirty, make it fully dirty.
			 *
			 * Note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_lock_queues();
			vm_page_set_validclean(m, base, size);
			if (m->dirty != 0)
				m->dirty = VM_PAGE_BITS_ALL;
			vm_page_unlock_queues();
		} else if ((nsize & PAGE_MASK) &&
		    __predict_false(object->cache != NULL)) {
			vm_page_cache_free(object, OFF_TO_IDX(nsize),
			    nobjsize);
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
	VM_OBJECT_UNLOCK(object);
}

/*
 * Calculate the linear (byte) disk address of the specified virtual
 * file address.
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (address < 0)
		return -1;

	if (vp->v_iflag & VI_DOOMED)
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(object, m)
	vm_object_t object;
	vm_page_t m;
{
	int bits, i;
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	int error = 0;

	vp = object->handle;
	if (vp->v_iflag & VI_DOOMED)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = getpbuf(&vnode_pbuf_freecnt);

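			/*
			 * This block is backed by the file: read it
			 * synchronously through a physical buffer; the
			 * bwait() below sleeps until the strategy call
			 * completes.
			 */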
			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			atomic_add_long(&runningbufspace, bp->b_runningbufspace);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0)
				error = EIO;

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			pbrelbo(bp);
			relpbuf(bp, &vnode_pbuf_freecnt);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		VM_OBJECT_LOCK(object);
		m->valid |= bits;
		VM_OBJECT_UNLOCK(object);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(object, m)
	vm_object_t object;
	vm_page_t m;
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
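		/*
		 * Clamp the transfer size so that we do not read past
		 * the end of file.
		 */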
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_UNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_LOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count;
	int reqpage;
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;
	int vfslocked;

	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	VFS_UNLOCK_GIANT(vfslocked);
	VM_OBJECT_LOCK(object);
	return rtval;
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int reqpage;
{
	vm_object_t object;
	vm_offset_t kva;
	off_t foff, tfoff, nextoff;
	int i, j, size, bsize, first;
	daddr_t firstaddr, reqblock;
	struct bufobj *bo;
	int runpg;
	int runend;
	struct buf *bp;
	int count;
	int error;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("vnode_pager_generic_getpages does not support devices"));
	if (vp->v_iflag & VI_DOOMED)
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	/* get the UNDERLYING device for the file with VOP_BMAP() */

	/*
	 * originally, we did not check for an error return value -- assuming
	 * an fs always has a bmap entry point -- that assumption is wrong!!!
	 */
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * if we can't bmap, use old VOP code
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
	if (error == EOPNOTSUPP) {
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		PCPU_INC(cnt.v_vnodein);
		PCPU_INC(cnt.v_vnodepgsin);
		error = vnode_pager_input_old(object, m[reqpage]);
		VM_OBJECT_UNLOCK(object);
		return (error);
	} else if (error != 0) {
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);

	/*
	 * if the blocksize is smaller than a page size, then use
	 * special small filesystem code.  NFS sometimes has a small
	 * blocksize, but it can handle large reads itself.
	 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		PCPU_INC(cnt.v_vnodein);
		PCPU_INC(cnt.v_vnodepgsin);
		return vnode_pager_input_smlfs(object, m[reqpage]);
	}

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */
	VM_OBJECT_LOCK(object);
	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return VM_PAGER_OK;
	} else if (reqblock == -1) {
		pmap_zero_page(m[reqpage]);
		vm_page_undirty(m[reqpage]);
		m[reqpage]->valid = VM_PAGE_BITS_ALL;
		vm_page_lock_queues();
		for (i = 0; i < count; i++)
			if (i != reqpage)
				vm_page_free(m[i]);
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_OK);
	}
	m[reqpage]->valid = 0;
	VM_OBJECT_UNLOCK(object);

	/*
	 * here on direct device I/O
	 */
	firstaddr = -1;

	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
		if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
		    &runpg) != 0) {
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			for (; i < count; i++)
				if (i != reqpage)
					vm_page_free(m[i]);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			return (VM_PAGER_ERROR);
		}
		if (firstaddr == -1) {
			VM_OBJECT_LOCK(object);
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected missing page: firstaddr: %jd, foff: 0x%jx%08jx, vnp_size: 0x%jx%08jx",
				    (intmax_t)firstaddr, (uintmax_t)(foff >> 32),
				    (uintmax_t)foff,
				    (uintmax_t)
				    (object->un_pager.vnp.vnp_size >> 32),
				    (uintmax_t)object->un_pager.vnp.vnp_size);
			}
			vm_page_lock_queues();
			vm_page_free(m[i]);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			VM_OBJECT_LOCK(object);
			vm_page_lock_queues();
			for (j = i; j < runend; j++)
				vm_page_free(m[j]);
			vm_page_unlock_queues();
			VM_OBJECT_UNLOCK(object);
		} else {
			if (runpg < (count - first)) {
				VM_OBJECT_LOCK(object);
				vm_page_lock_queues();
				for (i = first + runpg; i < count; i++)
					vm_page_free(m[i]);
				vm_page_unlock_queues();
				VM_OBJECT_UNLOCK(object);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		m += first;
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	KASSERT(count > 0, ("zero count"));
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;
	KASSERT(size > 0, ("zero size"));

	/*
	 * round up physical size for real devices.
	 */
	if (1) {
		int secmask = bo->bo_bsize - 1;

		KASSERT(secmask < PAGE_SIZE && secmask > 0,
		    ("vnode_pager_generic_getpages: sector size %d too large",
		    secmask + 1));
		size = (size + secmask) & ~secmask;
	}

	bp = getpbuf(&vnode_pbuf_freecnt);
	kva = (vm_offset_t) bp->b_data;

	/*
	 * and map the pages to be read into the kva
	 */
	pmap_qenter(kva, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = bdone;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	bp->b_blkno = firstaddr;
	pbgetbo(bo, bp);
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	atomic_add_long(&runningbufspace, bp->b_runningbufspace);

	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, count);

	/* do the input */
	bp->b_iooffset = dbtob(bp->b_blkno);
	bstrategy(bp);

	bwait(bp, PVM, "vnread");

	if ((bp->b_ioflags & BIO_ERROR) != 0)
		error = EIO;

	if (!error) {
		if (size != count * PAGE_SIZE)
			bzero((caddr_t) kva + size, PAGE_SIZE * count - size);
	}
	pmap_qremove(kva, count);

	/*
	 * free the buffer header back to the swap buffer pool
	 */
	pbrelbo(bp);
	relpbuf(bp, &vnode_pbuf_freecnt);

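	/*
	 * The I/O is complete.  Mark fully-covered pages valid, mark the
	 * partial page at EOF valid and clean only up to the file size,
	 * and either queue or free the read-ahead/read-behind pages that
	 * were brought in alongside the requested page.
	 */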
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, tfoff = foff; i < count; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = m[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			KASSERT(mt->dirty == 0,
			    ("vnode_pager_generic_getpages: page %p is dirty",
			    mt));
			KASSERT(!pmap_page_is_mapped(mt),
			    ("vnode_pager_generic_getpages: page %p is mapped",
			    mt));
		} else {
			/*
			 * Read did not fill up entire page.  Since this
			 * is getpages, the page may be mapped, so we have
			 * to zero the invalid portions of the page even
			 * though we aren't setting them valid.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_validclean(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
		}

		if (i != reqpage) {

			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (mt->oflags & VPO_WANTED)
					vm_page_activate(mt);
				else
					vm_page_deactivate(mt);
				vm_page_wakeup(mt);
			} else {
				vm_page_free(mt);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	if (error) {
		printf("vnode_pager_getpages: I/O read error\n");
	}
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
 * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
 * vnode_pager_generic_putpages() to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_PUTPAGES.
 */
static void
vnode_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	int rtval;
	struct vnode *vp;
	struct mount *mp;
	int bytes = count * PAGE_SIZE;

	/*
	 * Force synchronous operation if we are extremely low on memory
	 * to prevent a low-memory deadlock.  VOP operations often need to
	 * allocate more memory to initiate the I/O (i.e. do a BMAP
	 * operation).  The swapper handles the case by limiting the amount
	 * of asynchronous I/O, but that sort of solution doesn't scale well
	 * for the vnode pager without a lot of work.
	 *
	 * Also, the backing vnode's iodone routine may not wake the pageout
	 * daemon up.  This should probably be addressed.  XXX
	 */
	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
		sync |= OBJPC_SYNC;

	/*
	 * Call device-specific putpages function.
	 */
	vp = object->handle;
	VM_OBJECT_UNLOCK(object);
	if (vp->v_type != VREG)
		mp = NULL;
	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: stale FS putpages\n"));
	VM_OBJECT_LOCK(object);
}


/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
int
vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
	struct vnode *vp;
	vm_page_t *m;
	int bytecount;
	int flags;
	int *rtvals;
{
	int i;
	vm_object_t object;
	int count;
	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;
	int ppscheck = 0;
	static struct timeval lastfail;
	static int curfail;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	if ((int64_t)m[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
		    (long)m[0]->pindex, (u_long)m[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return VM_PAGER_BAD;
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(m[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				vm_page_lock_queues();
				vm_page_clear_dirty(m[ncount - 1], pgoff,
				    PAGE_SIZE - pgoff);
				vm_page_unlock_queues();
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}

	/*
	 * Pageouts are already clustered, so use IO_ASYNC to force a
	 * bawrite() rather than a bdwrite() to prevent paging I/O from
	 * saturating the buffer cache.  Dummy-up the sequential heuristic
	 * to cause large ranges to cluster.  If neither IO_SYNC nor
	 * IO_ASYNC is set, the system decides how to cluster.
	 */
	ioflags = IO_VMIO;
	if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
		ioflags |= IO_SYNC;
	else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL: 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;

	aiov.iov_base = (caddr_t) 0;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = (struct thread *) 0;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, ncount);

	if (error) {
		if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
			printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
			printf("vnode_pager_putpages: residual I/O %d at %lu\n",
			    auio.uio_resid, (u_long)m[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return rtvals[0];
}