/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 * $Id: vnode_pager.c,v 1.51 1995/10/23 02:23:29 dyson Exp $
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
Grimes #include <sys/proc.h> 58df8bae1dSRodney W. Grimes #include <sys/malloc.h> 59df8bae1dSRodney W. Grimes #include <sys/vnode.h> 60df8bae1dSRodney W. Grimes #include <sys/uio.h> 61df8bae1dSRodney W. Grimes #include <sys/mount.h> 6224a1cce3SDavid Greenman #include <sys/buf.h> 63df8bae1dSRodney W. Grimes 64df8bae1dSRodney W. Grimes #include <vm/vm.h> 65df8bae1dSRodney W. Grimes #include <vm/vm_page.h> 6624a1cce3SDavid Greenman #include <vm/vm_pager.h> 67df8bae1dSRodney W. Grimes #include <vm/vnode_pager.h> 68df8bae1dSRodney W. Grimes 690b8253a7SBruce Evans extern vm_offset_t vnode_pager_addr __P((struct vnode *vp, vm_offset_t address, 700b8253a7SBruce Evans int *run)); 710b8253a7SBruce Evans extern void vnode_pager_iodone __P((struct buf *bp)); 720b8253a7SBruce Evans extern int vnode_pager_input_smlfs __P((vm_object_t object, vm_page_t m)); 730b8253a7SBruce Evans extern int vnode_pager_input_old __P((vm_object_t object, vm_page_t m)); 740b8253a7SBruce Evans 75df8bae1dSRodney W. Grimes struct pagerops vnodepagerops = { 7624a1cce3SDavid Greenman NULL, 77df8bae1dSRodney W. Grimes vnode_pager_alloc, 78df8bae1dSRodney W. Grimes vnode_pager_dealloc, 7924a1cce3SDavid Greenman vnode_pager_getpages, 8024a1cce3SDavid Greenman vnode_pager_putpages, 8124a1cce3SDavid Greenman vnode_pager_haspage, 8224a1cce3SDavid Greenman NULL 83df8bae1dSRodney W. Grimes }; 84df8bae1dSRodney W. Grimes 850b8253a7SBruce Evans static int vnode_pager_leaf_getpages __P((vm_object_t object, vm_page_t *m, 860b8253a7SBruce Evans int count, int reqpage)); 870b8253a7SBruce Evans static int vnode_pager_leaf_putpages __P((vm_object_t object, vm_page_t *m, 880b8253a7SBruce Evans int count, boolean_t sync, 890b8253a7SBruce Evans int *rtvals)); 90170db9c6SJohn Dyson 91df8bae1dSRodney W. Grimes /* 92df8bae1dSRodney W. Grimes * Allocate (or lookup) pager for a vnode. 93df8bae1dSRodney W. Grimes * Handle is a vnode pointer. 94df8bae1dSRodney W. 
Grimes */ 9524a1cce3SDavid Greenman vm_object_t 9626f9a767SRodney W. Grimes vnode_pager_alloc(handle, size, prot, offset) 97ee3a64c9SDavid Greenman void *handle; 98df8bae1dSRodney W. Grimes vm_size_t size; 99df8bae1dSRodney W. Grimes vm_prot_t prot; 10026f9a767SRodney W. Grimes vm_offset_t offset; 101df8bae1dSRodney W. Grimes { 10206cb7259SDavid Greenman vm_object_t object; 103df8bae1dSRodney W. Grimes struct vnode *vp; 104df8bae1dSRodney W. Grimes 105df8bae1dSRodney W. Grimes /* 106df8bae1dSRodney W. Grimes * Pageout to vnode, no can do yet. 107df8bae1dSRodney W. Grimes */ 108df8bae1dSRodney W. Grimes if (handle == NULL) 109df8bae1dSRodney W. Grimes return (NULL); 110df8bae1dSRodney W. Grimes 111df8bae1dSRodney W. Grimes vp = (struct vnode *) handle; 11239d38f93SDavid Greenman 11339d38f93SDavid Greenman /* 11439d38f93SDavid Greenman * Prevent race condition when allocating the object. This 11539d38f93SDavid Greenman * can happen with NFS vnodes since the nfsnode isn't locked. 11639d38f93SDavid Greenman */ 11739d38f93SDavid Greenman while (vp->v_flag & VOLOCK) { 11839d38f93SDavid Greenman vp->v_flag |= VOWANT; 11939d38f93SDavid Greenman tsleep(vp, PVM, "vnpobj", 0); 12039d38f93SDavid Greenman } 12139d38f93SDavid Greenman vp->v_flag |= VOLOCK; 12239d38f93SDavid Greenman 12339d38f93SDavid Greenman /* 12439d38f93SDavid Greenman * If the object is being terminated, wait for it to 12539d38f93SDavid Greenman * go away. 12639d38f93SDavid Greenman */ 12724a1cce3SDavid Greenman while (((object = vp->v_object) != NULL) && (object->flags & OBJ_DEAD)) { 128aa2cabb9SDavid Greenman tsleep(object, PVM, "vadead", 0); 12924a1cce3SDavid Greenman } 1300d94caffSDavid Greenman 13124a1cce3SDavid Greenman if (object == NULL) { 132df8bae1dSRodney W. Grimes /* 133df8bae1dSRodney W. Grimes * And an object of the appropriate size 134df8bae1dSRodney W. 
Grimes */ 13524a1cce3SDavid Greenman object = vm_object_allocate(OBJT_VNODE, round_page(size)); 1364bb62461SDavid Greenman object->flags = OBJ_CANPERSIST; 137bbc0ec52SDavid Greenman 138df8bae1dSRodney W. Grimes /* 13924a1cce3SDavid Greenman * Hold a reference to the vnode and initialize object data. 140df8bae1dSRodney W. Grimes */ 141df8bae1dSRodney W. Grimes VREF(vp); 14224a1cce3SDavid Greenman object->un_pager.vnp.vnp_size = size; 14326f9a767SRodney W. Grimes 14424a1cce3SDavid Greenman object->handle = handle; 14524a1cce3SDavid Greenman vp->v_object = object; 146df8bae1dSRodney W. Grimes } else { 147df8bae1dSRodney W. Grimes /* 14824a1cce3SDavid Greenman * vm_object_reference() will remove the object from the cache if 14924a1cce3SDavid Greenman * found and gain a reference to the object. 150df8bae1dSRodney W. Grimes */ 15124a1cce3SDavid Greenman vm_object_reference(object); 152df8bae1dSRodney W. Grimes } 15339d38f93SDavid Greenman 154f6b04d2bSDavid Greenman if (vp->v_type == VREG) 155f6b04d2bSDavid Greenman vp->v_flag |= VVMIO; 15639d38f93SDavid Greenman 15739d38f93SDavid Greenman vp->v_flag &= ~VOLOCK; 15839d38f93SDavid Greenman if (vp->v_flag & VOWANT) { 15939d38f93SDavid Greenman vp->v_flag &= ~VOWANT; 16039d38f93SDavid Greenman wakeup(vp); 16139d38f93SDavid Greenman } 16224a1cce3SDavid Greenman return (object); 163df8bae1dSRodney W. Grimes } 164df8bae1dSRodney W. Grimes 16526f9a767SRodney W. Grimes void 16624a1cce3SDavid Greenman vnode_pager_dealloc(object) 1670d94caffSDavid Greenman vm_object_t object; 16824a1cce3SDavid Greenman { 16924a1cce3SDavid Greenman register struct vnode *vp = object->handle; 170df8bae1dSRodney W. 
Grimes 17124a1cce3SDavid Greenman if (vp == NULL) 17224a1cce3SDavid Greenman panic("vnode_pager_dealloc: pager already dealloced"); 17324a1cce3SDavid Greenman 17424a1cce3SDavid Greenman if (object->paging_in_progress) { 1750d94caffSDavid Greenman int s = splbio(); 1760d94caffSDavid Greenman while (object->paging_in_progress) { 177c0503609SDavid Greenman object->flags |= OBJ_PIPWNT; 1780d94caffSDavid Greenman tsleep(object, PVM, "vnpdea", 0); 1790d94caffSDavid Greenman } 1800d94caffSDavid Greenman splx(s); 18124a1cce3SDavid Greenman } 18224a1cce3SDavid Greenman 18324a1cce3SDavid Greenman object->handle = NULL; 1840d94caffSDavid Greenman 185aa2cabb9SDavid Greenman vp->v_object = NULL; 1868e58bf68SDavid Greenman vp->v_flag &= ~(VTEXT | VVMIO); 18700072442SDavid Greenman vp->v_flag |= VAGE; 188df8bae1dSRodney W. Grimes vrele(vp); 189df8bae1dSRodney W. Grimes } 19026f9a767SRodney W. Grimes 19126f9a767SRodney W. Grimes boolean_t 19224a1cce3SDavid Greenman vnode_pager_haspage(object, offset, before, after) 19324a1cce3SDavid Greenman vm_object_t object; 194df8bae1dSRodney W. Grimes vm_offset_t offset; 19524a1cce3SDavid Greenman int *before; 19624a1cce3SDavid Greenman int *after; 197df8bae1dSRodney W. Grimes { 19824a1cce3SDavid Greenman struct vnode *vp = object->handle; 199df8bae1dSRodney W. Grimes daddr_t bn; 2002c4488fcSJohn Dyson int err, run; 201170db9c6SJohn Dyson daddr_t reqblock; 2022c4488fcSJohn Dyson int poff; 2032c4488fcSJohn Dyson int bsize; 2042c4488fcSJohn Dyson int pagesperblock; 205df8bae1dSRodney W. Grimes 206df8bae1dSRodney W. Grimes /* 2070d94caffSDavid Greenman * If filesystem no longer mounted or offset beyond end of file we do 2080d94caffSDavid Greenman * not have the page. 209df8bae1dSRodney W. Grimes */ 21024a1cce3SDavid Greenman if ((vp->v_mount == NULL) || (offset >= object->un_pager.vnp.vnp_size)) 2114abc71c0SDavid Greenman return FALSE; 212df8bae1dSRodney W. 
Grimes 213eed2d59bSDavid Greenman bsize = vp->v_mount->mnt_stat.f_iosize; 214170db9c6SJohn Dyson pagesperblock = bsize / PAGE_SIZE; 215170db9c6SJohn Dyson reqblock = offset / bsize; 216170db9c6SJohn Dyson err = VOP_BMAP(vp, reqblock, (struct vnode **) 0, &bn, 217170db9c6SJohn Dyson after, before); 2180d94caffSDavid Greenman if (err) 21924a1cce3SDavid Greenman return TRUE; 2206eab77f2SJohn Dyson if ( bn == -1) 221ced399eeSJohn Dyson return FALSE; 222170db9c6SJohn Dyson poff = (offset - (reqblock * bsize)) / PAGE_SIZE; 223170db9c6SJohn Dyson if (before) { 224170db9c6SJohn Dyson *before *= pagesperblock; 225170db9c6SJohn Dyson *before += poff; 226170db9c6SJohn Dyson } 227170db9c6SJohn Dyson if (after) { 228b1fc01b7SJohn Dyson int numafter; 229170db9c6SJohn Dyson *after *= pagesperblock; 230b1fc01b7SJohn Dyson numafter = pagesperblock - (poff + 1); 231b1fc01b7SJohn Dyson if (offset + numafter * PAGE_SIZE > object->un_pager.vnp.vnp_size) { 232b1fc01b7SJohn Dyson numafter = (object->un_pager.vnp.vnp_size - offset)/PAGE_SIZE; 233b1fc01b7SJohn Dyson } 234b1fc01b7SJohn Dyson *after += numafter; 235170db9c6SJohn Dyson } 236ced399eeSJohn Dyson return TRUE; 237df8bae1dSRodney W. Grimes } 238df8bae1dSRodney W. Grimes 239df8bae1dSRodney W. Grimes /* 240df8bae1dSRodney W. Grimes * Lets the VM system know about a change in size for a file. 24124a1cce3SDavid Greenman * We adjust our own internal size and flush any cached pages in 242df8bae1dSRodney W. Grimes * the associated object that are affected by the size change. 243df8bae1dSRodney W. Grimes * 244df8bae1dSRodney W. Grimes * Note: this routine may be invoked as a result of a pager put 245df8bae1dSRodney W. Grimes * operation (possibly at object termination time), so we must be careful. 246df8bae1dSRodney W. Grimes */ 247df8bae1dSRodney W. Grimes void 248df8bae1dSRodney W. Grimes vnode_pager_setsize(vp, nsize) 249df8bae1dSRodney W. Grimes struct vnode *vp; 250df8bae1dSRodney W. Grimes u_long nsize; 251df8bae1dSRodney W. 
Grimes { 25224a1cce3SDavid Greenman vm_object_t object = vp->v_object; 253df8bae1dSRodney W. Grimes 25424a1cce3SDavid Greenman if (object == NULL) 255df8bae1dSRodney W. Grimes return; 256bbc0ec52SDavid Greenman 257df8bae1dSRodney W. Grimes /* 258df8bae1dSRodney W. Grimes * Hasn't changed size 259df8bae1dSRodney W. Grimes */ 26024a1cce3SDavid Greenman if (nsize == object->un_pager.vnp.vnp_size) 261df8bae1dSRodney W. Grimes return; 262bbc0ec52SDavid Greenman 263df8bae1dSRodney W. Grimes /* 264bbc0ec52SDavid Greenman * File has shrunk. Toss any cached pages beyond the new EOF. 265df8bae1dSRodney W. Grimes */ 26624a1cce3SDavid Greenman if (nsize < object->un_pager.vnp.vnp_size) { 26724a1cce3SDavid Greenman if (round_page((vm_offset_t) nsize) < object->un_pager.vnp.vnp_size) { 268df8bae1dSRodney W. Grimes vm_object_page_remove(object, 26924a1cce3SDavid Greenman round_page((vm_offset_t) nsize), object->un_pager.vnp.vnp_size, FALSE); 2700d94caffSDavid Greenman } 271bbc0ec52SDavid Greenman /* 272bbc0ec52SDavid Greenman * this gets rid of garbage at the end of a page that is now 273bbc0ec52SDavid Greenman * only partially backed by the vnode... 274bbc0ec52SDavid Greenman */ 275bbc0ec52SDavid Greenman if (nsize & PAGE_MASK) { 276bbc0ec52SDavid Greenman vm_offset_t kva; 277bbc0ec52SDavid Greenman vm_page_t m; 278bbc0ec52SDavid Greenman 279bbc0ec52SDavid Greenman m = vm_page_lookup(object, trunc_page((vm_offset_t) nsize)); 280bbc0ec52SDavid Greenman if (m) { 281bbc0ec52SDavid Greenman kva = vm_pager_map_page(m); 282bbc0ec52SDavid Greenman bzero((caddr_t) kva + (nsize & PAGE_MASK), 283bbc0ec52SDavid Greenman round_page(nsize) - nsize); 284bbc0ec52SDavid Greenman vm_pager_unmap_page(kva); 285bbc0ec52SDavid Greenman } 286bbc0ec52SDavid Greenman } 287bbc0ec52SDavid Greenman } 28824a1cce3SDavid Greenman object->un_pager.vnp.vnp_size = (vm_offset_t) nsize; 289bbc0ec52SDavid Greenman object->size = round_page(nsize); 290df8bae1dSRodney W. Grimes } 291df8bae1dSRodney W. 
Grimes 292df8bae1dSRodney W. Grimes void 293df8bae1dSRodney W. Grimes vnode_pager_umount(mp) 294df8bae1dSRodney W. Grimes register struct mount *mp; 295df8bae1dSRodney W. Grimes { 29624a1cce3SDavid Greenman struct vnode *vp, *nvp; 297df8bae1dSRodney W. Grimes 29824a1cce3SDavid Greenman loop: 29924a1cce3SDavid Greenman for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) { 30024a1cce3SDavid Greenman /* 30124a1cce3SDavid Greenman * Vnode can be reclaimed by getnewvnode() while we 30224a1cce3SDavid Greenman * traverse the list. 30324a1cce3SDavid Greenman */ 30424a1cce3SDavid Greenman if (vp->v_mount != mp) 30524a1cce3SDavid Greenman goto loop; 30624a1cce3SDavid Greenman 307df8bae1dSRodney W. Grimes /* 308bbc0ec52SDavid Greenman * Save the next pointer now since uncaching may terminate the 30924a1cce3SDavid Greenman * object and render vnode invalid 310df8bae1dSRodney W. Grimes */ 31124a1cce3SDavid Greenman nvp = vp->v_mntvnodes.le_next; 31224a1cce3SDavid Greenman 31324a1cce3SDavid Greenman if (vp->v_object != NULL) { 314c01a9b8cSDavid Greenman VOP_LOCK(vp); 31524a1cce3SDavid Greenman vnode_pager_uncache(vp); 316c01a9b8cSDavid Greenman VOP_UNLOCK(vp); 317c01a9b8cSDavid Greenman } 318df8bae1dSRodney W. Grimes } 319df8bae1dSRodney W. Grimes } 320df8bae1dSRodney W. Grimes 321df8bae1dSRodney W. Grimes /* 322df8bae1dSRodney W. Grimes * Remove vnode associated object from the object cache. 323c01a9b8cSDavid Greenman * This routine must be called with the vnode locked. 324df8bae1dSRodney W. Grimes * 325c01a9b8cSDavid Greenman * XXX unlock the vnode. 326c01a9b8cSDavid Greenman * We must do this since uncaching the object may result in its 327c01a9b8cSDavid Greenman * destruction which may initiate paging activity which may necessitate 328c01a9b8cSDavid Greenman * re-locking the vnode. 32926f9a767SRodney W. Grimes */ 33024a1cce3SDavid Greenman void 33126f9a767SRodney W. Grimes vnode_pager_uncache(vp) 33224a1cce3SDavid Greenman struct vnode *vp; 33326f9a767SRodney W. 
Grimes { 33424a1cce3SDavid Greenman vm_object_t object; 33526f9a767SRodney W. Grimes 33626f9a767SRodney W. Grimes /* 33726f9a767SRodney W. Grimes * Not a mapped vnode 33826f9a767SRodney W. Grimes */ 339aa2cabb9SDavid Greenman object = vp->v_object; 3408e58bf68SDavid Greenman if (object == NULL) 34124a1cce3SDavid Greenman return; 3420d94caffSDavid Greenman 34324a1cce3SDavid Greenman vm_object_reference(object); 344c01a9b8cSDavid Greenman VOP_UNLOCK(vp); 34526f9a767SRodney W. Grimes pager_cache(object, FALSE); 346c01a9b8cSDavid Greenman VOP_LOCK(vp); 34724a1cce3SDavid Greenman return; 34826f9a767SRodney W. Grimes } 349df8bae1dSRodney W. Grimes 35026f9a767SRodney W. Grimes 35126f9a767SRodney W. Grimes void 35226f9a767SRodney W. Grimes vnode_pager_freepage(m) 35326f9a767SRodney W. Grimes vm_page_t m; 354df8bae1dSRodney W. Grimes { 35526f9a767SRodney W. Grimes PAGE_WAKEUP(m); 35626f9a767SRodney W. Grimes vm_page_free(m); 35726f9a767SRodney W. Grimes } 35826f9a767SRodney W. Grimes 35926f9a767SRodney W. Grimes /* 36026f9a767SRodney W. Grimes * calculate the linear (byte) disk address of specified virtual 36126f9a767SRodney W. Grimes * file address 36226f9a767SRodney W. Grimes */ 36326f9a767SRodney W. Grimes vm_offset_t 364efc68ce1SDavid Greenman vnode_pager_addr(vp, address, run) 36526f9a767SRodney W. Grimes struct vnode *vp; 36626f9a767SRodney W. Grimes vm_offset_t address; 367efc68ce1SDavid Greenman int *run; 36826f9a767SRodney W. Grimes { 36926f9a767SRodney W. Grimes int rtaddress; 37026f9a767SRodney W. Grimes int bsize; 37126f9a767SRodney W. Grimes vm_offset_t block; 37226f9a767SRodney W. Grimes struct vnode *rtvp; 37326f9a767SRodney W. Grimes int err; 37426f9a767SRodney W. Grimes int vblock, voffset; 37526f9a767SRodney W. Grimes 3760d94caffSDavid Greenman if ((int) address < 0) 3770d94caffSDavid Greenman return -1; 3780d94caffSDavid Greenman 3792c4488fcSJohn Dyson if (vp->v_mount == NULL) 3802c4488fcSJohn Dyson return -1; 3812c4488fcSJohn Dyson 38226f9a767SRodney W. 
Grimes bsize = vp->v_mount->mnt_stat.f_iosize; 38326f9a767SRodney W. Grimes vblock = address / bsize; 38426f9a767SRodney W. Grimes voffset = address % bsize; 38526f9a767SRodney W. Grimes 386c83ebe77SJohn Dyson err = VOP_BMAP(vp, vblock, &rtvp, &block, run, NULL); 38726f9a767SRodney W. Grimes 388efc68ce1SDavid Greenman if (err || (block == -1)) 38926f9a767SRodney W. Grimes rtaddress = -1; 390efc68ce1SDavid Greenman else { 391187f0071SDavid Greenman rtaddress = block + voffset / DEV_BSIZE; 392efc68ce1SDavid Greenman if( run) { 393efc68ce1SDavid Greenman *run += 1; 394efc68ce1SDavid Greenman *run *= bsize/PAGE_SIZE; 395efc68ce1SDavid Greenman *run -= voffset/PAGE_SIZE; 396efc68ce1SDavid Greenman } 397efc68ce1SDavid Greenman } 39826f9a767SRodney W. Grimes 39926f9a767SRodney W. Grimes return rtaddress; 40026f9a767SRodney W. Grimes } 40126f9a767SRodney W. Grimes 40226f9a767SRodney W. Grimes /* 40326f9a767SRodney W. Grimes * interrupt routine for I/O completion 40426f9a767SRodney W. Grimes */ 40526f9a767SRodney W. Grimes void 40626f9a767SRodney W. Grimes vnode_pager_iodone(bp) 40726f9a767SRodney W. Grimes struct buf *bp; 40826f9a767SRodney W. Grimes { 40926f9a767SRodney W. Grimes bp->b_flags |= B_DONE; 41024a1cce3SDavid Greenman wakeup(bp); 41126f9a767SRodney W. Grimes } 41226f9a767SRodney W. Grimes 41326f9a767SRodney W. Grimes /* 41426f9a767SRodney W. Grimes * small block file system vnode pager input 41526f9a767SRodney W. Grimes */ 41626f9a767SRodney W. Grimes int 41724a1cce3SDavid Greenman vnode_pager_input_smlfs(object, m) 41824a1cce3SDavid Greenman vm_object_t object; 41926f9a767SRodney W. Grimes vm_page_t m; 42026f9a767SRodney W. Grimes { 42126f9a767SRodney W. Grimes int i; 42226f9a767SRodney W. Grimes int s; 42326f9a767SRodney W. Grimes struct vnode *dp, *vp; 42426f9a767SRodney W. Grimes struct buf *bp; 42526f9a767SRodney W. Grimes vm_offset_t kva; 42626f9a767SRodney W. Grimes int fileaddr; 42726f9a767SRodney W. Grimes vm_offset_t bsize; 42826f9a767SRodney W. 
Grimes int error = 0; 42926f9a767SRodney W. Grimes 43024a1cce3SDavid Greenman vp = object->handle; 4312c4488fcSJohn Dyson if (vp->v_mount == NULL) 4322c4488fcSJohn Dyson return VM_PAGER_BAD; 4332c4488fcSJohn Dyson 43426f9a767SRodney W. Grimes bsize = vp->v_mount->mnt_stat.f_iosize; 43526f9a767SRodney W. Grimes 4360bdb7528SDavid Greenman 437c83ebe77SJohn Dyson VOP_BMAP(vp, 0, &dp, 0, NULL, NULL); 43826f9a767SRodney W. Grimes 43926f9a767SRodney W. Grimes kva = vm_pager_map_page(m); 44026f9a767SRodney W. Grimes 44126f9a767SRodney W. Grimes for (i = 0; i < PAGE_SIZE / bsize; i++) { 442bbc0ec52SDavid Greenman 4430d94caffSDavid Greenman if ((vm_page_bits(m->offset + i * bsize, bsize) & m->valid)) 44426f9a767SRodney W. Grimes continue; 44526f9a767SRodney W. Grimes 446efc68ce1SDavid Greenman fileaddr = vnode_pager_addr(vp, m->offset + i * bsize, (int *)0); 44726f9a767SRodney W. Grimes if (fileaddr != -1) { 44826f9a767SRodney W. Grimes bp = getpbuf(); 44926f9a767SRodney W. Grimes 45026f9a767SRodney W. Grimes /* build a minimal buffer header */ 45126f9a767SRodney W. Grimes bp->b_flags = B_BUSY | B_READ | B_CALL; 45226f9a767SRodney W. Grimes bp->b_iodone = vnode_pager_iodone; 45326f9a767SRodney W. Grimes bp->b_proc = curproc; 45426f9a767SRodney W. Grimes bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred; 45526f9a767SRodney W. Grimes if (bp->b_rcred != NOCRED) 45626f9a767SRodney W. Grimes crhold(bp->b_rcred); 45726f9a767SRodney W. Grimes if (bp->b_wcred != NOCRED) 45826f9a767SRodney W. Grimes crhold(bp->b_wcred); 45926f9a767SRodney W. Grimes bp->b_un.b_addr = (caddr_t) kva + i * bsize; 460187f0071SDavid Greenman bp->b_blkno = fileaddr; 4610d94caffSDavid Greenman pbgetvp(dp, bp); 46226f9a767SRodney W. Grimes bp->b_bcount = bsize; 46326f9a767SRodney W. Grimes bp->b_bufsize = bsize; 46426f9a767SRodney W. Grimes 46526f9a767SRodney W. Grimes /* do the input */ 46626f9a767SRodney W. Grimes VOP_STRATEGY(bp); 46726f9a767SRodney W. Grimes 46826f9a767SRodney W. 
Grimes /* we definitely need to be at splbio here */ 46926f9a767SRodney W. Grimes 47026f9a767SRodney W. Grimes s = splbio(); 47126f9a767SRodney W. Grimes while ((bp->b_flags & B_DONE) == 0) { 472aa2cabb9SDavid Greenman tsleep(bp, PVM, "vnsrd", 0); 47326f9a767SRodney W. Grimes } 47426f9a767SRodney W. Grimes splx(s); 47526f9a767SRodney W. Grimes if ((bp->b_flags & B_ERROR) != 0) 47626f9a767SRodney W. Grimes error = EIO; 47726f9a767SRodney W. Grimes 47826f9a767SRodney W. Grimes /* 47926f9a767SRodney W. Grimes * free the buffer header back to the swap buffer pool 48026f9a767SRodney W. Grimes */ 48126f9a767SRodney W. Grimes relpbuf(bp); 48226f9a767SRodney W. Grimes if (error) 48326f9a767SRodney W. Grimes break; 4840d94caffSDavid Greenman 485170db9c6SJohn Dyson vm_page_set_validclean(m, (i * bsize) & (PAGE_SIZE-1), bsize); 48626f9a767SRodney W. Grimes } else { 487b1fc01b7SJohn Dyson vm_page_set_validclean(m, (i * bsize) & (PAGE_SIZE-1), bsize); 48826f9a767SRodney W. Grimes bzero((caddr_t) kva + i * bsize, bsize); 48926f9a767SRodney W. Grimes } 49026f9a767SRodney W. Grimes } 49126f9a767SRodney W. Grimes vm_pager_unmap_page(kva); 4920d94caffSDavid Greenman pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 493b1fc01b7SJohn Dyson m->flags &= ~PG_ZERO; 49426f9a767SRodney W. Grimes if (error) { 495a83c285cSDavid Greenman return VM_PAGER_ERROR; 49626f9a767SRodney W. Grimes } 49726f9a767SRodney W. Grimes return VM_PAGER_OK; 49826f9a767SRodney W. Grimes 49926f9a767SRodney W. Grimes } 50026f9a767SRodney W. Grimes 50126f9a767SRodney W. Grimes 50226f9a767SRodney W. Grimes /* 50326f9a767SRodney W. Grimes * old style vnode pager output routine 50426f9a767SRodney W. Grimes */ 50526f9a767SRodney W. Grimes int 50624a1cce3SDavid Greenman vnode_pager_input_old(object, m) 50724a1cce3SDavid Greenman vm_object_t object; 50826f9a767SRodney W. Grimes vm_page_t m; 50926f9a767SRodney W. Grimes { 510df8bae1dSRodney W. Grimes struct uio auio; 511df8bae1dSRodney W. 
Grimes struct iovec aiov; 51226f9a767SRodney W. Grimes int error; 51326f9a767SRodney W. Grimes int size; 51426f9a767SRodney W. Grimes vm_offset_t kva; 515df8bae1dSRodney W. Grimes 51626f9a767SRodney W. Grimes error = 0; 517bbc0ec52SDavid Greenman 518df8bae1dSRodney W. Grimes /* 51926f9a767SRodney W. Grimes * Return failure if beyond current EOF 52026f9a767SRodney W. Grimes */ 52124a1cce3SDavid Greenman if (m->offset >= object->un_pager.vnp.vnp_size) { 52226f9a767SRodney W. Grimes return VM_PAGER_BAD; 52326f9a767SRodney W. Grimes } else { 52426f9a767SRodney W. Grimes size = PAGE_SIZE; 52524a1cce3SDavid Greenman if (m->offset + size > object->un_pager.vnp.vnp_size) 52624a1cce3SDavid Greenman size = object->un_pager.vnp.vnp_size - m->offset; 5270bdb7528SDavid Greenman 52826f9a767SRodney W. Grimes /* 529df8bae1dSRodney W. Grimes * Allocate a kernel virtual address and initialize so that 530df8bae1dSRodney W. Grimes * we can use VOP_READ/WRITE routines. 531df8bae1dSRodney W. Grimes */ 53226f9a767SRodney W. Grimes kva = vm_pager_map_page(m); 5330bdb7528SDavid Greenman 534df8bae1dSRodney W. Grimes aiov.iov_base = (caddr_t) kva; 535df8bae1dSRodney W. Grimes aiov.iov_len = size; 536df8bae1dSRodney W. Grimes auio.uio_iov = &aiov; 537df8bae1dSRodney W. Grimes auio.uio_iovcnt = 1; 5380d94caffSDavid Greenman auio.uio_offset = m->offset; 539df8bae1dSRodney W. Grimes auio.uio_segflg = UIO_SYSSPACE; 54026f9a767SRodney W. Grimes auio.uio_rw = UIO_READ; 541df8bae1dSRodney W. Grimes auio.uio_resid = size; 542df8bae1dSRodney W. Grimes auio.uio_procp = (struct proc *) 0; 54326f9a767SRodney W. Grimes 54424a1cce3SDavid Greenman error = VOP_READ(object->handle, &auio, 0, curproc->p_ucred); 545df8bae1dSRodney W. Grimes if (!error) { 546df8bae1dSRodney W. Grimes register int count = size - auio.uio_resid; 547df8bae1dSRodney W. Grimes 548df8bae1dSRodney W. Grimes if (count == 0) 549df8bae1dSRodney W. Grimes error = EINVAL; 55026f9a767SRodney W. 
Grimes else if (count != PAGE_SIZE) 55126f9a767SRodney W. Grimes bzero((caddr_t) kva + count, PAGE_SIZE - count); 552df8bae1dSRodney W. Grimes } 55326f9a767SRodney W. Grimes vm_pager_unmap_page(kva); 554df8bae1dSRodney W. Grimes } 55526f9a767SRodney W. Grimes pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 5560d94caffSDavid Greenman m->dirty = 0; 557b1fc01b7SJohn Dyson m->flags &= ~PG_ZERO; 558a83c285cSDavid Greenman return error ? VM_PAGER_ERROR : VM_PAGER_OK; 55926f9a767SRodney W. Grimes } 56026f9a767SRodney W. Grimes 56126f9a767SRodney W. Grimes /* 56226f9a767SRodney W. Grimes * generic vnode pager input routine 56326f9a767SRodney W. Grimes */ 564170db9c6SJohn Dyson 56526f9a767SRodney W. Grimes int 56624a1cce3SDavid Greenman vnode_pager_getpages(object, m, count, reqpage) 56726f9a767SRodney W. Grimes vm_object_t object; 56824a1cce3SDavid Greenman vm_page_t *m; 56924a1cce3SDavid Greenman int count; 57024a1cce3SDavid Greenman int reqpage; 57124a1cce3SDavid Greenman { 572170db9c6SJohn Dyson int rtval; 573170db9c6SJohn Dyson struct vnode *vp; 574170db9c6SJohn Dyson vp = object->handle; 5752c4488fcSJohn Dyson rtval = VOP_GETPAGES(vp, m, count*PAGE_SIZE, reqpage, 0); 576170db9c6SJohn Dyson if (rtval == EOPNOTSUPP) 5770b8253a7SBruce Evans return vnode_pager_leaf_getpages(object, m, count, reqpage); 578170db9c6SJohn Dyson else 579170db9c6SJohn Dyson return rtval; 580170db9c6SJohn Dyson } 581170db9c6SJohn Dyson 582170db9c6SJohn Dyson static int 583170db9c6SJohn Dyson vnode_pager_leaf_getpages(object, m, count, reqpage) 584170db9c6SJohn Dyson vm_object_t object; 585170db9c6SJohn Dyson vm_page_t *m; 586170db9c6SJohn Dyson int count; 587170db9c6SJohn Dyson int reqpage; 588170db9c6SJohn Dyson { 58924a1cce3SDavid Greenman vm_offset_t kva, foff; 59024a1cce3SDavid Greenman int i, size, bsize, first, firstaddr; 59126f9a767SRodney W. 
Grimes struct vnode *dp, *vp; 592efc68ce1SDavid Greenman int runpg; 593efc68ce1SDavid Greenman int runend; 5940bdb7528SDavid Greenman struct buf *bp; 59526f9a767SRodney W. Grimes int s; 59626f9a767SRodney W. Grimes int error = 0; 59726f9a767SRodney W. Grimes 59824a1cce3SDavid Greenman vp = object->handle; 5992c4488fcSJohn Dyson if (vp->v_mount == NULL) 6002c4488fcSJohn Dyson return VM_PAGER_BAD; 6012c4488fcSJohn Dyson 60226f9a767SRodney W. Grimes bsize = vp->v_mount->mnt_stat.f_iosize; 60326f9a767SRodney W. Grimes 60426f9a767SRodney W. Grimes /* get the UNDERLYING device for the file with VOP_BMAP() */ 605bbc0ec52SDavid Greenman 60626f9a767SRodney W. Grimes /* 607bbc0ec52SDavid Greenman * originally, we did not check for an error return value -- assuming 608bbc0ec52SDavid Greenman * an fs always has a bmap entry point -- that assumption is wrong!!! 60926f9a767SRodney W. Grimes */ 6100d94caffSDavid Greenman foff = m[reqpage]->offset; 611bbc0ec52SDavid Greenman 61226f9a767SRodney W. Grimes /* 61316f62314SDavid Greenman * if we can't bmap, use old VOP code 61426f9a767SRodney W. Grimes */ 615c83ebe77SJohn Dyson if (VOP_BMAP(vp, 0, &dp, 0, NULL, NULL)) { 61626f9a767SRodney W. Grimes for (i = 0; i < count; i++) { 61726f9a767SRodney W. Grimes if (i != reqpage) { 61826f9a767SRodney W. Grimes vnode_pager_freepage(m[i]); 61926f9a767SRodney W. Grimes } 62026f9a767SRodney W. Grimes } 621976e77fcSDavid Greenman cnt.v_vnodein++; 622976e77fcSDavid Greenman cnt.v_vnodepgsin++; 62324a1cce3SDavid Greenman return vnode_pager_input_old(object, m[reqpage]); 624bbc0ec52SDavid Greenman 62526f9a767SRodney W. Grimes /* 62626f9a767SRodney W. Grimes * if the blocksize is smaller than a page size, then use 62726f9a767SRodney W. Grimes * special small filesystem code. NFS sometimes has a small 62826f9a767SRodney W. Grimes * blocksize, but it can handle large reads itself. 62926f9a767SRodney W. Grimes */ 63026f9a767SRodney W. Grimes } else if ((PAGE_SIZE / bsize) > 1 && 63126f9a767SRodney W. 
Grimes (vp->v_mount->mnt_stat.f_type != MOUNT_NFS)) { 63226f9a767SRodney W. Grimes 63326f9a767SRodney W. Grimes for (i = 0; i < count; i++) { 63426f9a767SRodney W. Grimes if (i != reqpage) { 63526f9a767SRodney W. Grimes vnode_pager_freepage(m[i]); 63626f9a767SRodney W. Grimes } 63726f9a767SRodney W. Grimes } 638976e77fcSDavid Greenman cnt.v_vnodein++; 639976e77fcSDavid Greenman cnt.v_vnodepgsin++; 64024a1cce3SDavid Greenman return vnode_pager_input_smlfs(object, m[reqpage]); 64126f9a767SRodney W. Grimes } 64226f9a767SRodney W. Grimes /* 6430d94caffSDavid Greenman * if ANY DEV_BSIZE blocks are valid on a large filesystem block 6440d94caffSDavid Greenman * then, the entire page is valid -- 6450d94caffSDavid Greenman */ 6460d94caffSDavid Greenman if (m[reqpage]->valid) { 6470d94caffSDavid Greenman m[reqpage]->valid = VM_PAGE_BITS_ALL; 6480d94caffSDavid Greenman for (i = 0; i < count; i++) { 6490d94caffSDavid Greenman if (i != reqpage) 6500d94caffSDavid Greenman vnode_pager_freepage(m[i]); 6510d94caffSDavid Greenman } 6520d94caffSDavid Greenman return VM_PAGER_OK; 6530d94caffSDavid Greenman } 6540bdb7528SDavid Greenman 6550d94caffSDavid Greenman /* 65626f9a767SRodney W. Grimes * here on direct device I/O 65726f9a767SRodney W. Grimes */ 65826f9a767SRodney W. Grimes 659efc68ce1SDavid Greenman firstaddr = -1; 66026f9a767SRodney W. Grimes /* 661efc68ce1SDavid Greenman * calculate the run that includes the required page 66226f9a767SRodney W. 
Grimes */ 663efc68ce1SDavid Greenman for(first = 0, i = 0; i < count; i = runend) { 664efc68ce1SDavid Greenman firstaddr = vnode_pager_addr(vp, m[i]->offset, &runpg); 665efc68ce1SDavid Greenman if (firstaddr == -1) { 66624a1cce3SDavid Greenman if (i == reqpage && foff < object->un_pager.vnp.vnp_size) { 66724a1cce3SDavid Greenman panic("vnode_pager_putpages: unexpected missing page: firstaddr: %d, foff: %ld, vnp_size: %d", 66824a1cce3SDavid Greenman firstaddr, foff, object->un_pager.vnp.vnp_size); 669efc68ce1SDavid Greenman } 67026f9a767SRodney W. Grimes vnode_pager_freepage(m[i]); 671efc68ce1SDavid Greenman runend = i + 1; 672efc68ce1SDavid Greenman first = runend; 673efc68ce1SDavid Greenman continue; 674efc68ce1SDavid Greenman } 675efc68ce1SDavid Greenman runend = i + runpg; 676efc68ce1SDavid Greenman if (runend <= reqpage) { 677efc68ce1SDavid Greenman int j; 678efc68ce1SDavid Greenman for (j = i; j < runend; j++) { 679efc68ce1SDavid Greenman vnode_pager_freepage(m[j]); 680efc68ce1SDavid Greenman } 68126f9a767SRodney W. Grimes } else { 682efc68ce1SDavid Greenman if (runpg < (count - first)) { 683efc68ce1SDavid Greenman for (i = first + runpg; i < count; i++) 68426f9a767SRodney W. Grimes vnode_pager_freepage(m[i]); 685efc68ce1SDavid Greenman count = first + runpg; 68626f9a767SRodney W. Grimes } 687efc68ce1SDavid Greenman break; 68826f9a767SRodney W. Grimes } 689efc68ce1SDavid Greenman first = runend; 690efc68ce1SDavid Greenman } 69126f9a767SRodney W. Grimes 69226f9a767SRodney W. Grimes /* 693bbc0ec52SDavid Greenman * the first and last page have been calculated now, move input pages 694bbc0ec52SDavid Greenman * to be zero based... 69526f9a767SRodney W. Grimes */ 69626f9a767SRodney W. Grimes if (first != 0) { 69726f9a767SRodney W. Grimes for (i = first; i < count; i++) { 69826f9a767SRodney W. Grimes m[i - first] = m[i]; 69926f9a767SRodney W. Grimes } 70026f9a767SRodney W. Grimes count -= first; 70126f9a767SRodney W. Grimes reqpage -= first; 70226f9a767SRodney W. 
Grimes } 703efc68ce1SDavid Greenman 70426f9a767SRodney W. Grimes /* 70526f9a767SRodney W. Grimes * calculate the file virtual address for the transfer 70626f9a767SRodney W. Grimes */ 7070d94caffSDavid Greenman foff = m[0]->offset; 70826f9a767SRodney W. Grimes 70926f9a767SRodney W. Grimes /* 71026f9a767SRodney W. Grimes * calculate the size of the transfer 71126f9a767SRodney W. Grimes */ 71226f9a767SRodney W. Grimes size = count * PAGE_SIZE; 71324a1cce3SDavid Greenman if ((foff + size) > object->un_pager.vnp.vnp_size) 71424a1cce3SDavid Greenman size = object->un_pager.vnp.vnp_size - foff; 71526f9a767SRodney W. Grimes 71626f9a767SRodney W. Grimes /* 71726f9a767SRodney W. Grimes * round up physical size for real devices 71826f9a767SRodney W. Grimes */ 71926f9a767SRodney W. Grimes if (dp->v_type == VBLK || dp->v_type == VCHR) 72026f9a767SRodney W. Grimes size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1); 72126f9a767SRodney W. Grimes 7226d40c3d3SDavid Greenman bp = getpbuf(); 72316f62314SDavid Greenman kva = (vm_offset_t) bp->b_data; 72416f62314SDavid Greenman 72526f9a767SRodney W. Grimes /* 72626f9a767SRodney W. Grimes * and map the pages to be read into the kva 72726f9a767SRodney W. Grimes */ 72816f62314SDavid Greenman pmap_qenter(kva, m, count); 72926f9a767SRodney W. Grimes 73026f9a767SRodney W. Grimes /* build a minimal buffer header */ 73126f9a767SRodney W. Grimes bp->b_flags = B_BUSY | B_READ | B_CALL; 73226f9a767SRodney W. Grimes bp->b_iodone = vnode_pager_iodone; 73326f9a767SRodney W. Grimes /* B_PHYS is not set, but it is nice to fill this in */ 73426f9a767SRodney W. Grimes bp->b_proc = curproc; 73526f9a767SRodney W. Grimes bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred; 73626f9a767SRodney W. Grimes if (bp->b_rcred != NOCRED) 73726f9a767SRodney W. Grimes crhold(bp->b_rcred); 73826f9a767SRodney W. Grimes if (bp->b_wcred != NOCRED) 73926f9a767SRodney W. 
Grimes crhold(bp->b_wcred); 740187f0071SDavid Greenman bp->b_blkno = firstaddr; 7410d94caffSDavid Greenman pbgetvp(dp, bp); 74226f9a767SRodney W. Grimes bp->b_bcount = size; 74326f9a767SRodney W. Grimes bp->b_bufsize = size; 74426f9a767SRodney W. Grimes 745976e77fcSDavid Greenman cnt.v_vnodein++; 746976e77fcSDavid Greenman cnt.v_vnodepgsin += count; 747976e77fcSDavid Greenman 74826f9a767SRodney W. Grimes /* do the input */ 74926f9a767SRodney W. Grimes VOP_STRATEGY(bp); 750976e77fcSDavid Greenman 75126f9a767SRodney W. Grimes s = splbio(); 75226f9a767SRodney W. Grimes /* we definitely need to be at splbio here */ 75326f9a767SRodney W. Grimes 75426f9a767SRodney W. Grimes while ((bp->b_flags & B_DONE) == 0) { 755aa2cabb9SDavid Greenman tsleep(bp, PVM, "vnread", 0); 75626f9a767SRodney W. Grimes } 75726f9a767SRodney W. Grimes splx(s); 75826f9a767SRodney W. Grimes if ((bp->b_flags & B_ERROR) != 0) 75926f9a767SRodney W. Grimes error = EIO; 76026f9a767SRodney W. Grimes 76126f9a767SRodney W. Grimes if (!error) { 76226f9a767SRodney W. Grimes if (size != count * PAGE_SIZE) 76326f9a767SRodney W. Grimes bzero((caddr_t) kva + size, PAGE_SIZE * count - size); 76426f9a767SRodney W. Grimes } 76516f62314SDavid Greenman pmap_qremove(kva, count); 76626f9a767SRodney W. Grimes 76726f9a767SRodney W. Grimes /* 76826f9a767SRodney W. Grimes * free the buffer header back to the swap buffer pool 76926f9a767SRodney W. Grimes */ 77026f9a767SRodney W. Grimes relpbuf(bp); 77126f9a767SRodney W. Grimes 77226f9a767SRodney W. Grimes for (i = 0; i < count; i++) { 773fff93ab6SDavid Greenman pmap_clear_modify(VM_PAGE_TO_PHYS(m[i])); 7740d94caffSDavid Greenman m[i]->dirty = 0; 7750d94caffSDavid Greenman m[i]->valid = VM_PAGE_BITS_ALL; 776b1fc01b7SJohn Dyson m[i]->flags &= ~PG_ZERO; 77726f9a767SRodney W. Grimes if (i != reqpage) { 778bbc0ec52SDavid Greenman 77926f9a767SRodney W. 
Grimes /* 780bbc0ec52SDavid Greenman * whether or not to leave the page activated is up in 781bbc0ec52SDavid Greenman * the air, but we should put the page on a page queue 782bbc0ec52SDavid Greenman * somewhere. (it already is in the object). Result: 783bbc0ec52SDavid Greenman * It appears that emperical results show that 784bbc0ec52SDavid Greenman * deactivating pages is best. 78526f9a767SRodney W. Grimes */ 786bbc0ec52SDavid Greenman 78726f9a767SRodney W. Grimes /* 788bbc0ec52SDavid Greenman * just in case someone was asking for this page we 789bbc0ec52SDavid Greenman * now tell them that it is ok to use 79026f9a767SRodney W. Grimes */ 79126f9a767SRodney W. Grimes if (!error) { 79226f9a767SRodney W. Grimes vm_page_deactivate(m[i]); 79326f9a767SRodney W. Grimes PAGE_WAKEUP(m[i]); 79426f9a767SRodney W. Grimes } else { 79526f9a767SRodney W. Grimes vnode_pager_freepage(m[i]); 79626f9a767SRodney W. Grimes } 79726f9a767SRodney W. Grimes } 79826f9a767SRodney W. Grimes } 79926f9a767SRodney W. Grimes if (error) { 80024a1cce3SDavid Greenman printf("vnode_pager_getpages: I/O read error\n"); 80126f9a767SRodney W. Grimes } 802a83c285cSDavid Greenman return (error ? VM_PAGER_ERROR : VM_PAGER_OK); 80326f9a767SRodney W. Grimes } 80426f9a767SRodney W. 
Grimes 805170db9c6SJohn Dyson int 806170db9c6SJohn Dyson vnode_pager_putpages(object, m, count, sync, rtvals) 807170db9c6SJohn Dyson vm_object_t object; 808170db9c6SJohn Dyson vm_page_t *m; 809170db9c6SJohn Dyson int count; 810170db9c6SJohn Dyson boolean_t sync; 811170db9c6SJohn Dyson int *rtvals; 812170db9c6SJohn Dyson { 813170db9c6SJohn Dyson int rtval; 814170db9c6SJohn Dyson struct vnode *vp; 815170db9c6SJohn Dyson vp = object->handle; 8162c4488fcSJohn Dyson rtval = VOP_PUTPAGES(vp, m, count*PAGE_SIZE, sync, rtvals, 0); 817170db9c6SJohn Dyson if (rtval == EOPNOTSUPP) 8180b8253a7SBruce Evans return vnode_pager_leaf_putpages(object, m, count, sync, rtvals); 819170db9c6SJohn Dyson else 820170db9c6SJohn Dyson return rtval; 821170db9c6SJohn Dyson } 822170db9c6SJohn Dyson 82326f9a767SRodney W. Grimes /* 82426f9a767SRodney W. Grimes * generic vnode pager output routine 82526f9a767SRodney W. Grimes */ 826170db9c6SJohn Dyson static int 827170db9c6SJohn Dyson vnode_pager_leaf_putpages(object, m, count, sync, rtvals) 82824a1cce3SDavid Greenman vm_object_t object; 82926f9a767SRodney W. Grimes vm_page_t *m; 83026f9a767SRodney W. Grimes int count; 83124a1cce3SDavid Greenman boolean_t sync; 83226f9a767SRodney W. Grimes int *rtvals; 83326f9a767SRodney W. Grimes { 834f6b04d2bSDavid Greenman int i; 83526f9a767SRodney W. Grimes 836f6b04d2bSDavid Greenman struct vnode *vp; 837f6b04d2bSDavid Greenman int maxsize, ncount; 838f6b04d2bSDavid Greenman struct uio auio; 839f6b04d2bSDavid Greenman struct iovec aiov; 840f6b04d2bSDavid Greenman int error; 84126f9a767SRodney W. Grimes 84224a1cce3SDavid Greenman vp = object->handle;; 84326f9a767SRodney W. Grimes for (i = 0; i < count; i++) 84426f9a767SRodney W. Grimes rtvals[i] = VM_PAGER_AGAIN; 84526f9a767SRodney W. Grimes 8460d94caffSDavid Greenman if ((int) m[0]->offset < 0) { 84724a1cce3SDavid Greenman printf("vnode_pager_putpages: attempt to write meta-data!!! 
-- 0x%x(%x)\n", m[0]->offset, m[0]->dirty); 848f6b04d2bSDavid Greenman rtvals[0] = VM_PAGER_BAD; 849f6b04d2bSDavid Greenman return VM_PAGER_BAD; 8500d94caffSDavid Greenman } 8510bdb7528SDavid Greenman 852f6b04d2bSDavid Greenman maxsize = count * PAGE_SIZE; 853f6b04d2bSDavid Greenman ncount = count; 85426f9a767SRodney W. Grimes 85524a1cce3SDavid Greenman if (maxsize + m[0]->offset > object->un_pager.vnp.vnp_size) { 85624a1cce3SDavid Greenman if (object->un_pager.vnp.vnp_size > m[0]->offset) 85724a1cce3SDavid Greenman maxsize = object->un_pager.vnp.vnp_size - m[0]->offset; 8585f55e841SDavid Greenman else 8595f55e841SDavid Greenman maxsize = 0; 860f6b04d2bSDavid Greenman ncount = (maxsize + PAGE_SIZE - 1) / PAGE_SIZE; 861f6b04d2bSDavid Greenman if (ncount < count) { 862f6b04d2bSDavid Greenman for (i = ncount; i < count; i++) { 863f6b04d2bSDavid Greenman rtvals[i] = VM_PAGER_BAD; 864f6b04d2bSDavid Greenman } 865f6b04d2bSDavid Greenman if (ncount == 0) { 86624a1cce3SDavid Greenman printf("vnode_pager_putpages: write past end of file: %d, %d\n", 86724a1cce3SDavid Greenman m[0]->offset, object->un_pager.vnp.vnp_size); 86826f9a767SRodney W. Grimes return rtvals[0]; 86926f9a767SRodney W. Grimes } 870f6b04d2bSDavid Greenman } 871f6b04d2bSDavid Greenman } 87226f9a767SRodney W. Grimes 87326f9a767SRodney W. Grimes for (i = 0; i < count; i++) { 8745f55e841SDavid Greenman m[i]->busy++; 875f6b04d2bSDavid Greenman m[i]->flags &= ~PG_BUSY; 87626f9a767SRodney W. 
Grimes } 877f6b04d2bSDavid Greenman 878f6b04d2bSDavid Greenman aiov.iov_base = (caddr_t) 0; 879f6b04d2bSDavid Greenman aiov.iov_len = maxsize; 880f6b04d2bSDavid Greenman auio.uio_iov = &aiov; 881f6b04d2bSDavid Greenman auio.uio_iovcnt = 1; 882f6b04d2bSDavid Greenman auio.uio_offset = m[0]->offset; 883f6b04d2bSDavid Greenman auio.uio_segflg = UIO_NOCOPY; 884f6b04d2bSDavid Greenman auio.uio_rw = UIO_WRITE; 885f6b04d2bSDavid Greenman auio.uio_resid = maxsize; 886f6b04d2bSDavid Greenman auio.uio_procp = (struct proc *) 0; 887f6b04d2bSDavid Greenman error = VOP_WRITE(vp, &auio, IO_VMIO, curproc->p_ucred); 888976e77fcSDavid Greenman cnt.v_vnodeout++; 889f6b04d2bSDavid Greenman cnt.v_vnodepgsout += ncount; 890f6b04d2bSDavid Greenman 891f6b04d2bSDavid Greenman if (error) { 89224a1cce3SDavid Greenman printf("vnode_pager_putpages: I/O error %d\n", error); 893f6b04d2bSDavid Greenman } 894f6b04d2bSDavid Greenman if (auio.uio_resid) { 89524a1cce3SDavid Greenman printf("vnode_pager_putpages: residual I/O %d at %d\n", auio.uio_resid, m[0]->offset); 89626f9a767SRodney W. Grimes } 89726f9a767SRodney W. Grimes for (i = 0; i < count; i++) { 8985f55e841SDavid Greenman m[i]->busy--; 899f6b04d2bSDavid Greenman if (i < ncount) { 90026f9a767SRodney W. Grimes rtvals[i] = VM_PAGER_OK; 90126f9a767SRodney W. Grimes } 902f6b04d2bSDavid Greenman if ((m[i]->busy == 0) && (m[i]->flags & PG_WANTED)) 90324a1cce3SDavid Greenman wakeup(m[i]); 90426f9a767SRodney W. Grimes } 905f6b04d2bSDavid Greenman return rtvals[0]; 90626f9a767SRodney W. 
Grimes } 907f6b04d2bSDavid Greenman 908f6b04d2bSDavid Greenman struct vnode * 90924a1cce3SDavid Greenman vnode_pager_lock(object) 91024a1cce3SDavid Greenman vm_object_t object; 91124a1cce3SDavid Greenman { 91224a1cce3SDavid Greenman for (; object != NULL; object = object->backing_object) { 91324a1cce3SDavid Greenman if (object->type != OBJT_VNODE) 914f6b04d2bSDavid Greenman continue; 915f6b04d2bSDavid Greenman 91624a1cce3SDavid Greenman VOP_LOCK(object->handle); 91724a1cce3SDavid Greenman return object->handle; 91826f9a767SRodney W. Grimes } 91924a1cce3SDavid Greenman return NULL; 920f6b04d2bSDavid Greenman } 921