/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>
#include <sys/user.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);
static void vnode_pager_update_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_getvp(vm_object_t, struct vnode **, bool *);

const struct pagerops vnodepagerops = {
	.pgo_kvme_type = KVME_TYPE_VNODE,
	.pgo_alloc = vnode_pager_alloc,
	.pgo_dealloc = vnode_pager_dealloc,
	.pgo_getpages = vnode_pager_getpages,
	.pgo_getpages_async = vnode_pager_getpages_async,
	.pgo_putpages = vnode_pager_putpages,
	.pgo_haspage = vnode_pager_haspage,
	.pgo_update_writecount = vnode_pager_update_writecount,
	.pgo_release_writecount = vnode_pager_release_writecount,
	.pgo_set_writeable_dirty = vm_object_set_writeable_dirty_,
	.pgo_mightbedirty = vm_object_mightbedirty_,
	.pgo_getvp = vnode_pager_getvp,
};

static struct domainset *vnode_domainset = NULL;

SYSCTL_PROC(_debug, OID_AUTO, vnode_domainset,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RW, &vnode_domainset, 0,
    sysctl_handle_domainset, "A", "Default vnode NUMA policy");

static int nvnpbufs;
SYSCTL_INT(_vm, OID_AUTO, vnode_pbufs, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &nvnpbufs, 0, "number of physical buffers allocated for vnode pager");

static uma_zone_t vnode_pbuf_zone;

static void
vnode_pager_init(void *dummy)
{

#ifdef __LP64__
	nvnpbufs = nswbuf * 2;
#else
	nvnpbufs = nswbuf / 2;
#endif
	TUNABLE_INT_FETCH("vm.vnode_pbufs", &nvnpbufs);
	vnode_pbuf_zone = pbuf_zsecond_create("vnpbuf", nvnpbufs);
}
SYSINIT(vnode_pager, SI_SUB_CPU, SI_ORDER_ANY, vnode_pager_init, NULL);
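
/*
 * Usage note (illustrative, hedged): the sysctl above is CTLFLAG_RDTUN,
 * so the pbuf count can only be set as a boot-time tunable, e.g. in
 * loader.conf:
 *
 *	vm.vnode_pbufs="256"
 *
 * vnode_pager_init() then picks the value up via TUNABLE_INT_FETCH()
 * in place of the nswbuf-derived default computed above.
 */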

/* Create the VM system backing object for this vnode */
static int
vnode_create_vobject_any(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size;
	bool last;

	object = vp->v_object;
	if (object != NULL)
		return (0);

	if (isize == VNODE_NO_SIZE) {
		if (vn_getsize_locked(vp, &size, td->td_ucred) != 0)
			return (0);
	} else {
		size = isize;
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.  We still have
	 * to serialize with vnode_pager_dealloc() for the last
	 * potential reference.
	 */
	VM_OBJECT_RLOCK(object);
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);
	if (last)
		vrele(vp);

	VNASSERT(vp->v_object != NULL, vp, ("%s: NULL object", __func__));

	return (0);
}

int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	VNASSERT(!vn_isdisk(vp), vp, ("%s: disk vnode", __func__));
	VNASSERT(isize == VNODE_NO_SIZE || isize >= 0, vp,
	    ("%s: invalid size (%jd)", __func__, (intmax_t)isize));

	if (!vn_canvmio(vp))
		return (0);

	return (vnode_create_vobject_any(vp, isize, td));
}

int
vnode_create_disk_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	VNASSERT(isize > 0, vp, ("%s: invalid size (%jd)", __func__,
	    (intmax_t)isize));

	return (vnode_create_vobject_any(vp, isize, td));
}
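
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a
 * filesystem that supports VMIO would typically create the backing
 * object with the vnode locked during open, roughly:
 *
 *	if (vn_canvmio(vp))
 *		error = vnode_create_vobject(vp, VNODE_NO_SIZE, td);
 *
 * Passing VNODE_NO_SIZE makes vnode_create_vobject_any() look up the
 * current file size itself via vn_getsize_locked().
 */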

void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL || obj->handle != vp)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	MPASS(obj->type == OBJT_VNODE);
	umtx_shm_object_terminated(obj);
	if (obj->ref_count == 0) {
		KASSERT((obj->flags & OBJ_DEAD) == 0,
		    ("vnode_destroy_vobject: Terminating dead object"));
		vm_object_set_flag(obj, OBJ_DEAD);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(obj);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(obj);
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *)handle;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
	VNPASS(vp->v_usecount > 0, vp);
retry:
	object = vp->v_object;

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;
		object->domain.dr_policy = vnode_domainset;
		object->handle = handle;
		if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_set_flag(object, OBJ_SIZEVNLOCK);
			VM_OBJECT_WUNLOCK(object);
		}
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were allocating.
			 */
			VI_UNLOCK(vp);
			VM_OBJECT_WLOCK(object);
			KASSERT(object->ref_count == 1,
			    ("leaked ref %p %d", object, object->ref_count));
			object->type = OBJT_DEAD;
			refcount_init(&object->ref_count, 0);
			VM_OBJECT_WUNLOCK(object);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
		vrefact(vp);
	} else {
		vm_object_reference(object);
#if VM_NRESERVLEVEL > 0
		if ((object->flags & OBJ_COLORED) == 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_color(object, 0);
			VM_OBJECT_WUNLOCK(object);
		}
#endif
	}
	return (object);
}

/*
 * The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VI_LOCK(vp);

	/*
	 * vm_map_entry_set_vnode_text() cannot reach this vnode by
	 * following object->handle.  Clear all text references now.
	 * This also clears the transient references from
	 * kern_execve(), which is fine because dead_vnodeops uses nop
	 * for VOP_UNSET_TEXT().
	 */
	if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);
	VM_OBJECT_WUNLOCK(object);
	if (refs > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	uintptr_t lockstate;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || VN_IS_DOOMED(vp))
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	lockstate = VM_OBJECT_DROP(object);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VM_OBJECT_PICKUP(object, lockstate);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			/*
			 * The BMAP vop can report a partial block in the
			 * 'after', but must not report blocks after EOF.
			 * Assert the latter, and truncate 'after' in case
			 * of the former.
			 */
			KASSERT((reqblock + *after) * pagesperblock <
			    roundup2(object->size, pagesperblock),
			    ("%s: reqblock %jd after %d size %ju", __func__,
			    (intmax_t)reqblock, *after,
			    (uintmax_t)object->size));
			*after *= pagesperblock;
			*after += pagesperblock - (poff + 1);
			if (pindex + *after >= object->size)
				*after = object->size - 1 - pindex;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}
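
/*
 * Worked example (illustrative only): with PAGE_SIZE = 4096 and an
 * f_iosize of 32768, pagesperblock = 8; pindex 11 maps to reqblock 1
 * with poff 3.  If VOP_BMAP() reports before = 1 and after = 2 (in
 * filesystem blocks), the conversion above yields *before = 1 * 8 + 3 =
 * 11 pages and *after = 2 * 8 + (8 - 4) = 20 pages, clipped so that
 * pindex + *after stays below object->size.
 */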

/*
 * Internal routine clearing partial-page content
 */
static void
vnode_pager_subpage_purge(struct vm_page *m, int base, int end)
{
	int size;

	KASSERT(end > base && end <= PAGE_SIZE,
	    ("%s: start %d end %d", __func__, base, end));
	size = end - base;

	/*
	 * Clear out partial-page garbage in case
	 * the page has been mapped.
	 */
	pmap_zero_page_area(m, base, size);

	/*
	 * Update the valid bits to reflect the blocks
	 * that have been zeroed.  Some of these valid
	 * bits may have already been set.
	 */
	vm_page_set_valid_range(m, base, size);

	/*
	 * Round up "base" to the next block boundary so
	 * that the dirty bit for a partially zeroed
	 * block is not cleared.
	 */
	base = roundup2(base, DEV_BSIZE);
	end = rounddown2(end, DEV_BSIZE);

	if (end > base) {
		/*
		 * Clear out partial-page dirty bits.
		 *
		 * note that we do not clear out the
		 * valid bits.  This would prevent
		 * bogus_page replacement from working
		 * properly.
		 */
		vm_page_clear_dirty(m, base, end - base);
	}
}
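
/*
 * Worked example (illustrative only): purging bytes [100, 1200) of a page
 * with DEV_BSIZE = 512 zeroes and validates all 1100 bytes, but since base
 * rounds up to 512 and end rounds down to 1024, dirty bits are cleared only
 * for the fully zeroed block [512, 1024); the partially zeroed blocks at
 * either end keep their dirty bits so pending writes there are not lost.
 */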

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
#ifdef DEBUG_VFS_LOCKS
	{
		struct mount *mp;

		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0)
			assert_vop_elocked(vp,
			    "vnode_pager_setsize and not locked vnode");
	}
#endif
	VM_OBJECT_WLOCK(object);
	if (object->type == OBJT_DEAD) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	KASSERT(object->type == OBJT_VNODE,
	    ("not vnode-backed object %p", object));
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if (!(nsize & PAGE_MASK))
			goto out;
		m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
		if (m == NULL)
			goto out;
		if (!vm_page_none_valid(m))
			vnode_pager_subpage_purge(m, (int)nsize & PAGE_MASK,
			    PAGE_SIZE);
		vm_page_xunbusy(m);
	}
out:
#if defined(__powerpc__) && !defined(__powerpc64__)
	object->un_pager.vnp.vnp_size = nsize;
#else
	atomic_store_64(&object->un_pager.vnp.vnp_size, nsize);
#endif
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
}
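
/*
 * Worked example (illustrative only): truncating a 10000-byte file to
 * 6000 bytes with PAGE_SIZE = 4096 gives nobjsize = OFF_TO_IDX(6000 +
 * PAGE_MASK) = 2, so pages at pindex >= 2 are removed outright.  If the
 * now partially backed page at pindex 1 is resident and valid, its tail
 * [1904, 4096) (i.e. 6000 & PAGE_MASK onward) is zeroed by
 * vnode_pager_subpage_purge().
 */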

/*
 * Lets the VM system know about the purged range for a file.  We toss away
 * any cached pages in the associated object that are affected by the purge
 * operation.  Partial-page areas that are not aligned to page boundaries are
 * zeroed, and dirty blocks in DEV_BSIZE units within a page are not flushed.
 */
void
vnode_pager_purge_range(struct vnode *vp, vm_ooffset_t start, vm_ooffset_t end)
{
	struct vm_page *m;
	struct vm_object *object;
	vm_pindex_t pi, pistart, piend;
	bool same_page;
	int base, pend;

	ASSERT_VOP_LOCKED(vp, "vnode_pager_purge_range");

	object = vp->v_object;
	pi = start + PAGE_MASK < start ? OBJ_MAX_SIZE :
	    OFF_TO_IDX(start + PAGE_MASK);
	pistart = OFF_TO_IDX(start);
	piend = end == 0 ? OBJ_MAX_SIZE : OFF_TO_IDX(end);
	same_page = pistart == piend;
	if ((end != 0 && end <= start) || object == NULL)
		return;

	VM_OBJECT_WLOCK(object);

	if (pi < piend)
		vm_object_page_remove(object, pi, piend, 0);

	if ((start & PAGE_MASK) != 0) {
		base = (int)start & PAGE_MASK;
		pend = same_page ? (int)end & PAGE_MASK : PAGE_SIZE;
		m = vm_page_grab(object, pistart, VM_ALLOC_NOCREAT);
		if (m != NULL) {
			if (!vm_page_none_valid(m))
				vnode_pager_subpage_purge(m, base, pend);
			vm_page_xunbusy(m);
		}
		if (same_page)
			goto out;
	}
	if ((end & PAGE_MASK) != 0) {
		base = same_page ? (int)start & PAGE_MASK : 0;
		pend = (int)end & PAGE_MASK;
		m = vm_page_grab(object, piend, VM_ALLOC_NOCREAT);
		if (m != NULL) {
			if (!vm_page_none_valid(m))
				vnode_pager_subpage_purge(m, base, pend);
			vm_page_xunbusy(m);
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
}
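
/*
 * Worked example (illustrative only): purging [1000, 9000) with
 * PAGE_SIZE = 4096 removes the whole page 1 ([4096, 8192)), zeroes the
 * tail of page 0 ([1000, 4096)) and the head of page 2 ([8192, 9000)),
 * touching only pages that are already resident (VM_ALLOC_NOCREAT).
 * An 'end' of 0 means "to the end of the object" (OBJ_MAX_SIZE).
 */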

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (VN_IS_DOOMED(vp))
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}
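
/*
 * Worked example (illustrative only): with bsize = 32768 and
 * address = 40960, vblock = 1 and voffset = 8192.  If VOP_BMAP() maps
 * file block 1 to disk block 1024 with a run of 3 further blocks, then
 * *rtaddress = 1024 + 8192 / DEV_BSIZE = 1040 and, with PAGE_SIZE = 4096,
 * *run = (3 + 1) * 8 - 2 = 30 contiguous pages readable from there.
 */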

static void
vnode_pager_input_bdone(struct buf *bp)
{
	runningbufwakeup(bp);
	bdone(bp);
}

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (VN_IS_DOOMED(vp))
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = vnode_pager_input_bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			(void)runningbufclaim(bp, bp->b_bufsize);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0) {
				KASSERT(bp->b_error != 0,
				    ("%s: buf error but b_error == 0\n", __func__));
				error = bp->b_error;
			}

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			uma_zfree(vnode_pbuf_zone, bp);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		vm_page_bits_set(m, &m->valid, bits);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}
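
/*
 * Worked example (illustrative only): for a filesystem with a 512-byte
 * block size and PAGE_SIZE = 4096 the loop above performs up to eight
 * single-block reads per page, skipping blocks whose valid bits are
 * already set and zero-filling blocks that map to no disk block or lie
 * beyond vnp_size.
 */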

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		vm_page_valid(m);
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	struct vnode *vp;
	int rtval;

	/* Handle is stable with paging in progress. */
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	return rtval;
}

static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages_async not implemented\n"));
	return (rtval);
}
85990effb23SGleb Smirnoff
860ce75f2c3SMike Smith /*
86190effb23SGleb Smirnoff * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
86290effb23SGleb Smirnoff * local filesystems, where partially valid pages can only occur at
86390effb23SGleb Smirnoff * the end of file.
864d15b55c5SKonstantin Belousov */
865d15b55c5SKonstantin Belousov int
vnode_pager_local_getpages(struct vop_getpages_args * ap)866d15b55c5SKonstantin Belousov vnode_pager_local_getpages(struct vop_getpages_args *ap)
867d15b55c5SKonstantin Belousov {
86890effb23SGleb Smirnoff
869b0cd2017SGleb Smirnoff return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
870b0cd2017SGleb Smirnoff ap->a_rbehind, ap->a_rahead, NULL, NULL));
87190effb23SGleb Smirnoff }
87290effb23SGleb Smirnoff
87390effb23SGleb Smirnoff int
vnode_pager_local_getpages_async(struct vop_getpages_async_args * ap)87490effb23SGleb Smirnoff vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
87590effb23SGleb Smirnoff {
876abfdf767SKonstantin Belousov int error;
87790effb23SGleb Smirnoff
878abfdf767SKonstantin Belousov error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
879abfdf767SKonstantin Belousov ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
880abfdf767SKonstantin Belousov if (error != 0 && ap->a_iodone != NULL)
881abfdf767SKonstantin Belousov ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
882abfdf767SKonstantin Belousov return (error);
883d15b55c5SKonstantin Belousov }

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	struct bufobj *bo;
	struct buf *bp;
	off_t foff;
#ifdef INVARIANTS
	off_t blkno0;
#endif
	int bsize, pagesperblock;
	int error, before, after, rbehind, rahead, poff, i;
	int bytecount, secmask;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("%s does not support devices", __func__));

	if (VN_IS_DOOMED(vp))
		return (VM_PAGER_BAD);

	object = vp->v_object;
	foff = IDX_TO_OFF(m[0]->pindex);
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;

	KASSERT(foff < object->un_pager.vnp.vnp_size,
	    ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
	KASSERT(count <= atop(maxphys),
	    ("%s: requested %d pages", __func__, count));

	/*
	 * The last page has valid blocks.  Invalid part can only
	 * exist at the end of file, and the page is made fully valid
	 * by zeroing in vm_pager_get_pages().
	 */
	if (!vm_page_none_valid(m[count - 1]) && --count == 0) {
		if (iodone != NULL)
			iodone(arg, m, 1, 0);
		return (VM_PAGER_OK);
	}

	bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);
	MPASS((bp->b_flags & B_MAXPHYS) != 0);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
	if (error == EOPNOTSUPP) {
		uma_zfree(vnode_pbuf_zone, bp);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_old(object, m[i]);
			if (error)
				break;
		}
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		return (VM_PAGER_ERROR);
	}

	/*
	 * If the file system supports BMAP, but blocksize is smaller
	 * than a page size, then use special small filesystem code.
	 */
	if (pagesperblock == 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_smlfs(object, m[i]);
			if (error)
				break;
		}
		return (error);
	}

	/*
	 * A sparse file can be encountered only for a single page request,
	 * which may not be preceded by call to vm_pager_haspage().
	 */
	if (bp->b_blkno == -1) {
		KASSERT(count == 1,
		    ("%s: array[%d] request to a sparse file %p", __func__,
		    count, vp));
		uma_zfree(vnode_pbuf_zone, bp);
		pmap_zero_page(m[0]);
		KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
		    __func__, m[0]));
		vm_page_valid(m[0]);
		return (VM_PAGER_OK);
	}
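
	/*
	 * Note (descriptive, hedged): a hole in a sparse file shows up as a
	 * VOP_BMAP() block number of -1 above, so no disk transfer is
	 * issued; the single requested page is zero-filled and marked fully
	 * valid, which matches the defined content of an unwritten range.
	 */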

#ifdef INVARIANTS
	blkno0 = bp->b_blkno;
#endif
	bp->b_blkno += (foff % bsize) / DEV_BSIZE;

	/* Recalculate blocks available after/before to pages. */
	poff = (foff % bsize) / PAGE_SIZE;
	before *= pagesperblock;
	before += poff;
	after *= pagesperblock;
	after += pagesperblock - (poff + 1);
	if (m[0]->pindex + after >= object->size)
		after = object->size - 1 - m[0]->pindex;
	KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
	    __func__, count, after + 1));
	after -= count - 1;

	/* Trim requested rbehind/rahead to possible values. */
	rbehind = a_rbehind ? *a_rbehind : 0;
	rahead = a_rahead ? *a_rahead : 0;
	rbehind = min(rbehind, before);
	rahead = min(rahead, after);

	/*
	 * Check that total amount of pages fit into buf.  Trim rbehind and
	 * rahead evenly if not.
	 */
	if (rbehind + rahead + count > atop(maxphys)) {
		int trim, sum;

		trim = rbehind + rahead + count - atop(maxphys) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Roundup rbehind trim to block size. */
			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
	KASSERT(rbehind + rahead + count <= atop(maxphys),
	    ("%s: behind %d ahead %d count %d maxphys %lu", __func__,
	    rbehind, rahead, count, maxphys));
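
	/*
	 * Worked example (illustrative only, assuming maxphys = 128 KB and
	 * PAGE_SIZE = 4096, so atop(maxphys) = 32): for count = 8 with
	 * rbehind = rahead = 16, the 40 requested pages exceed the buffer,
	 * giving trim = 9 and sum = 32; each side loses 9 * 16 / 32 = 4
	 * pages (rbehind possibly more, rounded up to a block boundary),
	 * bringing the total back to at most atop(maxphys) pages.
	 */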
1033b0cd2017SGleb Smirnoff
1034b0cd2017SGleb Smirnoff /*
1035b0cd2017SGleb Smirnoff * Fill in the bp->b_pages[] array with requested and optional
1036b0cd2017SGleb Smirnoff * read behind or read ahead pages. Read behind pages are looked
1037b0cd2017SGleb Smirnoff * up in a backward direction, down to a first cached page. Same
1038b0cd2017SGleb Smirnoff * for read ahead pages, but there is no need to shift the array
1039b0cd2017SGleb Smirnoff * in case of encountering a cached page.
1040b0cd2017SGleb Smirnoff */
1041*b82d7897SDoug Moore if (rbehind != 0 || rahead != 0) {
1042a7fecb4dSAlan Cox VM_OBJECT_WLOCK(object);
1043*b82d7897SDoug Moore vm_object_prepare_buf_pages(object, bp->b_pages, count,
1044*b82d7897SDoug Moore &rbehind, &rahead, m);
1045eac91e32SKonstantin Belousov VM_OBJECT_WUNLOCK(object);
1046*b82d7897SDoug Moore } else {
1047*b82d7897SDoug Moore for (int j = 0; j < count; j++)
1048*b82d7897SDoug Moore bp->b_pages[j] = m[j];
1049*b82d7897SDoug Moore }
1050*b82d7897SDoug Moore bp->b_blkno -= IDX_TO_OFF(rbehind) / DEV_BSIZE;
1051*b82d7897SDoug Moore bp->b_pgbefore = rbehind;
1052*b82d7897SDoug Moore bp->b_pgafter = rahead;
1053*b82d7897SDoug Moore bp->b_npages = rbehind + count + rahead;
1054b0cd2017SGleb Smirnoff
1055b0cd2017SGleb Smirnoff /* Report back actual behind/ahead read. */
1056b0cd2017SGleb Smirnoff if (a_rbehind)
1057b0cd2017SGleb Smirnoff *a_rbehind = bp->b_pgbefore;
1058b0cd2017SGleb Smirnoff if (a_rahead)
1059b0cd2017SGleb Smirnoff *a_rahead = bp->b_pgafter;
1060b0cd2017SGleb Smirnoff
1061e48b82bdSGleb Smirnoff #ifdef INVARIANTS
1062cd853791SKonstantin Belousov KASSERT(bp->b_npages <= atop(maxphys),
1063b0cd2017SGleb Smirnoff ("%s: buf %p overflowed", __func__, bp));
10644f56243aSGleb Smirnoff for (int j = 1, prev = 0; j < bp->b_npages; j++) {
10651e0c121fSGleb Smirnoff if (bp->b_pages[j] == bogus_page)
10661e0c121fSGleb Smirnoff continue;
10671e0c121fSGleb Smirnoff KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
10681e0c121fSGleb Smirnoff j - prev, ("%s: pages array not consecutive, bp %p",
10691e0c121fSGleb Smirnoff __func__, bp));
10701e0c121fSGleb Smirnoff prev = j;
10711e0c121fSGleb Smirnoff }
1072e48b82bdSGleb Smirnoff #endif
1073eac91e32SKonstantin Belousov
10740d94caffSDavid Greenman /*
1075b0cd2017SGleb Smirnoff * Recalculate the first offset and bytecount with regard to read
1076b0cd2017SGleb Smirnoff * behind. Truncate bytecount to the vnode's real size and round
1077b0cd2017SGleb Smirnoff * up the physical size for real devices.
107826f9a767SRodney W. Grimes */
1079b0cd2017SGleb Smirnoff foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
1080*b82d7897SDoug Moore bytecount = ptoa(bp->b_npages);
1081b0cd2017SGleb Smirnoff if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
1082b0cd2017SGleb Smirnoff bytecount = object->un_pager.vnp.vnp_size - foff;
1083eac91e32SKonstantin Belousov secmask = bo->bo_bsize - 1;
10846229cc50SPoul-Henning Kamp KASSERT(secmask < PAGE_SIZE && secmask > 0,
1085b0cd2017SGleb Smirnoff ("%s: sector size %d too large", __func__, secmask + 1));
1086b0cd2017SGleb Smirnoff bytecount = (bytecount + secmask) & ~secmask;
108726f9a767SRodney W. Grimes
108826f9a767SRodney W. Grimes /*
1089b0cd2017SGleb Smirnoff * And map the pages to be read into the kva, if the filesystem
10906ce697dcSKonstantin Belousov * requires mapped buffers.
109126f9a767SRodney W. Grimes */
10922a5eef69SGleb Smirnoff if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
10936ce697dcSKonstantin Belousov unmapped_buf_allowed) {
10946ce697dcSKonstantin Belousov bp->b_data = unmapped_buf;
10956ce697dcSKonstantin Belousov bp->b_offset = 0;
1096fade8dd7SJeff Roberson } else {
1097fade8dd7SJeff Roberson bp->b_data = bp->b_kvabase;
1098b0cd2017SGleb Smirnoff pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
1099fade8dd7SJeff Roberson }
110026f9a767SRodney W. Grimes
1101b0cd2017SGleb Smirnoff /* Build a minimal buffer header. */
110221144e3bSPoul-Henning Kamp bp->b_iocmd = BIO_READ;
1103bd78ceceSJohn Baldwin KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
1104bd78ceceSJohn Baldwin KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
1105a854ed98SJohn Baldwin bp->b_rcred = crhold(curthread->td_ucred);
1106a854ed98SJohn Baldwin bp->b_wcred = crhold(curthread->td_ucred);
11079c83534dSPoul-Henning Kamp pbgetbo(bo, bp);
11081faacf5dSKirk McKusick bp->b_vp = vp;
11094efe531cSMark Johnston bp->b_bcount = bp->b_bufsize = bytecount;
11102c18019fSPoul-Henning Kamp bp->b_iooffset = dbtob(bp->b_blkno);
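/*
 * Sanity check: the byte distance from the start of the buffer to the
 * first requested page must equal the device-block distance plus the
 * offset of the requested run within its filesystem block.
 */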
1111e48b82bdSGleb Smirnoff KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
1112e48b82bdSGleb Smirnoff (blkno0 - bp->b_blkno) * DEV_BSIZE +
1113e48b82bdSGleb Smirnoff IDX_TO_OFF(m[0]->pindex) % bsize,
1114e48b82bdSGleb Smirnoff ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
1115e48b82bdSGleb Smirnoff "blkno0 %ju b_blkno %ju", bsize,
1116e48b82bdSGleb Smirnoff (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
1117e48b82bdSGleb Smirnoff (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));
111890effb23SGleb Smirnoff
11194efe531cSMark Johnston (void)runningbufclaim(bp, bp->b_bufsize);
11204efe531cSMark Johnston
112183c9dea1SGleb Smirnoff VM_CNT_INC(v_vnodein);
112283c9dea1SGleb Smirnoff VM_CNT_ADD(v_vnodepgsin, bp->b_npages);
1123b0cd2017SGleb Smirnoff
112490effb23SGleb Smirnoff if (iodone != NULL) { /* async */
1125b0cd2017SGleb Smirnoff bp->b_pgiodone = iodone;
112690effb23SGleb Smirnoff bp->b_caller1 = arg;
112790effb23SGleb Smirnoff bp->b_iodone = vnode_pager_generic_getpages_done_async;
112890effb23SGleb Smirnoff bp->b_flags |= B_ASYNC;
112990effb23SGleb Smirnoff BUF_KERNPROC(bp);
1130b792bebeSPoul-Henning Kamp bstrategy(bp);
1131b0cd2017SGleb Smirnoff return (VM_PAGER_OK);
113290effb23SGleb Smirnoff } else {
113390effb23SGleb Smirnoff bp->b_iodone = bdone;
113490effb23SGleb Smirnoff bstrategy(bp);
11356a4b5823SPoul-Henning Kamp bwait(bp, PVM, "vnread");
113690effb23SGleb Smirnoff error = vnode_pager_generic_getpages_done(bp);
11371bb5ad63SGleb Smirnoff for (i = 0; i < bp->b_npages; i++)
11386ce697dcSKonstantin Belousov bp->b_pages[i] = NULL;
11391faacf5dSKirk McKusick bp->b_vp = NULL;
11409c83534dSPoul-Henning Kamp pbrelbo(bp);
1141756a5412SGleb Smirnoff uma_zfree(vnode_pbuf_zone, bp);
114290effb23SGleb Smirnoff return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
114390effb23SGleb Smirnoff }
1144b0cd2017SGleb Smirnoff }
114590effb23SGleb Smirnoff
114690effb23SGleb Smirnoff static void
114790effb23SGleb Smirnoff vnode_pager_generic_getpages_done_async(struct buf *bp)
114890effb23SGleb Smirnoff {
114990effb23SGleb Smirnoff int error;
115090effb23SGleb Smirnoff
115190effb23SGleb Smirnoff error = vnode_pager_generic_getpages_done(bp);
1152b0cd2017SGleb Smirnoff /* Run the iodone callback on the requested page range. */
1153b0cd2017SGleb Smirnoff bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore,
1154b0cd2017SGleb Smirnoff bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error);
115590effb23SGleb Smirnoff for (int i = 0; i < bp->b_npages; i++)
115690effb23SGleb Smirnoff bp->b_pages[i] = NULL;
115790effb23SGleb Smirnoff bp->b_vp = NULL;
115890effb23SGleb Smirnoff pbrelbo(bp);
1159756a5412SGleb Smirnoff uma_zfree(vnode_pbuf_zone, bp);
116090effb23SGleb Smirnoff }
116190effb23SGleb Smirnoff
116290effb23SGleb Smirnoff static int
116390effb23SGleb Smirnoff vnode_pager_generic_getpages_done(struct buf *bp)
116490effb23SGleb Smirnoff {
116590effb23SGleb Smirnoff vm_object_t object;
116690effb23SGleb Smirnoff off_t tfoff, nextoff;
116790effb23SGleb Smirnoff int i, error;
116890effb23SGleb Smirnoff
1169cafbf0c6SWarner Losh KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0,
1170cafbf0c6SWarner Losh ("%s: buf error but b_error == 0\n", __func__));
1171cafbf0c6SWarner Losh error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0;
117290effb23SGleb Smirnoff object = bp->b_vp->v_object;
117390effb23SGleb Smirnoff
117428f957b8SKonstantin Belousov runningbufwakeup(bp);
117528f957b8SKonstantin Belousov
117690effb23SGleb Smirnoff if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
1177fade8dd7SJeff Roberson if (!buf_mapped(bp)) {
1178fade8dd7SJeff Roberson bp->b_data = bp->b_kvabase;
1179fade8dd7SJeff Roberson pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages,
118090effb23SGleb Smirnoff bp->b_npages);
118190effb23SGleb Smirnoff }
1182fade8dd7SJeff Roberson bzero(bp->b_data + bp->b_bcount,
118390effb23SGleb Smirnoff PAGE_SIZE * bp->b_npages - bp->b_bcount);
118490effb23SGleb Smirnoff }
1185fade8dd7SJeff Roberson if (buf_mapped(bp)) {
1186fade8dd7SJeff Roberson pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1187fade8dd7SJeff Roberson bp->b_data = unmapped_buf;
118890effb23SGleb Smirnoff }
118926f9a767SRodney W. Grimes
11901bd12a3bSChuck Silvers /*
11911bd12a3bSChuck Silvers * If the read failed, we must free any read ahead/behind pages here.
11921bd12a3bSChuck Silvers * The requested pages are freed by the caller (for sync requests)
11931bd12a3bSChuck Silvers * or by the bp->b_pgiodone callback (for async requests).
11941bd12a3bSChuck Silvers */
11951bd12a3bSChuck Silvers if (error != 0) {
11961bd12a3bSChuck Silvers VM_OBJECT_WLOCK(object);
11971bd12a3bSChuck Silvers for (i = 0; i < bp->b_pgbefore; i++)
11981bd12a3bSChuck Silvers vm_page_free_invalid(bp->b_pages[i]);
11991bd12a3bSChuck Silvers for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++)
12001bd12a3bSChuck Silvers vm_page_free_invalid(bp->b_pages[i]);
12011bd12a3bSChuck Silvers VM_OBJECT_WUNLOCK(object);
12021bd12a3bSChuck Silvers return (error);
12031bd12a3bSChuck Silvers }
12041bd12a3bSChuck Silvers
12057f935055SJeff Roberson /* Read lock to protect size. */
12067f935055SJeff Roberson VM_OBJECT_RLOCK(object);
120790effb23SGleb Smirnoff for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
120890effb23SGleb Smirnoff i < bp->b_npages; i++, tfoff = nextoff) {
12098f9110f6SJohn Dyson vm_page_t mt;
12108f9110f6SJohn Dyson
12118f9110f6SJohn Dyson nextoff = tfoff + PAGE_SIZE;
121290effb23SGleb Smirnoff mt = bp->b_pages[i];
12132f81c92eSMark Johnston if (mt == bogus_page)
12142f81c92eSMark Johnston continue;
12158f9110f6SJohn Dyson
121654746b67SDmitrij Tejblum if (nextoff <= object->un_pager.vnp.vnp_size) {
12178d17e694SJulian Elischer /*
12188d17e694SJulian Elischer * Read filled up entire page.
12198d17e694SJulian Elischer */
12200012f373SJeff Roberson vm_page_valid(mt);
1221016a3c93SAlan Cox KASSERT(mt->dirty == 0,
122279f0deb9SGleb Smirnoff ("%s: page %p is dirty", __func__, mt));
1223016a3c93SAlan Cox KASSERT(!pmap_page_is_mapped(mt),
122479f0deb9SGleb Smirnoff ("%s: page %p is mapped", __func__, mt));
12258f9110f6SJohn Dyson } else {
12268d17e694SJulian Elischer /*
122742eb4108SAlan Cox * Read did not fill up entire page.
12288d17e694SJulian Elischer *
1229c3dbadc1SChuck Silvers * Currently we do not set the entire page valid;
1230c3dbadc1SChuck Silvers * we just try to clear the piece that we couldn't
1231c3dbadc1SChuck Silvers * read.
12328d17e694SJulian Elischer */
1233dc874f98SKonstantin Belousov vm_page_set_valid_range(mt, 0,
123454746b67SDmitrij Tejblum object->un_pager.vnp.vnp_size - tfoff);
123542eb4108SAlan Cox KASSERT((mt->dirty & vm_page_bits(0,
1236c3dbadc1SChuck Silvers object->un_pager.vnp.vnp_size - tfoff)) == 0,
1237c3dbadc1SChuck Silvers ("%s: page %p is dirty", __func__, mt));
12388f9110f6SJohn Dyson }
12398f9110f6SJohn Dyson
1240b0cd2017SGleb Smirnoff if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)
1241b6c00483SKonstantin Belousov vm_page_readahead_finish(mt);
124203679e23SAlan Cox }
12437f935055SJeff Roberson VM_OBJECT_RUNLOCK(object);
124490effb23SGleb Smirnoff
124590effb23SGleb Smirnoff return (error);
124626f9a767SRodney W. Grimes }
124726f9a767SRodney W. Grimes
1248ce75f2c3SMike Smith /*
1249ce75f2c3SMike Smith * EOPNOTSUPP is no longer legal. For local media VFS's that do not
1250ce75f2c3SMike Smith * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
1251ce75f2c3SMike Smith * vnode_pager_generic_putpages() to implement the previous behavior.
1252ce75f2c3SMike Smith *
1253ce75f2c3SMike Smith * All other FS's should use the bypass to get to the local media
1254ce75f2c3SMike Smith * backing vp's VOP_PUTPAGES.
1255ce75f2c3SMike Smith */
1256e4542174SMatthew Dillon static void
12577ebba1f8SGleb Smirnoff vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
125833cad9e9SKonstantin Belousov int flags, int *rtvals)
1259170db9c6SJohn Dyson {
1260b8ebd99aSJohn Baldwin int rtval __diagused;
1261170db9c6SJohn Dyson struct vnode *vp;
126286ffbd76SMike Smith int bytes = count * PAGE_SIZE;
1263ad980522SJohn Dyson
12640e3cdf2cSAlan Cox /*
12650e3cdf2cSAlan Cox * Force synchronous operation if we are extremely low on memory
12660e3cdf2cSAlan Cox * to prevent a low-memory deadlock. VOP operations often need to
12670e3cdf2cSAlan Cox * allocate more memory to initiate the I/O (i.e., do a BMAP
12680e3cdf2cSAlan Cox * operation). The swapper handles the case by limiting the amount
12690e3cdf2cSAlan Cox * of asynchronous I/O, but that sort of solution doesn't scale well
12700e3cdf2cSAlan Cox * for the vnode pager without a lot of work.
12710e3cdf2cSAlan Cox *
12720e3cdf2cSAlan Cox * Also, the backing vnode's iodone routine may not wake the pageout
12730e3cdf2cSAlan Cox * daemon up. This should probably be addressed. XXX
12740e3cdf2cSAlan Cox */
12750e3cdf2cSAlan Cox
1276e2068d0bSJeff Roberson if (vm_page_count_min())
127733cad9e9SKonstantin Belousov flags |= VM_PAGER_PUT_SYNC;
12780e3cdf2cSAlan Cox
12790e3cdf2cSAlan Cox /*
12800e3cdf2cSAlan Cox * Call device-specific putpages function
12810e3cdf2cSAlan Cox */
1282170db9c6SJohn Dyson vp = object->handle;
128389f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object);
128433cad9e9SKonstantin Belousov rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals);
128523955314SAlfred Perlstein KASSERT(rtval != EOPNOTSUPP,
128623955314SAlfred Perlstein ("vnode_pager: stale FS putpages\n"));
128789f6b863SAttilio Rao VM_OBJECT_WLOCK(object);
1288170db9c6SJohn Dyson }
1289170db9c6SJohn Dyson
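/*
 * Convert a file offset to an index into the dirty-block bitmask of
 * the page covering it.  For example, with 4K pages and a DEV_BSIZE
 * of 512, byte offset 0xa00 within a page maps to block index 5.
 */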
129005877a85SKonstantin Belousov static int
129105877a85SKonstantin Belousov vn_off2bidx(vm_ooffset_t offset)
129205877a85SKonstantin Belousov {
129305877a85SKonstantin Belousov
129405877a85SKonstantin Belousov return ((offset & PAGE_MASK) / DEV_BSIZE);
129505877a85SKonstantin Belousov }
129605877a85SKonstantin Belousov
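/*
 * Report whether the DEV_BSIZE block containing the given file offset
 * is dirty in the page m covering that offset.
 */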
129705877a85SKonstantin Belousov static bool
129805877a85SKonstantin Belousov vn_dirty_blk(vm_page_t m, vm_ooffset_t offset)
129905877a85SKonstantin Belousov {
130005877a85SKonstantin Belousov
130105877a85SKonstantin Belousov KASSERT(IDX_TO_OFF(m->pindex) <= offset &&
130205877a85SKonstantin Belousov offset < IDX_TO_OFF(m->pindex + 1),
130305877a85SKonstantin Belousov ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex,
130405877a85SKonstantin Belousov (uintmax_t)offset));
130505877a85SKonstantin Belousov return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0);
130605877a85SKonstantin Belousov }
1307ce75f2c3SMike Smith
130826f9a767SRodney W. Grimes /*
1309ce75f2c3SMike Smith * This is now called from local media FS's to operate against their
13104491ea91SEivind Eklund * own vnodes if they fail to implement VOP_PUTPAGES.
13112b6b0df7SMatthew Dillon *
13122b6b0df7SMatthew Dillon * This is typically called indirectly via the pageout daemon and
1313763df3ecSPedro F. Giffuni * clustering has already typically occurred, so in general we ask the
13142b6b0df7SMatthew Dillon * underlying filesystem to write the data out asynchronously rather
13152b6b0df7SMatthew Dillon * than delayed.
131626f9a767SRodney W. Grimes */
1317ce75f2c3SMike Smith int
1318c46b90e9SAlan Cox vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
1319c46b90e9SAlan Cox int flags, int *rtvals)
132026f9a767SRodney W. Grimes {
1321ce75f2c3SMike Smith vm_object_t object;
1322c46b90e9SAlan Cox vm_page_t m;
1323ed1a88a3SKonstantin Belousov vm_ooffset_t max_offset, next_offset, poffset, prev_offset;
1324f6b04d2bSDavid Greenman struct uio auio;
1325f6b04d2bSDavid Greenman struct iovec aiov;
132605877a85SKonstantin Belousov off_t prev_resid, wrsz;
1327e6c44f65SKonstantin Belousov int count, error, i, maxsize, ncount, pgoff, ppscheck;
132805877a85SKonstantin Belousov bool in_hole;
1329dd498befSPaul Saab static struct timeval lastfail;
1330dd498befSPaul Saab static int curfail;
133126f9a767SRodney W. Grimes
1332ce75f2c3SMike Smith object = vp->v_object;
1333ce75f2c3SMike Smith count = bytecount / PAGE_SIZE;
1334ce75f2c3SMike Smith
133526f9a767SRodney W. Grimes for (i = 0; i < count; i++)
1336031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_ERROR;
133726f9a767SRodney W. Grimes
1338c46b90e9SAlan Cox if ((int64_t)ma[0]->pindex < 0) {
1339e6c44f65SKonstantin Belousov printf("vnode_pager_generic_putpages: "
1340e6c44f65SKonstantin Belousov "attempt to write meta-data 0x%jx(%lx)\n",
1341e6c44f65SKonstantin Belousov (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty);
1342f6b04d2bSDavid Greenman rtvals[0] = VM_PAGER_BAD;
1343e6c44f65SKonstantin Belousov return (VM_PAGER_BAD);
13440d94caffSDavid Greenman }
13450bdb7528SDavid Greenman
1346f6b04d2bSDavid Greenman maxsize = count * PAGE_SIZE;
1347f6b04d2bSDavid Greenman ncount = count;
134826f9a767SRodney W. Grimes
1349c46b90e9SAlan Cox poffset = IDX_TO_OFF(ma[0]->pindex);
135000a6f47fSMatthew Dillon
135100a6f47fSMatthew Dillon /*
135200a6f47fSMatthew Dillon * If the page-aligned write is larger than the actual file, we
1353763df3ecSPedro F. Giffuni * have to invalidate pages occurring beyond the file EOF. However,
135400a6f47fSMatthew Dillon * there is an edge case where a file may not be page-aligned and
135500a6f47fSMatthew Dillon * the last page is partially invalid. In this case the filesystem
135600a6f47fSMatthew Dillon * may not properly clear the dirty bits for the entire page (which
135700a6f47fSMatthew Dillon * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
1358efec381dSMark Johnston * With the page busied we are free to fix up the dirty bits here.
13593ebeaf59SMatthew Dillon *
13603ebeaf59SMatthew Dillon * We do not under any circumstances truncate the valid bits, as
13613ebeaf59SMatthew Dillon * this will screw up bogus page replacement.
136200a6f47fSMatthew Dillon */
1363b3d4ab66SKonstantin Belousov VM_OBJECT_RLOCK(object);
1364a316d390SJohn Dyson if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
136500a6f47fSMatthew Dillon if (object->un_pager.vnp.vnp_size > poffset) {
1366a316d390SJohn Dyson maxsize = object->un_pager.vnp.vnp_size - poffset;
1367aa8de40aSPoul-Henning Kamp ncount = btoc(maxsize);
136800a6f47fSMatthew Dillon if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
1369938cdc42SKonstantin Belousov pgoff = roundup2(pgoff, DEV_BSIZE);
1370938cdc42SKonstantin Belousov
1371c46b90e9SAlan Cox /*
13727f935055SJeff Roberson * If the page is busy and the following
1373c46b90e9SAlan Cox * conditions hold, then the page's dirty
1374c46b90e9SAlan Cox * field cannot be concurrently changed by a
1375c46b90e9SAlan Cox * pmap operation.
1376c46b90e9SAlan Cox */
1377c46b90e9SAlan Cox m = ma[ncount - 1];
1378c7aebda8SAttilio Rao vm_page_assert_sbusied(m);
13796031c68dSAlan Cox KASSERT(!pmap_page_is_write_mapped(m),
1380c46b90e9SAlan Cox ("vnode_pager_generic_putpages: page %p is not read-only", m));
1381e6c44f65SKonstantin Belousov MPASS(m->dirty != 0);
1382c46b90e9SAlan Cox vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
1383c46b90e9SAlan Cox pgoff);
138400a6f47fSMatthew Dillon }
138500a6f47fSMatthew Dillon } else {
138600a6f47fSMatthew Dillon maxsize = 0;
138700a6f47fSMatthew Dillon ncount = 0;
138800a6f47fSMatthew Dillon }
1389e6c44f65SKonstantin Belousov for (i = ncount; i < count; i++)
1390f6b04d2bSDavid Greenman rtvals[i] = VM_PAGER_BAD;
1391f6b04d2bSDavid Greenman }
13927f935055SJeff Roberson VM_OBJECT_RUNLOCK(object);
139326f9a767SRodney W. Grimes
1394f6b04d2bSDavid Greenman auio.uio_iov = &aiov;
1395f6b04d2bSDavid Greenman auio.uio_segflg = UIO_NOCOPY;
1396f6b04d2bSDavid Greenman auio.uio_rw = UIO_WRITE;
1397e6c44f65SKonstantin Belousov auio.uio_td = NULL;
1398ed1a88a3SKonstantin Belousov max_offset = roundup2(poffset + maxsize, DEV_BSIZE);
139905877a85SKonstantin Belousov
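/*
 * Push out the dirty pages as runs of contiguous dirty DEV_BSIZE
 * blocks: skip clean blocks, find the longest dirty run starting
 * there, and issue one VOP_WRITE() per run.
 */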
1400ed1a88a3SKonstantin Belousov for (prev_offset = poffset; prev_offset < max_offset;) {
140105877a85SKonstantin Belousov /* Skip clean blocks. */
1402ed1a88a3SKonstantin Belousov for (in_hole = true; in_hole && prev_offset < max_offset;) {
140305877a85SKonstantin Belousov m = ma[OFF_TO_IDX(prev_offset - poffset)];
140405877a85SKonstantin Belousov for (i = vn_off2bidx(prev_offset);
140505877a85SKonstantin Belousov i < sizeof(vm_page_bits_t) * NBBY &&
1406ed1a88a3SKonstantin Belousov prev_offset < max_offset; i++) {
140705877a85SKonstantin Belousov if (vn_dirty_blk(m, prev_offset)) {
140805877a85SKonstantin Belousov in_hole = false;
140905877a85SKonstantin Belousov break;
141005877a85SKonstantin Belousov }
141105877a85SKonstantin Belousov prev_offset += DEV_BSIZE;
141205877a85SKonstantin Belousov }
141305877a85SKonstantin Belousov }
141405877a85SKonstantin Belousov if (in_hole)
141505877a85SKonstantin Belousov goto write_done;
141605877a85SKonstantin Belousov
141705877a85SKonstantin Belousov /* Find longest run of dirty blocks. */
1418ed1a88a3SKonstantin Belousov for (next_offset = prev_offset; next_offset < max_offset;) {
141905877a85SKonstantin Belousov m = ma[OFF_TO_IDX(next_offset - poffset)];
142005877a85SKonstantin Belousov for (i = vn_off2bidx(next_offset);
142105877a85SKonstantin Belousov i < sizeof(vm_page_bits_t) * NBBY &&
1422ed1a88a3SKonstantin Belousov next_offset < max_offset; i++) {
142305877a85SKonstantin Belousov if (!vn_dirty_blk(m, next_offset))
142405877a85SKonstantin Belousov goto start_write;
142505877a85SKonstantin Belousov next_offset += DEV_BSIZE;
142605877a85SKonstantin Belousov }
142705877a85SKonstantin Belousov }
142805877a85SKonstantin Belousov start_write:
142905877a85SKonstantin Belousov if (next_offset > poffset + maxsize)
143005877a85SKonstantin Belousov next_offset = poffset + maxsize;
1431bdb46c21SKonstantin Belousov if (prev_offset == next_offset)
1432bdb46c21SKonstantin Belousov goto write_done;
143305877a85SKonstantin Belousov
143405877a85SKonstantin Belousov /*
143505877a85SKonstantin Belousov * Getting here requires finding a dirty block in the
143605877a85SKonstantin Belousov * 'skip clean blocks' loop.
143705877a85SKonstantin Belousov */
143805877a85SKonstantin Belousov
143905877a85SKonstantin Belousov aiov.iov_base = NULL;
144005877a85SKonstantin Belousov auio.uio_iovcnt = 1;
144105877a85SKonstantin Belousov auio.uio_offset = prev_offset;
144205877a85SKonstantin Belousov prev_resid = auio.uio_resid = aiov.iov_len = next_offset -
144305877a85SKonstantin Belousov prev_offset;
144405877a85SKonstantin Belousov error = VOP_WRITE(vp, &auio,
144505877a85SKonstantin Belousov vnode_pager_putpages_ioflags(flags), curthread->td_ucred);
144605877a85SKonstantin Belousov
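/*
 * A zero-length write means the filesystem made no progress; complain
 * (rate-limited) and stop rather than spinning on the same offset.
 */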
144705877a85SKonstantin Belousov wrsz = prev_resid - auio.uio_resid;
144805877a85SKonstantin Belousov if (wrsz == 0) {
144905877a85SKonstantin Belousov if (ppsratecheck(&lastfail, &curfail, 1) != 0) {
145005877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: "
145105877a85SKonstantin Belousov "zero-length write at %ju resid %zd\n",
145205877a85SKonstantin Belousov auio.uio_offset, auio.uio_resid);
145305877a85SKonstantin Belousov }
145405877a85SKonstantin Belousov break;
145505877a85SKonstantin Belousov }
145605877a85SKonstantin Belousov
145705877a85SKonstantin Belousov /* Adjust the starting offset for next iteration. */
145805877a85SKonstantin Belousov prev_offset += wrsz;
145905877a85SKonstantin Belousov MPASS(auio.uio_offset == prev_offset);
1460f6b04d2bSDavid Greenman
14613dbb0ca6SKonstantin Belousov ppscheck = 0;
146205877a85SKonstantin Belousov if (error != 0 && (ppscheck = ppsratecheck(&lastfail,
146305877a85SKonstantin Belousov &curfail, 1)) != 0)
146405877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: I/O error %d\n",
146505877a85SKonstantin Belousov error);
1466e6c44f65SKonstantin Belousov if (auio.uio_resid != 0 && (ppscheck != 0 ||
1467e6c44f65SKonstantin Belousov ppsratecheck(&lastfail, &curfail, 1) != 0))
146805877a85SKonstantin Belousov vn_printf(vp, "vnode_pager_putpages: residual I/O %zd "
146905877a85SKonstantin Belousov "at %ju\n", auio.uio_resid,
147005877a85SKonstantin Belousov (uintmax_t)ma[0]->pindex);
147105877a85SKonstantin Belousov if (error != 0 || auio.uio_resid != 0)
147205877a85SKonstantin Belousov break;
147305877a85SKonstantin Belousov }
147405877a85SKonstantin Belousov write_done:
147505877a85SKonstantin Belousov /* Mark completely processed pages. */
147605877a85SKonstantin Belousov for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++)
147726f9a767SRodney W. Grimes rtvals[i] = VM_PAGER_OK;
147805877a85SKonstantin Belousov /* Mark partial EOF page. */
147905877a85SKonstantin Belousov if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0)
148005877a85SKonstantin Belousov rtvals[i++] = VM_PAGER_OK;
148105877a85SKonstantin Belousov /* Unwritten pages in the range: as a bonus, report clean ones as OK. */
148205877a85SKonstantin Belousov for (; i < ncount; i++)
148305877a85SKonstantin Belousov rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR;
148405877a85SKonstantin Belousov VM_CNT_ADD(v_vnodepgsout, i);
148505877a85SKonstantin Belousov VM_CNT_INC(v_vnodeout);
1486e6c44f65SKonstantin Belousov return (rtvals[0]);
148726f9a767SRodney W. Grimes }
1488031ec8c1SKonstantin Belousov
148965b9599aSKonstantin Belousov int
vnode_pager_putpages_ioflags(int pager_flags)149065b9599aSKonstantin Belousov vnode_pager_putpages_ioflags(int pager_flags)
149165b9599aSKonstantin Belousov {
149265b9599aSKonstantin Belousov int ioflags;
149365b9599aSKonstantin Belousov
149465b9599aSKonstantin Belousov /*
149565b9599aSKonstantin Belousov * Pageouts are already clustered, use IO_ASYNC to force a
149665b9599aSKonstantin Belousov * bawrite() rather than a bdwrite() to prevent paging I/O
149765b9599aSKonstantin Belousov * from saturating the buffer cache. Dummy-up the sequential
149865b9599aSKonstantin Belousov * heuristic to cause large ranges to cluster. If neither
149965b9599aSKonstantin Belousov * IO_SYNC nor IO_ASYNC is set, the system decides how to
150065b9599aSKonstantin Belousov * cluster.
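 *
 * For instance, a VM_PAGER_PUT_SYNC request maps to IO_VMIO | IO_SYNC
 * plus the maximal sequential hint, while a plain asynchronous pageout
 * without VM_PAGER_CLUSTER_OK maps to IO_VMIO | IO_ASYNC.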
150165b9599aSKonstantin Belousov */
150265b9599aSKonstantin Belousov ioflags = IO_VMIO;
150365b9599aSKonstantin Belousov if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0)
150465b9599aSKonstantin Belousov ioflags |= IO_SYNC;
150565b9599aSKonstantin Belousov else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0)
150665b9599aSKonstantin Belousov ioflags |= IO_ASYNC;
150765b9599aSKonstantin Belousov ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL: 0;
150865b9599aSKonstantin Belousov ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0;
150965b9599aSKonstantin Belousov ioflags |= IO_SEQMAX << IO_SEQSHIFT;
151065b9599aSKonstantin Belousov return (ioflags);
151165b9599aSKonstantin Belousov }
151265b9599aSKonstantin Belousov
1513555b7bb4SKonstantin Belousov /*
1514555b7bb4SKonstantin Belousov * vnode_pager_undirty_pages().
1515555b7bb4SKonstantin Belousov *
1516555b7bb4SKonstantin Belousov * A helper to mark pages as clean after pageout that was possibly
1517555b7bb4SKonstantin Belousov * done with a short write. The lpos argument specifies the page run
1518555b7bb4SKonstantin Belousov * length in bytes, and the written argument specifies how many bytes
1519555b7bb4SKonstantin Belousov * were actually written. eof is the offset past the last valid byte
1520555b7bb4SKonstantin Belousov * in the vnode using the absolute file position of the first byte in
1521555b7bb4SKonstantin Belousov * the run as the base from which it is computed.
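 *
 * For example, with 4K pages and written == 6144, page 0 is fully
 * written, undirtied and reported VM_PAGER_OK, while page 1 was only
 * half written: its first 2048 bytes are marked clean and it is
 * reported VM_PAGER_AGAIN.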
1522555b7bb4SKonstantin Belousov */
1523031ec8c1SKonstantin Belousov void
1524555b7bb4SKonstantin Belousov vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof,
1525555b7bb4SKonstantin Belousov int lpos)
1526031ec8c1SKonstantin Belousov {
1527555b7bb4SKonstantin Belousov int i, pos, pos_devb;
1528031ec8c1SKonstantin Belousov
1529555b7bb4SKonstantin Belousov if (written == 0 && eof >= lpos)
15309d17da3bSKonstantin Belousov return;
1531031ec8c1SKonstantin Belousov for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
1532031ec8c1SKonstantin Belousov if (pos < trunc_page(written)) {
1533031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_OK;
1534031ec8c1SKonstantin Belousov vm_page_undirty(ma[i]);
1535031ec8c1SKonstantin Belousov } else {
1536031ec8c1SKonstantin Belousov /* Partially written page. */
1537031ec8c1SKonstantin Belousov rtvals[i] = VM_PAGER_AGAIN;
1538031ec8c1SKonstantin Belousov vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
1539031ec8c1SKonstantin Belousov }
1540031ec8c1SKonstantin Belousov }
1541555b7bb4SKonstantin Belousov if (eof >= lpos) /* avoid truncation */
15427f935055SJeff Roberson return;
1543555b7bb4SKonstantin Belousov for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) {
1544555b7bb4SKonstantin Belousov if (pos != trunc_page(pos)) {
1545555b7bb4SKonstantin Belousov /*
1546555b7bb4SKonstantin Belousov * The page contains the last valid byte in
1547555b7bb4SKonstantin Belousov * the vnode; mark the rest of the page as
1548555b7bb4SKonstantin Belousov * clean, potentially making the whole page
1549555b7bb4SKonstantin Belousov * clean.
1550555b7bb4SKonstantin Belousov */
1551555b7bb4SKonstantin Belousov pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE);
1552555b7bb4SKonstantin Belousov vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE -
1553555b7bb4SKonstantin Belousov pos_devb);
1554555b7bb4SKonstantin Belousov
1555555b7bb4SKonstantin Belousov /*
1556555b7bb4SKonstantin Belousov * If the page was cleaned, report the pageout
1557555b7bb4SKonstantin Belousov * on it as successful. msync() no longer
1558555b7bb4SKonstantin Belousov * needs to write out the page, endlessly
1559555b7bb4SKonstantin Belousov * creating write requests and dirty buffers.
1560555b7bb4SKonstantin Belousov */
1561555b7bb4SKonstantin Belousov if (ma[i]->dirty == 0)
1562555b7bb4SKonstantin Belousov rtvals[i] = VM_PAGER_OK;
1563555b7bb4SKonstantin Belousov
1564555b7bb4SKonstantin Belousov pos = round_page(pos);
1565555b7bb4SKonstantin Belousov } else {
1566555b7bb4SKonstantin Belousov /* vm_pageout_flush() clears dirty */
1567555b7bb4SKonstantin Belousov rtvals[i] = VM_PAGER_BAD;
1568555b7bb4SKonstantin Belousov pos += PAGE_SIZE;
1569555b7bb4SKonstantin Belousov }
1570555b7bb4SKonstantin Belousov }
1571031ec8c1SKonstantin Belousov }
157284110e7eSKonstantin Belousov
1573fe7bcbafSKyle Evans static void
157484110e7eSKonstantin Belousov vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
157584110e7eSKonstantin Belousov vm_offset_t end)
157684110e7eSKonstantin Belousov {
157784110e7eSKonstantin Belousov struct vnode *vp;
157884110e7eSKonstantin Belousov vm_ooffset_t old_wm;
157984110e7eSKonstantin Belousov
158089f6b863SAttilio Rao VM_OBJECT_WLOCK(object);
158184110e7eSKonstantin Belousov if (object->type != OBJT_VNODE) {
158289f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object);
158384110e7eSKonstantin Belousov return;
158484110e7eSKonstantin Belousov }
158584110e7eSKonstantin Belousov old_wm = object->un_pager.vnp.writemappings;
158684110e7eSKonstantin Belousov object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
158784110e7eSKonstantin Belousov vp = object->handle;
158884110e7eSKonstantin Belousov if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
158978022527SKonstantin Belousov ASSERT_VOP_LOCKED(vp, "v_writecount inc");
159078022527SKonstantin Belousov VOP_ADD_WRITECOUNT_CHECKED(vp, 1);
1591b47f6241SJohn Baldwin CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
1592b47f6241SJohn Baldwin __func__, vp, vp->v_writecount);
159384110e7eSKonstantin Belousov } else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
159478022527SKonstantin Belousov ASSERT_VOP_LOCKED(vp, "v_writecount dec");
159578022527SKonstantin Belousov VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
1596b47f6241SJohn Baldwin CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
1597b47f6241SJohn Baldwin __func__, vp, vp->v_writecount);
159884110e7eSKonstantin Belousov }
159989f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object);
160084110e7eSKonstantin Belousov }
160184110e7eSKonstantin Belousov
1602fe7bcbafSKyle Evans static void
160384110e7eSKonstantin Belousov vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
160484110e7eSKonstantin Belousov vm_offset_t end)
160584110e7eSKonstantin Belousov {
160684110e7eSKonstantin Belousov struct vnode *vp;
160784110e7eSKonstantin Belousov struct mount *mp;
160884110e7eSKonstantin Belousov vm_offset_t inc;
160984110e7eSKonstantin Belousov
161089f6b863SAttilio Rao VM_OBJECT_WLOCK(object);
161184110e7eSKonstantin Belousov
161284110e7eSKonstantin Belousov /*
161384110e7eSKonstantin Belousov * First, recheck the object type to account for the race when
161484110e7eSKonstantin Belousov * the vnode is reclaimed.
161584110e7eSKonstantin Belousov */
161684110e7eSKonstantin Belousov if (object->type != OBJT_VNODE) {
161789f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object);
161884110e7eSKonstantin Belousov return;
161984110e7eSKonstantin Belousov }
162084110e7eSKonstantin Belousov
162184110e7eSKonstantin Belousov /*
162284110e7eSKonstantin Belousov * Optimize for the case when writemappings is not going to
162384110e7eSKonstantin Belousov * zero.
162484110e7eSKonstantin Belousov */
162584110e7eSKonstantin Belousov inc = end - start;
162684110e7eSKonstantin Belousov if (object->un_pager.vnp.writemappings != inc) {
162784110e7eSKonstantin Belousov object->un_pager.vnp.writemappings -= inc;
162889f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object);
162984110e7eSKonstantin Belousov return;
163084110e7eSKonstantin Belousov }
163184110e7eSKonstantin Belousov
163284110e7eSKonstantin Belousov vp = object->handle;
163384110e7eSKonstantin Belousov vhold(vp);
163489f6b863SAttilio Rao VM_OBJECT_WUNLOCK(object);
163584110e7eSKonstantin Belousov mp = NULL;
163684110e7eSKonstantin Belousov vn_start_write(vp, &mp, V_WAIT);
163778022527SKonstantin Belousov vn_lock(vp, LK_SHARED | LK_RETRY);
163884110e7eSKonstantin Belousov
163984110e7eSKonstantin Belousov /*
164084110e7eSKonstantin Belousov * Decrement the object's writemappings, by swapping the start
164184110e7eSKonstantin Belousov * and end arguments for vnode_pager_update_writecount(). If
164284110e7eSKonstantin Belousov * there was not a race with vnode reclamation, then the
164384110e7eSKonstantin Belousov * vnode's v_writecount is decremented.
164484110e7eSKonstantin Belousov */
164584110e7eSKonstantin Belousov vnode_pager_update_writecount(object, end, start);
1646b249ce48SMateusz Guzik VOP_UNLOCK(vp);
164784110e7eSKonstantin Belousov vdrop(vp);
164884110e7eSKonstantin Belousov if (mp != NULL)
164984110e7eSKonstantin Belousov vn_finished_write(mp);
165084110e7eSKonstantin Belousov }
1651192112b7SKonstantin Belousov
1652192112b7SKonstantin Belousov static void
1653192112b7SKonstantin Belousov vnode_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
1654192112b7SKonstantin Belousov {
1655192112b7SKonstantin Belousov *vpp = object->handle;
1656192112b7SKonstantin Belousov }
1657b068bb09SKonstantin Belousov
1658b068bb09SKonstantin Belousov static void
1659b068bb09SKonstantin Belousov vnode_pager_clean1(struct vnode *vp, int sync_flags)
1660b068bb09SKonstantin Belousov {
1661b068bb09SKonstantin Belousov struct vm_object *obj;
1662b068bb09SKonstantin Belousov
1663b068bb09SKonstantin Belousov ASSERT_VOP_LOCKED(vp, "needs lock for writes");
1664b068bb09SKonstantin Belousov obj = vp->v_object;
1665b068bb09SKonstantin Belousov if (obj == NULL)
1666b068bb09SKonstantin Belousov return;
1667b068bb09SKonstantin Belousov
1668b068bb09SKonstantin Belousov VM_OBJECT_WLOCK(obj);
1669b068bb09SKonstantin Belousov vm_object_page_clean(obj, 0, 0, sync_flags);
1670b068bb09SKonstantin Belousov VM_OBJECT_WUNLOCK(obj);
1671b068bb09SKonstantin Belousov }
1672b068bb09SKonstantin Belousov
1673b068bb09SKonstantin Belousov void
1674b068bb09SKonstantin Belousov vnode_pager_clean_sync(struct vnode *vp)
1675b068bb09SKonstantin Belousov {
1676b068bb09SKonstantin Belousov vnode_pager_clean1(vp, OBJPC_SYNC);
1677b068bb09SKonstantin Belousov }
1678b068bb09SKonstantin Belousov
1679b068bb09SKonstantin Belousov void
1680b068bb09SKonstantin Belousov vnode_pager_clean_async(struct vnode *vp)
1681b068bb09SKonstantin Belousov {
1682b068bb09SKonstantin Belousov vnode_pager_clean1(vp, 0);
1683b068bb09SKonstantin Belousov }
1684