/*-
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems.  Will
 *	greatly simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_local_getpages0(struct vnode *, vm_page_t *, int, int,
    vop_getpages_iodone_t, void *);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int,
    vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);

struct pagerops vnodepagerops = {
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_getpages_async = vnode_pager_getpages_async,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
};

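/*
 * Free count for the vnode pager's share of the physical buffer (pbuf)
 * pool; passed to getpbuf()/relpbuf() for the paging I/O below.
 */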
int vnode_pbuf_freecnt;

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;

	if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
		return (0);

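	/*
	 * If a dead object is still attached, wait for it to go away
	 * before retrying: drop the vnode lock, record our interest with
	 * OBJ_DISCONNECTWNT, and sleep until the teardown completes.
	 */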
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_WLOCK(object);
		if (!(object->flags & OBJ_DEAD)) {
			VM_OBJECT_WUNLOCK(object);
			return (0);
		}
		VOP_UNLOCK(vp, 0);
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vodead", 0);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	if (size == 0) {
		if (vn_isdisk(vp, NULL)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.
	 */
	VM_OBJECT_WLOCK(object);
	object->ref_count--;
	VM_OBJECT_WUNLOCK(object);
	vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}

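/*
 * Tear down the VM object associated with this vnode.  The vnode is
 * expected to be exclusively locked, as during reclamation.
 */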
void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	if (obj->ref_count == 0) {
		/*
		 * don't double-terminate the object
		 */
		if ((obj->flags & OBJ_DEAD) == 0)
			vm_object_terminate(obj);
		else
			VM_OBJECT_WUNLOCK(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	vp->v_object = NULL;
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 *
 * MPSAFE
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *) handle;

	/*
	 * If the object is being terminated, wait for it to
	 * go away.
	 */
retry:
	while ((object = vp->v_object) != NULL) {
		VM_OBJECT_WLOCK(object);
		if ((object->flags & OBJ_DEAD) == 0)
			break;
		vm_object_set_flag(object, OBJ_DISCONNECTWNT);
		VM_OBJECT_SLEEP(object, object, PDROP | PVM, "vadead", 0);
	}

	KASSERT(vp->v_usecount != 0, ("vnode_pager_alloc: no vnode reference"));

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;

		object->handle = handle;
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were sleeping
			 */
			VI_UNLOCK(vp);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
	} else {
		object->ref_count++;
		VM_OBJECT_WUNLOCK(object);
	}
	vref(vp);
	return (object);
}

/*
 *	The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	if (object->flags & OBJ_DISCONNECTWNT) {
		vm_object_clear_flag(object, OBJ_DISCONNECTWNT);
		wakeup(object);
	}
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VOP_UNSET_TEXT(vp);
	VM_OBJECT_WUNLOCK(object);
	while (refs-- > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}

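/*
 * Report whether the pager backs the page at pindex, by asking VOP_BMAP()
 * whether a device block underlies it.  The optional *before and *after
 * run lengths are converted from filesystem blocks into pages.
 */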
static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_ASSERT_WLOCKED(object);
	/*
	 * If no vp or the vp is doomed, we do not have the page.
	 */
	if (vp == NULL || (vp->v_iflag & VI_DOOMED) != 0)
		return (FALSE);
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return (FALSE);

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	VM_OBJECT_WUNLOCK(object);
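	/*
	 * VOP_BMAP()'s runp and runb arguments return the number of
	 * contiguous blocks directly after and before reqblock.
	 */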
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VM_OBJECT_WLOCK(object);
	if (err)
		return (TRUE);
	if (bn == -1)
		return (FALSE);
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			int numafter;

			*after *= pagesperblock;
			numafter = pagesperblock - (poff + 1);
			if (IDX_TO_OFF(pindex + numafter) >
			    object->un_pager.vnp.vnp_size) {
				numafter =
				    OFF_TO_IDX(object->un_pager.vnp.vnp_size) -
				    pindex;
			}
			*after += numafter;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return (TRUE);
}

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
/* 	ASSERT_VOP_ELOCKED(vp, "vnode_pager_setsize and not locked vnode"); */
	VM_OBJECT_WLOCK(object);
	if (object->type == OBJT_DEAD) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	KASSERT(object->type == OBJT_VNODE,
	    ("not vnode-backed object %p", object));
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk.  Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if ((nsize & PAGE_MASK) &&
		    (m = vm_page_lookup(object, OFF_TO_IDX(nsize))) != NULL &&
		    m->valid != 0) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * Update the valid bits to reflect the blocks that
			 * have been zeroed.  Some of these valid bits may
			 * have already been set.
			 */
			vm_page_set_valid_range(m, base, size);

			/*
			 * Round "base" to the next block boundary so that the
			 * dirty bit for a partially zeroed block is not
			 * cleared.
			 */
			base = roundup2(base, DEV_BSIZE);

			/*
			 * Clear out partial-page dirty bits.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
		} else if ((nsize & PAGE_MASK) &&
		    vm_page_is_cached(object, OFF_TO_IDX(nsize))) {
			vm_page_cache_free(object, OFF_TO_IDX(nsize),
			    nobjsize);
		}
	}
	object->un_pager.vnp.vnp_size = nsize;
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
}

/*
 * Calculate the underlying device block address (in DEV_BSIZE units)
 * backing the given byte offset within the file, along with the length
 * of the contiguous run starting there.
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (address < 0)
		return (-1);

	if (vp->v_iflag & VI_DOOMED)
		return (-1);

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
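		/*
		 * Convert the run length from filesystem blocks to pages:
		 * count vblock's own block, scale to pages, and drop the
		 * pages preceding the requested offset within that block.
		 * (The scaling is only meaningful when bsize >= PAGE_SIZE;
		 * the small-block path passes run == NULL.)
		 */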
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}

/*
 * small block filesystem vnode pager input
 */
494 vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
495 {
496 	struct vnode *vp;
497 	struct bufobj *bo;
498 	struct buf *bp;
499 	struct sf_buf *sf;
500 	daddr_t fileaddr;
501 	vm_offset_t bsize;
502 	vm_page_bits_t bits;
503 	int error, i;
504 
505 	error = 0;
506 	vp = object->handle;
507 	if (vp->v_iflag & VI_DOOMED)
508 		return VM_PAGER_BAD;
509 
510 	bsize = vp->v_mount->mnt_stat.f_iosize;
511 
512 	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);
513 
514 	sf = sf_buf_alloc(m, 0);
515 
516 	for (i = 0; i < PAGE_SIZE / bsize; i++) {
517 		vm_ooffset_t address;
518 
519 		bits = vm_page_bits(i * bsize, bsize);
520 		if (m->valid & bits)
521 			continue;
522 
523 		address = IDX_TO_OFF(m->pindex) + i * bsize;
524 		if (address >= object->un_pager.vnp.vnp_size) {
525 			fileaddr = -1;
526 		} else {
527 			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
528 			if (error)
529 				break;
530 		}
531 		if (fileaddr != -1) {
532 			bp = getpbuf(&vnode_pbuf_freecnt);
533 
534 			/* build a minimal buffer header */
535 			bp->b_iocmd = BIO_READ;
536 			bp->b_iodone = bdone;
537 			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
538 			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
539 			bp->b_rcred = crhold(curthread->td_ucred);
540 			bp->b_wcred = crhold(curthread->td_ucred);
541 			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
542 			bp->b_blkno = fileaddr;
543 			pbgetbo(bo, bp);
544 			bp->b_vp = vp;
545 			bp->b_bcount = bsize;
546 			bp->b_bufsize = bsize;
547 			bp->b_runningbufspace = bp->b_bufsize;
548 			atomic_add_long(&runningbufspace, bp->b_runningbufspace);
549 
550 			/* do the input */
551 			bp->b_iooffset = dbtob(bp->b_blkno);
552 			bstrategy(bp);
553 
554 			bwait(bp, PVM, "vnsrd");
555 
556 			if ((bp->b_ioflags & BIO_ERROR) != 0)
557 				error = EIO;
558 
559 			/*
560 			 * free the buffer header back to the swap buffer pool
561 			 */
562 			bp->b_vp = NULL;
563 			pbrelbo(bp);
564 			relpbuf(bp, &vnode_pbuf_freecnt);
565 			if (error)
566 				break;
567 		} else
568 			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
569 		KASSERT((m->dirty & bits) == 0,
570 		    ("vnode_pager_input_smlfs: page %p is dirty", m));
571 		VM_OBJECT_WLOCK(object);
572 		m->valid |= bits;
573 		VM_OBJECT_WUNLOCK(object);
574 	}
575 	sf_buf_free(sf);
576 	if (error) {
577 		return VM_PAGER_ERROR;
578 	}
579 	return VM_PAGER_OK;
580 }

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return (VM_PAGER_BAD);
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		m->valid = VM_PAGE_BITS_ALL;
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int rtval;
	struct vnode *vp;
	int bytes = count * PAGE_SIZE;

	vp = object->handle;
	VM_OBJECT_WUNLOCK(object);
	rtval = VOP_GETPAGES(vp, m, bytes, reqpage);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented"));
	VM_OBJECT_WLOCK(object);
	return (rtval);
}

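/*
 * Asynchronous variant of vnode_pager_getpages(): the iodone callback
 * is invoked once the filesystem completes the read.
 */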
static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int reqpage, vop_getpages_iodone_t iodone, void *arg)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	VM_OBJECT_WUNLOCK(object);
	rtval = VOP_GETPAGES_ASYNC(vp, m, count * PAGE_SIZE, reqpage, 0,
	    iodone, arg);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages_async not implemented"));
	VM_OBJECT_WLOCK(object);
	return (rtval);
}

/*
 * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
 * local filesystems, where partially valid pages can only occur at
 * the end of file.
 */
int
vnode_pager_local_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_local_getpages0(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_reqpage, NULL, NULL));
}

int
vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
{

	return (vnode_pager_local_getpages0(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_reqpage, ap->a_iodone, ap->a_arg));
}

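/*
 * Sketch of typical usage (not part of this file): a local filesystem
 * points its vop vector at these wrappers, e.g.
 *
 *	.vop_getpages =		vnode_pager_local_getpages,
 *	.vop_getpages_async =	vnode_pager_local_getpages_async,
 */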
static int
vnode_pager_local_getpages0(struct vnode *vp, vm_page_t *m, int bytecount,
    int reqpage, vop_getpages_iodone_t iodone, void *arg)
{
	vm_page_t mreq;

	mreq = m[reqpage];

	/*
	 * Since the caller has busied the requested page, that page's valid
	 * field will not be changed by other threads.
	 */
	vm_page_assert_xbusied(mreq);

	/*
	 * The requested page has valid blocks.  The invalid part can only
	 * exist at the end of file, and the page is made fully valid
	 * by zeroing in vm_pager_getpages().  Free the non-requested
	 * pages, since no i/o is done to read their content.
	 */
	if (mreq->valid != 0) {
		vm_pager_free_nonreq(mreq->object, m, reqpage,
		    round_page(bytecount) / PAGE_SIZE);
		if (iodone != NULL)
			iodone(arg, m, reqpage, 0);
		return (VM_PAGER_OK);
	}

	return (vnode_pager_generic_getpages(vp, m, bytecount, reqpage,
	    iodone, arg));
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int bytecount,
    int reqpage, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	off_t foff;
	int i, j, size, bsize, first;
	daddr_t firstaddr, reqblock;
	struct bufobj *bo;
	int runpg;
	int runend;
	struct buf *bp;
	int count;
	int error;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("vnode_pager_generic_getpages does not support devices"));
	if (vp->v_iflag & VI_DOOMED)
		return (VM_PAGER_BAD);

	bsize = vp->v_mount->mnt_stat.f_iosize;
	foff = IDX_TO_OFF(m[reqpage]->pindex);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &reqblock, NULL, NULL);
	if (error == EOPNOTSUPP) {
		VM_OBJECT_WLOCK(object);

		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		PCPU_INC(cnt.v_vnodein);
		PCPU_INC(cnt.v_vnodepgsin);
		error = vnode_pager_input_old(object, m[reqpage]);
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		vm_pager_free_nonreq(object, m, reqpage, count);
		return (VM_PAGER_ERROR);

		/*
		 * if the blocksize is smaller than a page size, then use
		 * special small filesystem code.  NFS sometimes has a small
		 * blocksize, but it can handle large reads itself.
		 */
	} else if ((PAGE_SIZE / bsize) > 1 &&
	    (vp->v_mount->mnt_stat.f_type != nfs_mount_type)) {
		vm_pager_free_nonreq(object, m, reqpage, count);
		PCPU_INC(cnt.v_vnodein);
		PCPU_INC(cnt.v_vnodepgsin);
		return (vnode_pager_input_smlfs(object, m[reqpage]));
	}

	/*
	 * Since the caller has busied the requested page, that page's valid
	 * field will not be changed by other threads.
	 */
	vm_page_assert_xbusied(m[reqpage]);

	/*
	 * If we have a completely valid page available to us, we can
	 * clean up and return.  Otherwise we have to re-read the
	 * media.
	 */
	if (m[reqpage]->valid == VM_PAGE_BITS_ALL) {
		vm_pager_free_nonreq(object, m, reqpage, count);
		return (VM_PAGER_OK);
	} else if (reqblock == -1) {
		pmap_zero_page(m[reqpage]);
		KASSERT(m[reqpage]->dirty == 0,
		    ("vnode_pager_generic_getpages: page %p is dirty",
		    m[reqpage]));
		VM_OBJECT_WLOCK(object);
		m[reqpage]->valid = VM_PAGE_BITS_ALL;
		for (i = 0; i < count; i++)
			if (i != reqpage) {
				vm_page_lock(m[i]);
				vm_page_free(m[i]);
				vm_page_unlock(m[i]);
			}
		VM_OBJECT_WUNLOCK(object);
		return (VM_PAGER_OK);
	} else if (m[reqpage]->valid != 0) {
		VM_OBJECT_WLOCK(object);
		m[reqpage]->valid = 0;
		VM_OBJECT_WUNLOCK(object);
	}

	/*
	 * here on direct device I/O
	 */
	firstaddr = -1;

	/*
	 * calculate the run that includes the required page
	 */
	for (first = 0, i = 0; i < count; i = runend) {
		if (vnode_pager_addr(vp, IDX_TO_OFF(m[i]->pindex), &firstaddr,
		    &runpg) != 0) {
			VM_OBJECT_WLOCK(object);
			for (; i < count; i++)
				if (i != reqpage) {
					vm_page_lock(m[i]);
					vm_page_free(m[i]);
					vm_page_unlock(m[i]);
				}
			VM_OBJECT_WUNLOCK(object);
			return (VM_PAGER_ERROR);
		}
		if (firstaddr == -1) {
			VM_OBJECT_WLOCK(object);
			if (i == reqpage && foff < object->un_pager.vnp.vnp_size) {
				panic("vnode_pager_getpages: unexpected "
				    "missing page: firstaddr: %jd, "
				    "foff: 0x%jx, vnp_size: 0x%jx",
				    (intmax_t)firstaddr, (uintmax_t)foff,
				    (uintmax_t)object->un_pager.vnp.vnp_size);
			}
			vm_page_lock(m[i]);
			vm_page_free(m[i]);
			vm_page_unlock(m[i]);
			VM_OBJECT_WUNLOCK(object);
			runend = i + 1;
			first = runend;
			continue;
		}
		runend = i + runpg;
		if (runend <= reqpage) {
			VM_OBJECT_WLOCK(object);
			for (j = i; j < runend; j++) {
				vm_page_lock(m[j]);
				vm_page_free(m[j]);
				vm_page_unlock(m[j]);
			}
			VM_OBJECT_WUNLOCK(object);
		} else {
			if (runpg < (count - first)) {
				VM_OBJECT_WLOCK(object);
				for (i = first + runpg; i < count; i++) {
					vm_page_lock(m[i]);
					vm_page_free(m[i]);
					vm_page_unlock(m[i]);
				}
				VM_OBJECT_WUNLOCK(object);
				count = first + runpg;
			}
			break;
		}
		first = runend;
	}

	/*
	 * the first and last page have been calculated now, move input pages
	 * to be zero based...
	 */
	if (first != 0) {
		m += first;
		count -= first;
		reqpage -= first;
	}

	/*
	 * calculate the file virtual address for the transfer
	 */
	foff = IDX_TO_OFF(m[0]->pindex);

	/*
	 * calculate the size of the transfer
	 */
	size = count * PAGE_SIZE;
	KASSERT(count > 0, ("zero count"));
	if ((foff + size) > object->un_pager.vnp.vnp_size)
		size = object->un_pager.vnp.vnp_size - foff;
	KASSERT(size > 0, ("zero size"));

	/*
	 * round up physical size for real devices.
	 */
	{
		int secmask = bo->bo_bsize - 1;

		KASSERT(secmask < PAGE_SIZE && secmask > 0,
		    ("vnode_pager_generic_getpages: sector size %d too large",
		    secmask + 1));
		size = (size + secmask) & ~secmask;
	}

	bp = getpbuf(&vnode_pbuf_freecnt);
	bp->b_kvaalloc = bp->b_data;

	/*
	 * and map the pages to be read into the kva, if the filesystem
	 * requires mapped buffers.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
	    unmapped_buf_allowed) {
		bp->b_data = unmapped_buf;
		bp->b_kvabase = unmapped_buf;
		bp->b_offset = 0;
		bp->b_flags |= B_UNMAPPED;
	} else
		pmap_qenter((vm_offset_t)bp->b_kvaalloc, m, count);

	/* build a minimal buffer header */
	bp->b_iocmd = BIO_READ;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	bp->b_blkno = firstaddr;
	pbgetbo(bo, bp);
	bp->b_vp = vp;
	bp->b_bcount = size;
	bp->b_bufsize = size;
	bp->b_runningbufspace = bp->b_bufsize;
	for (i = 0; i < count; i++)
		bp->b_pages[i] = m[i];
	bp->b_npages = count;
	bp->b_pager.pg_reqpage = reqpage;
	atomic_add_long(&runningbufspace, bp->b_runningbufspace);

	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, count);

	/* do the input */
	bp->b_iooffset = dbtob(bp->b_blkno);

	if (iodone != NULL) { /* async */
		bp->b_pager.pg_iodone = iodone;
		bp->b_caller1 = arg;
		bp->b_iodone = vnode_pager_generic_getpages_done_async;
		bp->b_flags |= B_ASYNC;
		BUF_KERNPROC(bp);
		bstrategy(bp);
		/* Good bye! */
	} else {
		bp->b_iodone = bdone;
		bstrategy(bp);
		bwait(bp, PVM, "vnread");
		error = vnode_pager_generic_getpages_done(bp);
		for (i = 0; i < bp->b_npages; i++)
			bp->b_pages[i] = NULL;
		bp->b_vp = NULL;
		pbrelbo(bp);
		relpbuf(bp, &vnode_pbuf_freecnt);
	}

	return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
}

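/*
 * I/O completion handler for asynchronous getpages: finish the read,
 * invoke the caller's iodone callback, and release the pbuf.
 */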
static void
vnode_pager_generic_getpages_done_async(struct buf *bp)
{
	int error;

	error = vnode_pager_generic_getpages_done(bp);
	bp->b_pager.pg_iodone(bp->b_caller1, bp->b_pages,
	    bp->b_pager.pg_reqpage, error);
	for (int i = 0; i < bp->b_npages; i++)
		bp->b_pages[i] = NULL;
	bp->b_vp = NULL;
	pbrelbo(bp);
	relpbuf(bp, &vnode_pbuf_freecnt);
}

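/*
 * Common completion work for a getpages read: zero any short tail of the
 * transfer, unmap the buffer if needed, and mark each page valid according
 * to how much of it lies below EOF.
 */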
static int
vnode_pager_generic_getpages_done(struct buf *bp)
{
	vm_object_t object;
	off_t tfoff, nextoff;
	int i, error;

	error = (bp->b_ioflags & BIO_ERROR) != 0 ? EIO : 0;
	object = bp->b_vp->v_object;

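	/*
	 * If the transfer was clamped below the full page run (because it
	 * reached the end of file), zero the remainder of the buffer so no
	 * stale data is left in the final page; an unmapped buffer has to
	 * be mapped first.
	 */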
	if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
		if ((bp->b_flags & B_UNMAPPED) != 0) {
			bp->b_flags &= ~B_UNMAPPED;
			pmap_qenter((vm_offset_t)bp->b_kvaalloc, bp->b_pages,
			    bp->b_npages);
		}
		bzero(bp->b_kvaalloc + bp->b_bcount,
		    PAGE_SIZE * bp->b_npages - bp->b_bcount);
	}
	if ((bp->b_flags & B_UNMAPPED) == 0)
		pmap_qremove((vm_offset_t)bp->b_kvaalloc, bp->b_npages);
	if ((bp->b_vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0) {
		bp->b_data = bp->b_kvaalloc;
		bp->b_kvabase = bp->b_kvaalloc;
		bp->b_flags &= ~B_UNMAPPED;
	}

	VM_OBJECT_WLOCK(object);
	for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	    i < bp->b_npages; i++, tfoff = nextoff) {
		vm_page_t mt;

		nextoff = tfoff + PAGE_SIZE;
		mt = bp->b_pages[i];

		if (nextoff <= object->un_pager.vnp.vnp_size) {
			/*
			 * Read filled up entire page.
			 */
			mt->valid = VM_PAGE_BITS_ALL;
			KASSERT(mt->dirty == 0,
			    ("%s: page %p is dirty", __func__, mt));
			KASSERT(!pmap_page_is_mapped(mt),
			    ("%s: page %p is mapped", __func__, mt));
		} else {
			/*
			 * Read did not fill up entire page.
			 *
			 * Currently we do not set the entire page valid,
			 * we just try to clear the piece that we couldn't
			 * read.
			 */
			vm_page_set_valid_range(mt, 0,
			    object->un_pager.vnp.vnp_size - tfoff);
			KASSERT((mt->dirty & vm_page_bits(0,
			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
			    ("%s: page %p is dirty", __func__, mt));
		}

		if (i != bp->b_pager.pg_reqpage)
			vm_page_readahead_finish(mt);
	}
	VM_OBJECT_WUNLOCK(object);
	if (error != 0)
		printf("%s: I/O read error %d\n", __func__, error);

	return (error);
}

1092  * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
1093  * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call to
1094  * vnode_pager_generic_putpages() to implement the previous behaviour.
1095  *
1096  * All other FS's should use the bypass to get to the local media
1097  * backing vp's VOP_PUTPAGES.
1098  */
1099 static void
1100 vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1101     int flags, int *rtvals)
1102 {
1103 	int rtval;
1104 	struct vnode *vp;
1105 	int bytes = count * PAGE_SIZE;
1106 
1107 	/*
1108 	 * Force synchronous operation if we are extremely low on memory
1109 	 * to prevent a low-memory deadlock.  VOP operations often need to
1110 	 * allocate more memory to initiate the I/O ( i.e. do a BMAP
1111 	 * operation ).  The swapper handles the case by limiting the amount
1112 	 * of asynchronous I/O, but that sort of solution doesn't scale well
1113 	 * for the vnode pager without a lot of work.
1114 	 *
1115 	 * Also, the backing vnode's iodone routine may not wake the pageout
1116 	 * daemon up.  This should be probably be addressed XXX.
1117 	 */
1118 
1119 	if (vm_cnt.v_free_count + vm_cnt.v_cache_count <
1120 	    vm_cnt.v_pageout_free_min)
1121 		flags |= VM_PAGER_PUT_SYNC;
1122 
1123 	/*
1124 	 * Call device-specific putpages function
1125 	 */
1126 	vp = object->handle;
1127 	VM_OBJECT_WUNLOCK(object);
1128 	rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals);
1129 	KASSERT(rtval != EOPNOTSUPP,
1130 	    ("vnode_pager: stale FS putpages\n"));
1131 	VM_OBJECT_WLOCK(object);
1132 }

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_PUTPAGES.
 *
 * This is typically called indirectly via the pageout daemon and
 * clustering has already typically occurred, so in general we ask the
 * underlying filesystem to write the data out asynchronously rather
 * than delayed.
 */
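/*
 * (Sketch of typical usage: the stock VOP_PUTPAGES implementation,
 * vop_stdputpages(), simply forwards to this function.)
 */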
int
vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
    int flags, int *rtvals)
{
	int i;
	vm_object_t object;
	vm_page_t m;
	int count;

	int maxsize, ncount;
	vm_ooffset_t poffset;
	struct uio auio;
	struct iovec aiov;
	int error;
	int ioflags;
	int ppscheck = 0;
	static struct timeval lastfail;
	static int curfail;

	object = vp->v_object;
	count = bytecount / PAGE_SIZE;

	for (i = 0; i < count; i++)
		rtvals[i] = VM_PAGER_ERROR;

	if ((int64_t)ma[0]->pindex < 0) {
		printf("vnode_pager_putpages: attempt to write meta-data!!! -- 0x%lx(%lx)\n",
		    (long)ma[0]->pindex, (u_long)ma[0]->dirty);
		rtvals[0] = VM_PAGER_BAD;
		return (VM_PAGER_BAD);
	}

	maxsize = count * PAGE_SIZE;
	ncount = count;

	poffset = IDX_TO_OFF(ma[0]->pindex);

	/*
	 * If the page-aligned write is larger than the actual file we
	 * have to invalidate pages occurring beyond the file EOF.  However,
	 * there is an edge case where a file may not be page-aligned where
	 * the last page is partially invalid.  In this case the filesystem
	 * may not properly clear the dirty bits for the entire page (which
	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
	 * With the page locked we are free to fix-up the dirty bits here.
	 *
	 * We do not under any circumstances truncate the valid bits, as
	 * this will screw up bogus page replacement.
	 */
	VM_OBJECT_WLOCK(object);
	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
		if (object->un_pager.vnp.vnp_size > poffset) {
			int pgoff;

			maxsize = object->un_pager.vnp.vnp_size - poffset;
			ncount = btoc(maxsize);
			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
				/*
				 * If the object is locked and the following
				 * conditions hold, then the page's dirty
				 * field cannot be concurrently changed by a
				 * pmap operation.
				 */
				m = ma[ncount - 1];
				vm_page_assert_sbusied(m);
				KASSERT(!pmap_page_is_write_mapped(m),
		("vnode_pager_generic_putpages: page %p is not read-only", m));
				vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
				    pgoff);
			}
		} else {
			maxsize = 0;
			ncount = 0;
		}
		if (ncount < count) {
			for (i = ncount; i < count; i++) {
				rtvals[i] = VM_PAGER_BAD;
			}
		}
	}
	VM_OBJECT_WUNLOCK(object);

	/*
	 * pageouts are already clustered, use IO_ASYNC to force a bawrite()
	 * rather than a bdwrite() to prevent paging I/O from saturating
	 * the buffer cache.  Dummy-up the sequential heuristic to cause
	 * large ranges to cluster.  If neither IO_SYNC nor IO_ASYNC is set,
	 * the system decides how to cluster.
	 */
	ioflags = IO_VMIO;
	if (flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL))
		ioflags |= IO_SYNC;
	else if ((flags & VM_PAGER_CLUSTER_OK) == 0)
		ioflags |= IO_ASYNC;
	ioflags |= (flags & VM_PAGER_PUT_INVAL) ? IO_INVAL : 0;
	ioflags |= IO_SEQMAX << IO_SEQSHIFT;
	aiov.iov_base = NULL;
	aiov.iov_len = maxsize;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = poffset;
	auio.uio_segflg = UIO_NOCOPY;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = maxsize;
	auio.uio_td = NULL;
	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, ncount);

	if (error) {
		if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
			printf("vnode_pager_putpages: I/O error %d\n", error);
	}
	if (auio.uio_resid) {
		if (ppscheck || ppsratecheck(&lastfail, &curfail, 1))
			printf("vnode_pager_putpages: residual I/O %zd at %lu\n",
			    auio.uio_resid, (u_long)ma[0]->pindex);
	}
	for (i = 0; i < ncount; i++) {
		rtvals[i] = VM_PAGER_OK;
	}
	return (rtvals[0]);
}

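/*
 * After a partially successful pageout, mark the fully written pages
 * clean; a trailing partially written page keeps its dirty tail and is
 * reported back as VM_PAGER_AGAIN.
 */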
void
vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written)
{
	vm_object_t obj;
	int i, pos;

	if (written == 0)
		return;
	obj = ma[0]->object;
	VM_OBJECT_WLOCK(obj);
	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
		if (pos < trunc_page(written)) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(ma[i]);
		} else {
			/* Partially written page. */
			rtvals[i] = VM_PAGER_AGAIN;
			vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
		}
	}
	VM_OBJECT_WUNLOCK(obj);
}

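/*
 * Account for a change in the number of writable mappings of the object,
 * adjusting the vnode's v_writecount on the zero <-> non-zero transitions
 * of writemappings.
 */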
void
vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	vm_ooffset_t old_wm;

	VM_OBJECT_WLOCK(object);
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	old_wm = object->un_pager.vnp.writemappings;
	object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
	vp = object->handle;
	if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
		ASSERT_VOP_ELOCKED(vp, "v_writecount inc");
		VOP_ADD_WRITECOUNT(vp, 1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
		    __func__, vp, vp->v_writecount);
	} else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
		ASSERT_VOP_ELOCKED(vp, "v_writecount dec");
		VOP_ADD_WRITECOUNT(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	VM_OBJECT_WUNLOCK(object);
}

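/*
 * Drop writemappings for an unmapped writable range.  The final decrement
 * must also drop v_writecount, which requires the vnode lock, so that case
 * relocks and reuses vnode_pager_update_writecount() with the start and
 * end arguments swapped.
 */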
void
vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
    vm_offset_t end)
{
	struct vnode *vp;
	struct mount *mp;
	vm_offset_t inc;

	VM_OBJECT_WLOCK(object);

	/*
	 * First, recheck the object type to account for the race when
	 * the vnode is reclaimed.
	 */
	if (object->type != OBJT_VNODE) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}

	/*
	 * Optimize for the case when writemappings is not going to
	 * zero.
	 */
	inc = end - start;
	if (object->un_pager.vnp.writemappings != inc) {
		object->un_pager.vnp.writemappings -= inc;
		VM_OBJECT_WUNLOCK(object);
		return;
	}

	vp = object->handle;
	vhold(vp);
	VM_OBJECT_WUNLOCK(object);
	mp = NULL;
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * Decrement the object's writemappings, by swapping the start
	 * and end arguments for vnode_pager_update_writecount().  If
	 * there was not a race with vnode reclamation, then the
	 * vnode's v_writecount is decremented.
	 */
	vnode_pager_update_writecount(object, end, start);
	VOP_UNLOCK(vp, 0);
	vdrop(vp);
	if (mp != NULL)
		vn_finished_write(mp);
}