xref: /freebsd/sys/vm/swap_pager.c (revision 09e8dea79366f1e5b3a73e8a271b26e4b6bf2e6a)
1 /*
2  * Copyright (c) 1998 Matthew Dillon,
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1990 University of Utah.
5  * Copyright (c) 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *				New Swap System
41  *				Matthew Dillon
42  *
43  * Radix Bitmap 'blists'.
44  *
45  *	- The new swapper uses the new radix bitmap code.  This should scale
46  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
47  *	  arbitrary degree of fragmentation.
48  *
49  * Features:
50  *
51  *	- on the fly reallocation of swap during putpages.  The new system
52  *	  does not try to keep previously allocated swap blocks for dirty
53  *	  pages.
54  *
55  *	- on the fly deallocation of swap
56  *
57  *	- No more garbage collection required.  Unnecessarily allocated swap
58  *	  blocks only exist for dirty vm_page_t's now and these are already
59  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
60  *	  removal of invalidated swap blocks when a page is destroyed
61  *	  or renamed.
62  *
63  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64  *
65  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
66  *
67  * $FreeBSD$
68  */
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/conf.h>
73 #include <sys/kernel.h>
74 #include <sys/proc.h>
75 #include <sys/bio.h>
76 #include <sys/buf.h>
77 #include <sys/vnode.h>
78 #include <sys/malloc.h>
79 #include <sys/vmmeter.h>
80 #include <sys/sysctl.h>
81 #include <sys/blist.h>
82 #include <sys/lock.h>
83 #include <sys/sx.h>
85 
86 #ifndef MAX_PAGEOUT_CLUSTER
87 #define MAX_PAGEOUT_CLUSTER 16
88 #endif
89 
90 #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
91 
92 #include "opt_swap.h"
93 #include <vm/vm.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vm_object.h>
98 #include <vm/vm_page.h>
99 #include <vm/vm_pager.h>
100 #include <vm/vm_pageout.h>
101 #include <vm/swap_pager.h>
102 #include <vm/vm_extern.h>
103 #include <vm/uma.h>
104 
105 #define SWM_FREE	0x02	/* free, period			*/
106 #define SWM_POP		0x04	/* pop out			*/
107 
108 /*
109  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
110  * in the old system.
111  */
112 extern int vm_swap_size;	/* number of free swap blocks, in pages */
113 
114 int swap_pager_full;		/* swap space exhaustion (task killing) */
115 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
116 static int nsw_rcount;		/* free read buffers			*/
117 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
118 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
119 static int nsw_wcount_async_max;/* assigned maximum			*/
120 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
121 
122 struct blist *swapblist;
123 static struct swblock **swhash;
124 static int swhash_mask;
125 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
126 static struct sx sw_alloc_sx;
127 
128 /* from vm_swap.c */
129 extern struct vnode *swapdev_vp;
130 extern struct swdevt *swdevt;
131 extern int nswdev;
132 
133 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
134         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
135 
136 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)
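
/*
 * Editor's note: a minimal userland sketch of the BLK2DEVIDX() interleave
 * arithmetic above, not code from this file.  The dmmax and nswdev values
 * are illustrative assumptions (dmmax is really computed at run time in
 * swap_pager_init()): consecutive dmmax-sized runs of swap blocks rotate
 * round-robin across the configured swap devices.
 */
#include <stdio.h>

static int nswdev = 2;			/* assumed: two swap devices */
static int dmmax = 32;			/* assumed: SWB_NPAGES * 2 */

static int
blk2devidx(long blk)			/* hypothetical helper name */
{
	return (nswdev > 1 ? blk / dmmax % nswdev : 0);
}

int
main(void)
{
	/* Blocks 0..31 map to device 0, 32..63 to device 1, then repeat. */
	printf("blk  0 -> dev %d\n", blk2devidx(0));	/* 0 */
	printf("blk 40 -> dev %d\n", blk2devidx(40));	/* 1 */
	printf("blk 64 -> dev %d\n", blk2devidx(64));	/* 0 */
	return (0);
}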
137 
138 /*
139  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
140  * of searching a named list by hashing it just a little.
141  */
142 
143 #define NOBJLISTS		8
144 
145 #define NOBJLIST(handle)	\
146 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
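
/*
 * Editor's note: an illustrative userland model of NOBJLIST() above, not
 * code from this file.  The handle is typically a pointer, so the low
 * four bits are discarded (they are usually zero due to alignment) and
 * the next three bits select one of the NOBJLISTS buckets.
 */
#include <stdint.h>
#include <stdio.h>

#define NOBJLISTS	8

static int
nobjlist_index(void *handle)		/* hypothetical helper name */
{
	return (((int)(intptr_t)handle >> 4) & (NOBJLISTS - 1));
}

int
main(void)
{
	int dummy;

	printf("handle %p -> list %d\n", (void *)&dummy,
	    nobjlist_index(&dummy));
	return (0);
}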
147 
148 static struct mtx sw_alloc_mtx;	/* protect list manipulation */
149 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
150 struct pagerlst		swap_pager_un_object_list;
151 uma_zone_t		swap_zone;
152 
153 /*
154  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
155  * calls hooked from other parts of the VM system and do not appear here.
156  * (see vm/swap_pager.h).
157  */
158 static vm_object_t
159 		swap_pager_alloc(void *handle, vm_ooffset_t size,
160 				      vm_prot_t prot, vm_ooffset_t offset);
161 static void	swap_pager_dealloc(vm_object_t object);
162 static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
163 static void	swap_pager_init(void);
164 static void	swap_pager_unswapped(vm_page_t);
165 static void	swap_pager_strategy(vm_object_t, struct bio *);
166 
167 struct pagerops swappagerops = {
168 	swap_pager_init,	/* early system initialization of pager	*/
169 	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
170 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
171 	swap_pager_getpages,	/* pagein				*/
172 	swap_pager_putpages,	/* pageout				*/
173 	swap_pager_haspage,	/* get backing store status for page	*/
174 	swap_pager_unswapped,	/* remove swap related to page		*/
175 	swap_pager_strategy	/* pager strategy call			*/
176 };
177 
178 static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
179 static void flushchainbuf(struct buf *nbp);
180 static void waitchainbuf(struct bio *bp, int count, int done);
181 
182 /*
183  * dmmax is in page-sized chunks with the new swap system.  It was
 184  *	DEV_BSIZE'd chunks in the old system.  dmmax is always a power of 2.
185  *
186  * swap_*() routines are externally accessible.  swp_*() routines are
187  * internal.
188  */
189 int dmmax;
190 static int dmmax_mask;
191 int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
192 int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
193 
194 SYSCTL_INT(_vm, OID_AUTO, dmmax,
195 	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");
196 
197 static __inline void	swp_sizecheck(void);
198 static void	swp_pager_sync_iodone(struct buf *bp);
199 static void	swp_pager_async_iodone(struct buf *bp);
200 
201 /*
202  * Swap bitmap functions
203  */
204 static __inline void	swp_pager_freeswapspace(daddr_t blk, int npages);
205 static __inline daddr_t	swp_pager_getswapspace(int npages);
206 
207 /*
208  * Metadata functions
209  */
210 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
211 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
212 static void swp_pager_meta_free_all(vm_object_t);
213 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
214 
215 /*
216  * SWP_SIZECHECK() -	update swap_pager_full indication
217  *
218  *	update the swap_pager_almost_full indication and warn when we are
219  *	about to run out of swap space, using lowat/hiwat hysteresis.
220  *
221  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
222  *
223  *	No restrictions on call
224  *	This routine may not block.
225  *	This routine must be called at splvm()
226  */
227 static __inline void
228 swp_sizecheck()
229 {
230 	GIANT_REQUIRED;
231 
232 	if (vm_swap_size < nswap_lowat) {
233 		if (swap_pager_almost_full == 0) {
234 			printf("swap_pager: out of swap space\n");
235 			swap_pager_almost_full = 1;
236 		}
237 	} else {
238 		swap_pager_full = 0;
239 		if (vm_swap_size > nswap_hiwat)
240 			swap_pager_almost_full = 0;
241 	}
242 }
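
/*
 * Editor's note: a standalone userland sketch of the lowat/hiwat
 * hysteresis implemented above, using the default thresholds of 128 and
 * 512 pages.  The "almost full" flag latches on below the low watermark
 * and only clears again above the high watermark, so it cannot flap
 * while free swap wanders between the two.
 */
#include <stdio.h>

static int almost_full;

static void
sizecheck(int swap_size)	/* hypothetical model of swp_sizecheck() */
{
	if (swap_size < 128)
		almost_full = 1;
	else if (swap_size > 512)
		almost_full = 0;
}

int
main(void)
{
	int sizes[] = { 600, 100, 300, 513 };
	int i;

	for (i = 0; i < 4; i++) {
		sizecheck(sizes[i]);
		printf("free %3d -> almost_full %d\n", sizes[i], almost_full);
	}
	/* prints 0, 1, 1 (latched between watermarks), 0 */
	return (0);
}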
243 
244 /*
245  * SWAP_PAGER_INIT() -	initialize the swap pager!
246  *
247  *	Expected to be started from system init.  NOTE:  This code is run
248  *	before much else so be careful what you depend on.  Most of the VM
249  *	system has yet to be initialized at this point.
250  */
251 static void
252 swap_pager_init()
253 {
254 	/*
255 	 * Initialize object lists
256 	 */
257 	int i;
258 
259 	for (i = 0; i < NOBJLISTS; ++i)
260 		TAILQ_INIT(&swap_pager_object_list[i]);
261 	TAILQ_INIT(&swap_pager_un_object_list);
262 	mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
263 
264 	/*
265 	 * Device Stripe, in PAGE_SIZE'd blocks
266 	 */
267 	dmmax = SWB_NPAGES * 2;
268 	dmmax_mask = ~(dmmax - 1);
269 }
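
/*
 * Editor's note: a userland sketch of the stripe mask set up above, not
 * code from this file.  With the default MAX_PAGEOUT_CLUSTER of 16,
 * dmmax is 32 pages; two block numbers lie in the same device stripe
 * exactly when they agree under dmmax_mask.
 */
#include <stdio.h>

int
main(void)
{
	int dmmax = 16 * 2;		/* SWB_NPAGES * 2 */
	int dmmax_mask = ~(dmmax - 1);

	/* 5 and 30 share a stripe; 30 and 33 straddle a boundary. */
	printf("same stripe(5,30):  %d\n", ((5 ^ 30) & dmmax_mask) == 0);
	printf("same stripe(30,33): %d\n", ((30 ^ 33) & dmmax_mask) == 0);
	return (0);
}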
270 
271 /*
272  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
273  *
274  *	Expected to be started from pageout process once, prior to entering
275  *	its main loop.
276  */
277 void
278 swap_pager_swap_init()
279 {
280 	int n, n2;
281 
282 	/*
283 	 * Number of in-transit swap bp operations.  Don't
284 	 * exhaust the pbufs completely.  Make sure we
285 	 * initialize workable values (0 will work for hysteresis
286 	 * but it isn't very efficient).
287 	 *
288 	 * The nsw_cluster_max is constrained by the bp->b_pages[]
289 	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
290 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
291 	 * constrained by the swap device interleave stripe size.
292 	 *
293 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
294 	 * designed to prevent other I/O from having high latencies due to
295 	 * our pageout I/O.  The value 4 works well for one or two active swap
296 	 * devices but is probably a little low if you have more.  Even so,
297 	 * a higher value would probably generate only a limited improvement
298 	 * with three or four active swap devices since the system does not
299 	 * typically have to pageout at extreme bandwidths.   We will want
 300 	 * at least 2 per swap device, and 4 is a pretty good value if you
301 	 * have one NFS swap device due to the command/ack latency over NFS.
302 	 * So it all works out pretty well.
303 	 */
304 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
305 
306 	mtx_lock(&pbuf_mtx);
307 	nsw_rcount = (nswbuf + 1) / 2;
308 	nsw_wcount_sync = (nswbuf + 3) / 4;
309 	nsw_wcount_async = 4;
310 	nsw_wcount_async_max = nsw_wcount_async;
311 	mtx_unlock(&pbuf_mtx);
312 
313 	/*
314 	 * Initialize our zone.  Right now I'm just guessing on the number
315 	 * we need based on the number of pages in the system.  Each swblock
316 	 * can hold 16 pages, so this is probably overkill.  This reservation
317 	 * is typically limited to around 70MB by default.
318 	 */
319 	n = cnt.v_page_count;
320 	if (maxswzone && n > maxswzone / sizeof(struct swblock))
321 		n = maxswzone / sizeof(struct swblock);
322 	n2 = n;
323 	swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
324 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
325 	do {
326 		if (uma_zone_set_obj(swap_zone, NULL, n))
327 			break;
328 		/*
329 		 * if the allocation failed, try a zone two thirds the
330 		 * size of the previous attempt.
331 		 */
332 		n -= ((n + 2) / 3);
333 	} while (n > 0);
334 	if (swap_zone == NULL)
335 		panic("failed to create swap_zone.");
336 	if (n2 != n)
337 		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
338 	n2 = n;
339 
340 	/*
341 	 * Initialize our meta-data hash table.  The swapper does not need to
342 	 * be quite as efficient as the VM system, so we do not use an
343 	 * oversized hash table.
344 	 *
345 	 * 	n: 		size of hash table, must be power of 2
346 	 *	swhash_mask:	hash table index mask
347 	 */
348 	for (n = 1; n < n2 / 8; n *= 2)
349 		;
350 	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
351 	swhash_mask = n - 1;
352 }
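
/*
 * Editor's note: a userland sketch of the hash-sizing loop above.  For
 * an assumed n2 of 100000 swblock entries (purely illustrative), n grows
 * to the smallest power of two >= n2/8, giving 16384 buckets and a mask
 * of 0x3fff.
 */
#include <stdio.h>

int
main(void)
{
	int n, n2 = 100000;		/* assumed entry count */

	for (n = 1; n < n2 / 8; n *= 2)
		;
	printf("swhash buckets %d, mask 0x%x\n", n, n - 1);
	return (0);
}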
353 
354 /*
355  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
356  *			its metadata structures.
357  *
358  *	This routine is called from the mmap and fork code to create a new
359  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
360  *	and then converting it with swp_pager_meta_build().
361  *
362  *	This routine may block in vm_object_allocate() and create a named
363  *	object lookup race, so we must interlock.   We must also run at
364  *	splvm() for the object lookup to handle races with interrupts, but
365  *	we do not have to maintain splvm() in between the lookup and the
366  *	add because (I believe) it is not possible to attempt to create
367  *	a new swap object w/handle when a default object with that handle
368  *	already exists.
369  *
370  * MPSAFE
371  */
372 static vm_object_t
373 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
374 		 vm_ooffset_t offset)
375 {
376 	vm_object_t object;
377 
378 	mtx_lock(&Giant);
379 	if (handle) {
380 		/*
381 		 * Reference existing named region or allocate new one.  There
382 		 * should not be a race here against swp_pager_meta_build()
383 		 * as called from vm_page_remove() in regards to the lookup
384 		 * of the handle.
385 		 */
386 		sx_xlock(&sw_alloc_sx);
387 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
388 
389 		if (object != NULL) {
390 			vm_object_reference(object);
391 		} else {
392 			object = vm_object_allocate(OBJT_DEFAULT,
393 				OFF_TO_IDX(offset + PAGE_MASK + size));
394 			object->handle = handle;
395 
396 			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
397 		}
398 		sx_xunlock(&sw_alloc_sx);
399 	} else {
400 		object = vm_object_allocate(OBJT_DEFAULT,
401 			OFF_TO_IDX(offset + PAGE_MASK + size));
402 
403 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
404 	}
405 	mtx_unlock(&Giant);
406 	return (object);
407 }
408 
409 /*
410  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
411  *
412  *	The swap backing for the object is destroyed.  The code is
413  *	designed such that we can reinstantiate it later, but this
414  *	routine is typically called only when the entire object is
415  *	about to be destroyed.
416  *
 417  *	This routine once could block, but no longer does.
418  *
419  *	The object must be locked or unreferenceable.
420  */
421 static void
422 swap_pager_dealloc(object)
423 	vm_object_t object;
424 {
425 	int s;
426 
427 	GIANT_REQUIRED;
428 
429 	/*
430 	 * Remove from list right away so lookups will fail if we block for
431 	 * pageout completion.
432 	 */
433 	mtx_lock(&sw_alloc_mtx);
434 	if (object->handle == NULL) {
435 		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
436 	} else {
437 		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
438 	}
439 	mtx_unlock(&sw_alloc_mtx);
440 
441 	vm_object_pip_wait(object, "swpdea");
442 
443 	/*
444 	 * Free all remaining metadata.  We only bother to free it from
445 	 * the swap meta data.  We do not attempt to free swapblk's still
446 	 * associated with vm_page_t's for this object.  We do not care
447 	 * if paging is still in progress on some objects.
448 	 */
449 	s = splvm();
450 	swp_pager_meta_free_all(object);
451 	splx(s);
452 }
453 
454 /************************************************************************
455  *			SWAP PAGER BITMAP ROUTINES			*
456  ************************************************************************/
457 
458 /*
459  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
460  *
461  *	Allocate swap for the requested number of pages.  The starting
462  *	swap block number (a page index) is returned or SWAPBLK_NONE
463  *	if the allocation failed.
464  *
465  *	Also has the side effect of advising that somebody made a mistake
466  *	when they configured swap and didn't configure enough.
467  *
468  *	Must be called at splvm() to avoid races with bitmap frees from
469  *	vm_page_remove() aka swap_pager_page_removed().
470  *
471  *	This routine may not block
472  *	This routine must be called at splvm().
473  */
474 static __inline daddr_t
475 swp_pager_getswapspace(npages)
476 	int npages;
477 {
478 	daddr_t blk;
479 
480 	GIANT_REQUIRED;
481 
482 	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
483 		if (swap_pager_full != 2) {
484 			printf("swap_pager_getswapspace: failed\n");
485 			swap_pager_full = 2;
486 			swap_pager_almost_full = 1;
487 		}
488 	} else {
489 		vm_swap_size -= npages;
490 		/* per-swap area stats */
491 		swdevt[BLK2DEVIDX(blk)].sw_used += npages;
492 		swp_sizecheck();
493 	}
494 	return (blk);
495 }
496 
497 /*
498  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
499  *
500  *	This routine returns the specified swap blocks back to the bitmap.
501  *
502  *	Note:  This routine may not block (it could in the old swap code),
503  *	and through the use of the new blist routines it does not block.
504  *
505  *	We must be called at splvm() to avoid races with bitmap frees from
506  *	vm_page_remove() aka swap_pager_page_removed().
507  *
508  *	This routine may not block
509  *	This routine must be called at splvm().
510  */
511 static __inline void
512 swp_pager_freeswapspace(blk, npages)
513 	daddr_t blk;
514 	int npages;
515 {
516 	GIANT_REQUIRED;
517 
518 	blist_free(swapblist, blk, npages);
519 	vm_swap_size += npages;
520 	/* per-swap area stats */
521 	swdevt[BLK2DEVIDX(blk)].sw_used -= npages;
522 	swp_sizecheck();
523 }
524 
525 /*
526  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
527  *				range within an object.
528  *
529  *	This is a globally accessible routine.
530  *
531  *	This routine removes swapblk assignments from swap metadata.
532  *
533  *	The external callers of this routine typically have already destroyed
534  *	or renamed vm_page_t's associated with this range in the object so
535  *	we should be ok.
536  *
537  *	This routine may be called at any spl.  We up our spl to splvm temporarily
538  *	in order to perform the metadata removal.
539  */
540 void
541 swap_pager_freespace(object, start, size)
542 	vm_object_t object;
543 	vm_pindex_t start;
544 	vm_size_t size;
545 {
546 	int s = splvm();
547 
548 	GIANT_REQUIRED;
549 	swp_pager_meta_free(object, start, size);
550 	splx(s);
551 }
552 
553 /*
554  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
555  *
556  *	Assigns swap blocks to the specified range within the object.  The
 557  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
558  *
559  *	Returns 0 on success, -1 on failure.
560  */
561 int
562 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
563 {
564 	int s;
565 	int n = 0;
566 	daddr_t blk = SWAPBLK_NONE;
567 	vm_pindex_t beg = start;	/* save start index */
568 
569 	s = splvm();
570 	while (size) {
571 		if (n == 0) {
572 			n = BLIST_MAX_ALLOC;
573 			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
574 				n >>= 1;
575 				if (n == 0) {
576 					swp_pager_meta_free(object, beg, start - beg);
577 					splx(s);
578 					return (-1);
579 				}
580 			}
581 		}
582 		swp_pager_meta_build(object, start, blk);
583 		--size;
584 		++start;
585 		++blk;
586 		--n;
587 	}
 588 	if (n != 0) swp_pager_freeswapspace(blk, n);	/* don't leak the unused tail of the last allocation */
589 	splx(s);
590 	return (0);
591 }
592 
593 /*
594  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
595  *			and destroy the source.
596  *
597  *	Copy any valid swapblks from the source to the destination.  In
598  *	cases where both the source and destination have a valid swapblk,
599  *	we keep the destination's.
600  *
601  *	This routine is allowed to block.  It may block allocating metadata
602  *	indirectly through swp_pager_meta_build() or if paging is still in
603  *	progress on the source.
604  *
605  *	This routine can be called at any spl
606  *
607  *	XXX vm_page_collapse() kinda expects us not to block because we
608  *	supposedly do not need to allocate memory, but for the moment we
609  *	*may* have to get a little memory from the zone allocator, but
610  *	it is taken from the interrupt memory.  We should be ok.
611  *
612  *	The source object contains no vm_page_t's (which is just as well)
613  *
614  *	The source object is of type OBJT_SWAP.
615  *
616  *	The source and destination objects must be locked or
617  *	inaccessible (XXX are they ?)
618  */
619 void
620 swap_pager_copy(srcobject, dstobject, offset, destroysource)
621 	vm_object_t srcobject;
622 	vm_object_t dstobject;
623 	vm_pindex_t offset;
624 	int destroysource;
625 {
626 	vm_pindex_t i;
627 	int s;
628 
629 	GIANT_REQUIRED;
630 
631 	s = splvm();
632 	/*
633 	 * If destroysource is set, we remove the source object from the
634 	 * swap_pager internal queue now.
635 	 */
636 	if (destroysource) {
637 		mtx_lock(&sw_alloc_mtx);
638 		if (srcobject->handle == NULL) {
639 			TAILQ_REMOVE(
640 			    &swap_pager_un_object_list,
641 			    srcobject,
642 			    pager_object_list
643 			);
644 		} else {
645 			TAILQ_REMOVE(
646 			    NOBJLIST(srcobject->handle),
647 			    srcobject,
648 			    pager_object_list
649 			);
650 		}
651 		mtx_unlock(&sw_alloc_mtx);
652 	}
653 
654 	/*
655 	 * transfer source to destination.
656 	 */
657 	for (i = 0; i < dstobject->size; ++i) {
658 		daddr_t dstaddr;
659 
660 		/*
661 		 * Locate (without changing) the swapblk on the destination,
662 		 * unless it is invalid in which case free it silently, or
663 		 * if the destination is a resident page, in which case the
664 		 * source is thrown away.
665 		 */
666 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
667 
668 		if (dstaddr == SWAPBLK_NONE) {
669 			/*
670 			 * Destination has no swapblk and is not resident,
671 			 * copy source.
672 			 */
673 			daddr_t srcaddr;
674 
675 			srcaddr = swp_pager_meta_ctl(
676 			    srcobject,
677 			    i + offset,
678 			    SWM_POP
679 			);
680 
681 			if (srcaddr != SWAPBLK_NONE)
682 				swp_pager_meta_build(dstobject, i, srcaddr);
683 		} else {
684 			/*
685 			 * Destination has valid swapblk or it is represented
 686 			 * by a resident page.  We destroy the source block.
687 			 */
688 
689 			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
690 		}
691 	}
692 
693 	/*
694 	 * Free left over swap blocks in source.
695 	 *
 696 	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
697 	 * double-remove the object from the swap queues.
698 	 */
699 	if (destroysource) {
700 		swp_pager_meta_free_all(srcobject);
701 		/*
702 		 * Reverting the type is not necessary, the caller is going
703 		 * to destroy srcobject directly, but I'm doing it here
704 		 * for consistency since we've removed the object from its
705 		 * queues.
706 		 */
707 		srcobject->type = OBJT_DEFAULT;
708 	}
709 	splx(s);
710 }
711 
712 /*
713  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
714  *				the requested page.
715  *
716  *	We determine whether good backing store exists for the requested
717  *	page and return TRUE if it does, FALSE if it doesn't.
718  *
719  *	If TRUE, we also try to determine how much valid, contiguous backing
720  *	store exists before and after the requested page within a reasonable
721  *	distance.  We do not try to restrict it to the swap device stripe
722  *	(that is handled in getpages/putpages).  It probably isn't worth
723  *	doing here.
724  */
725 boolean_t
726 swap_pager_haspage(object, pindex, before, after)
727 	vm_object_t object;
728 	vm_pindex_t pindex;
729 	int *before;
730 	int *after;
731 {
732 	daddr_t blk0;
733 	int s;
734 
735 	/*
736 	 * do we have good backing store at the requested index ?
737 	 */
738 	s = splvm();
739 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
740 
741 	if (blk0 == SWAPBLK_NONE) {
742 		splx(s);
743 		if (before)
744 			*before = 0;
745 		if (after)
746 			*after = 0;
747 		return (FALSE);
748 	}
749 
750 	/*
751 	 * find backwards-looking contiguous good backing store
752 	 */
753 	if (before != NULL) {
754 		int i;
755 
756 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
757 			daddr_t blk;
758 
759 			if (i > pindex)
760 				break;
761 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
762 			if (blk != blk0 - i)
763 				break;
764 		}
765 		*before = (i - 1);
766 	}
767 
768 	/*
769 	 * find forward-looking contiguous good backing store
770 	 */
771 	if (after != NULL) {
772 		int i;
773 
774 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
775 			daddr_t blk;
776 
777 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
778 			if (blk != blk0 + i)
779 				break;
780 		}
781 		*after = (i - 1);
782 	}
783 	splx(s);
784 	return (TRUE);
785 }
786 
787 /*
788  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
789  *
790  *	This removes any associated swap backing store, whether valid or
791  *	not, from the page.
792  *
793  *	This routine is typically called when a page is made dirty, at
794  *	which point any associated swap can be freed.  MADV_FREE also
795  *	calls us in a special-case situation
796  *
797  *	NOTE!!!  If the page is clean and the swap was valid, the caller
798  *	should make the page dirty before calling this routine.  This routine
799  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
800  *	depends on it.
801  *
802  *	This routine may not block
803  *	This routine must be called at splvm()
804  */
805 static void
806 swap_pager_unswapped(m)
807 	vm_page_t m;
808 {
809 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
810 }
811 
812 /*
813  * SWAP_PAGER_STRATEGY() - read, write, free blocks
814  *
815  *	This implements the vm_pager_strategy() interface to swap and allows
816  *	other parts of the system to directly access swap as backing store
817  *	through vm_objects of type OBJT_SWAP.  This is intended to be a
818  *	cacheless interface ( i.e. caching occurs at higher levels ).
819  *	Therefore we do not maintain any resident pages.  All I/O goes
820  *	directly to and from the swap device.
821  *
822  *	Note that b_blkno is scaled for PAGE_SIZE
823  *
824  *	We currently attempt to run I/O synchronously or asynchronously as
 825  *	the caller requests.  This isn't perfect because we lose error
826  *	sequencing when we run multiple ops in parallel to satisfy a request.
827  *	But this is swap, so we let it all hang out.
828  */
829 static void
830 swap_pager_strategy(vm_object_t object, struct bio *bp)
831 {
832 	vm_pindex_t start;
833 	int count;
834 	int s;
835 	char *data;
836 	struct buf *nbp = NULL;
837 
838 	GIANT_REQUIRED;
839 
840 	/* XXX: KASSERT instead ? */
841 	if (bp->bio_bcount & PAGE_MASK) {
842 		biofinish(bp, NULL, EINVAL);
843 		printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
844 		return;
845 	}
846 
847 	/*
848 	 * Clear error indication, initialize page index, count, data pointer.
849 	 */
850 	bp->bio_error = 0;
851 	bp->bio_flags &= ~BIO_ERROR;
852 	bp->bio_resid = bp->bio_bcount;
853 	*(u_int *) &bp->bio_driver1 = 0;
854 
855 	start = bp->bio_pblkno;
856 	count = howmany(bp->bio_bcount, PAGE_SIZE);
857 	data = bp->bio_data;
858 
859 	s = splvm();
860 
861 	/*
862 	 * Deal with BIO_DELETE
863 	 */
864 	if (bp->bio_cmd == BIO_DELETE) {
865 		/*
866 		 * FREE PAGE(s) - destroy underlying swap that is no longer
867 		 *		  needed.
868 		 */
869 		swp_pager_meta_free(object, start, count);
870 		splx(s);
871 		bp->bio_resid = 0;
872 		biodone(bp);
873 		return;
874 	}
875 
876 	/*
877 	 * Execute read or write
878 	 */
879 	while (count > 0) {
880 		daddr_t blk;
881 
882 		/*
883 		 * Obtain block.  If block not found and writing, allocate a
884 		 * new block and build it into the object.
885 		 */
886 
887 		blk = swp_pager_meta_ctl(object, start, 0);
888 		if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
889 			blk = swp_pager_getswapspace(1);
890 			if (blk == SWAPBLK_NONE) {
891 				bp->bio_error = ENOMEM;
892 				bp->bio_flags |= BIO_ERROR;
893 				break;
894 			}
895 			swp_pager_meta_build(object, start, blk);
896 		}
897 
898 		/*
899 		 * Do we have to flush our current collection?  Yes if:
900 		 *
901 		 *	- no swap block at this index
902 		 *	- swap block is not contiguous
 903 		 *	- we cross a physical disk boundary in the
904 		 *	  stripe.
905 		 */
906 		if (
907 		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
908 		     ((nbp->b_blkno ^ blk) & dmmax_mask)
909 		    )
910 		) {
911 			splx(s);
912 			if (bp->bio_cmd == BIO_READ) {
913 				++cnt.v_swapin;
914 				cnt.v_swappgsin += btoc(nbp->b_bcount);
915 			} else {
916 				++cnt.v_swapout;
917 				cnt.v_swappgsout += btoc(nbp->b_bcount);
918 				nbp->b_dirtyend = nbp->b_bcount;
919 			}
920 			flushchainbuf(nbp);
921 			s = splvm();
922 			nbp = NULL;
923 		}
924 
925 		/*
926 		 * Add new swapblk to nbp, instantiating nbp if necessary.
927 		 * Zero-fill reads are able to take a shortcut.
928 		 */
929 		if (blk == SWAPBLK_NONE) {
930 			/*
931 			 * We can only get here if we are reading.  Since
932 			 * we are at splvm() we can safely modify b_resid,
933 			 * even if chain ops are in progress.
934 			 */
935 			bzero(data, PAGE_SIZE);
936 			bp->bio_resid -= PAGE_SIZE;
937 		} else {
938 			if (nbp == NULL) {
939 				nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
940 				nbp->b_blkno = blk;
941 				nbp->b_bcount = 0;
942 				nbp->b_data = data;
943 			}
944 			nbp->b_bcount += PAGE_SIZE;
945 		}
946 		--count;
947 		++start;
948 		data += PAGE_SIZE;
949 	}
950 
951 	/*
952 	 *  Flush out last buffer
953 	 */
954 	splx(s);
955 
956 	if (nbp) {
957 		if (nbp->b_iocmd == BIO_READ) {
958 			++cnt.v_swapin;
959 			cnt.v_swappgsin += btoc(nbp->b_bcount);
960 		} else {
961 			++cnt.v_swapout;
962 			cnt.v_swappgsout += btoc(nbp->b_bcount);
963 			nbp->b_dirtyend = nbp->b_bcount;
964 		}
965 		flushchainbuf(nbp);
966 		/* nbp = NULL; */
967 	}
968 	/*
969 	 * Wait for completion.
970 	 */
971 	waitchainbuf(bp, 0, 1);
972 }
973 
974 /*
975  * SWAP_PAGER_GETPAGES() - bring pages in from swap
976  *
977  *	Attempt to retrieve (m, count) pages from backing store, but make
978  *	sure we retrieve at least m[reqpage].  We try to load in as large
979  *	a chunk surrounding m[reqpage] as is contiguous in swap and which
980  *	belongs to the same object.
981  *
982  *	The code is designed for asynchronous operation and
983  *	immediate-notification of 'reqpage' but tends not to be
984  *	used that way.  Please do not optimize-out this algorithmic
985  *	feature, I intend to improve on it in the future.
986  *
987  *	The parent has a single vm_object_pip_add() reference prior to
988  *	calling us and we should return with the same.
989  *
990  *	The parent has BUSY'd the pages.  We should return with 'm'
991  *	left busy, but the others adjusted.
992  */
993 static int
994 swap_pager_getpages(object, m, count, reqpage)
995 	vm_object_t object;
996 	vm_page_t *m;
997 	int count, reqpage;
998 {
999 	struct buf *bp;
1000 	vm_page_t mreq;
1001 	int s;
1002 	int i;
1003 	int j;
1004 	daddr_t blk;
1005 	vm_offset_t kva;
1006 	vm_pindex_t lastpindex;
1007 
1008 	GIANT_REQUIRED;
1009 
1010 	mreq = m[reqpage];
1011 
1012 	if (mreq->object != object) {
1013 		panic("swap_pager_getpages: object mismatch %p/%p",
1014 		    object,
1015 		    mreq->object
1016 		);
1017 	}
1018 	/*
1019 	 * Calculate range to retrieve.  The pages have already been assigned
1020 	 * their swapblks.  We require a *contiguous* range that falls entirely
1021 	 * within a single device stripe.   If we do not supply it, bad things
1022 	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1023 	 * loops are set up such that the case(s) are handled implicitly.
1024 	 *
1025 	 * The swp_*() calls must be made at splvm().  vm_page_free() does
1026 	 * not need to be, but it will go a little faster if it is.
1027 	 */
1028 	s = splvm();
1029 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1030 
1031 	for (i = reqpage - 1; i >= 0; --i) {
1032 		daddr_t iblk;
1033 
1034 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1035 		if (blk != iblk + (reqpage - i))
1036 			break;
1037 		if ((blk ^ iblk) & dmmax_mask)
1038 			break;
1039 	}
1040 	++i;
1041 
1042 	for (j = reqpage + 1; j < count; ++j) {
1043 		daddr_t jblk;
1044 
1045 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1046 		if (blk != jblk - (j - reqpage))
1047 			break;
1048 		if ((blk ^ jblk) & dmmax_mask)
1049 			break;
1050 	}
1051 
1052 	/*
1053 	 * free pages outside our collection range.   Note: we never free
1054 	 * mreq, it must remain busy throughout.
1055 	 */
1056 	{
1057 		int k;
1058 
1059 		for (k = 0; k < i; ++k)
1060 			vm_page_free(m[k]);
1061 		for (k = j; k < count; ++k)
1062 			vm_page_free(m[k]);
1063 	}
1064 	splx(s);
1065 
1066 
1067 	/*
1068 	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
1069 	 * still busy, but the others unbusied.
1070 	 */
1071 	if (blk == SWAPBLK_NONE)
1072 		return (VM_PAGER_FAIL);
1073 
1074 	/*
1075 	 * Get a swap buffer header to perform the IO
1076 	 */
1077 	bp = getpbuf(&nsw_rcount);
1078 	kva = (vm_offset_t) bp->b_data;
1079 
1080 	/*
1081 	 * map our page(s) into kva for input
1082 	 *
1083 	 * NOTE: B_PAGING is set by pbgetvp()
1084 	 */
1085 	pmap_qenter(kva, m + i, j - i);
1086 
1087 	bp->b_iocmd = BIO_READ;
1088 	bp->b_iodone = swp_pager_async_iodone;
1089 	bp->b_rcred = crhold(thread0.td_ucred);
1090 	bp->b_wcred = crhold(thread0.td_ucred);
1091 	bp->b_data = (caddr_t) kva;
1092 	bp->b_blkno = blk - (reqpage - i);
1093 	bp->b_bcount = PAGE_SIZE * (j - i);
1094 	bp->b_bufsize = PAGE_SIZE * (j - i);
1095 	bp->b_pager.pg_reqpage = reqpage - i;
1096 
1097 	{
1098 		int k;
1099 
1100 		for (k = i; k < j; ++k) {
1101 			bp->b_pages[k - i] = m[k];
1102 			vm_page_flag_set(m[k], PG_SWAPINPROG);
1103 		}
1104 	}
1105 	bp->b_npages = j - i;
1106 
1107 	pbgetvp(swapdev_vp, bp);
1108 
1109 	cnt.v_swapin++;
1110 	cnt.v_swappgsin += bp->b_npages;
1111 
1112 	/*
1113 	 * We still hold the lock on mreq, and our automatic completion routine
1114 	 * does not remove it.
1115 	 */
1116 	vm_object_pip_add(mreq->object, bp->b_npages);
1117 	lastpindex = m[j-1]->pindex;
1118 
1119 	/*
1120 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1121 	 * this point because we automatically release it on completion.
1122 	 * Instead, we look at the one page we are interested in which we
1123 	 * still hold a lock on even through the I/O completion.
1124 	 *
1125 	 * The other pages in our m[] array are also released on completion,
1126 	 * so we cannot assume they are valid anymore either.
1127 	 *
1128 	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1129 	 */
1130 	BUF_KERNPROC(bp);
1131 	BUF_STRATEGY(bp);
1132 
1133 	/*
1134 	 * wait for the page we want to complete.  PG_SWAPINPROG is always
1135 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1136 	 * is set in the meta-data.
1137 	 */
1138 	s = splvm();
1139 	while ((mreq->flags & PG_SWAPINPROG) != 0) {
1140 		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1141 		cnt.v_intrans++;
1142 		if (tsleep(mreq, PSWP, "swread", hz*20)) {
1143 			printf(
1144 			    "swap_pager: indefinite wait buffer: device:"
1145 				" %s, blkno: %ld, size: %ld\n",
1146 			    devtoname(bp->b_dev), (long)bp->b_blkno,
1147 			    bp->b_bcount
1148 			);
1149 		}
1150 	}
1151 	splx(s);
1152 
1153 	/*
1154 	 * mreq is left busied after completion, but all the other pages
1155 	 * are freed.  If we had an unrecoverable read error the page will
1156 	 * not be valid.
1157 	 */
1158 	if (mreq->valid != VM_PAGE_BITS_ALL) {
1159 		return (VM_PAGER_ERROR);
1160 	} else {
1161 		return (VM_PAGER_OK);
1162 	}
1163 
1164 	/*
1165 	 * A final note: in a low swap situation, we cannot deallocate swap
1166 	 * and mark a page dirty here because the caller is likely to mark
1167 	 * the page clean when we return, causing the page to possibly revert
1168 	 * to all-zero's later.
1169 	 */
1170 }
1171 
1172 /*
1173  *	swap_pager_putpages:
1174  *
1175  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1176  *
1177  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1178  *	are automatically converted to SWAP objects.
1179  *
1180  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
1181  *	vm_page reservation system coupled with properly written VFS devices
1182  *	should ensure that no low-memory deadlock occurs.  This is an area
1183  *	which needs work.
1184  *
1185  *	The parent has N vm_object_pip_add() references prior to
1186  *	calling us and will remove references for rtvals[] that are
1187  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1188  *	completion.
1189  *
1190  *	The parent has soft-busy'd the pages it passes us and will unbusy
 1191  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1192  *	We need to unbusy the rest on I/O completion.
1193  */
1194 void
1195 swap_pager_putpages(object, m, count, sync, rtvals)
1196 	vm_object_t object;
1197 	vm_page_t *m;
1198 	int count;
1199 	boolean_t sync;
1200 	int *rtvals;
1201 {
1202 	int i;
1203 	int n = 0;
1204 
1205 	GIANT_REQUIRED;
1206 	if (count && m[0]->object != object) {
 1207 		panic("swap_pager_putpages: object mismatch %p/%p",
1208 		    object,
1209 		    m[0]->object
1210 		);
1211 	}
1212 	/*
1213 	 * Step 1
1214 	 *
1215 	 * Turn object into OBJT_SWAP
1216 	 * check for bogus sysops
1217 	 * force sync if not pageout process
1218 	 */
1219 	if (object->type != OBJT_SWAP)
1220 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1221 
1222 	if (curproc != pageproc)
1223 		sync = TRUE;
1224 
1225 	/*
1226 	 * Step 2
1227 	 *
1228 	 * Update nsw parameters from swap_async_max sysctl values.
1229 	 * Do not let the sysop crash the machine with bogus numbers.
1230 	 */
1231 	mtx_lock(&pbuf_mtx);
1232 	if (swap_async_max != nsw_wcount_async_max) {
1233 		int n;
1234 		int s;
1235 
1236 		/*
1237 		 * limit range
1238 		 */
1239 		if ((n = swap_async_max) > nswbuf / 2)
1240 			n = nswbuf / 2;
1241 		if (n < 1)
1242 			n = 1;
1243 		swap_async_max = n;
1244 
1245 		/*
1246 		 * Adjust difference ( if possible ).  If the current async
1247 		 * count is too low, we may not be able to make the adjustment
1248 		 * at this time.
1249 		 */
1250 		s = splvm();
1251 		n -= nsw_wcount_async_max;
1252 		if (nsw_wcount_async + n >= 0) {
1253 			nsw_wcount_async += n;
1254 			nsw_wcount_async_max += n;
1255 			wakeup(&nsw_wcount_async);
1256 		}
1257 		splx(s);
1258 	}
1259 	mtx_unlock(&pbuf_mtx);
1260 
1261 	/*
1262 	 * Step 3
1263 	 *
1264 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1265 	 * The page is left dirty until the pageout operation completes
1266 	 * successfully.
1267 	 */
1268 	for (i = 0; i < count; i += n) {
1269 		int s;
1270 		int j;
1271 		struct buf *bp;
1272 		daddr_t blk;
1273 
1274 		/*
1275 		 * Maximum I/O size is limited by a number of factors.
1276 		 */
1277 		n = min(BLIST_MAX_ALLOC, count - i);
1278 		n = min(n, nsw_cluster_max);
1279 
1280 		s = splvm();
1281 
1282 		/*
1283 		 * Get biggest block of swap we can.  If we fail, fall
1284 		 * back and try to allocate a smaller block.  Don't go
1285 		 * overboard trying to allocate space if it would overly
1286 		 * fragment swap.
1287 		 */
1288 		while (
1289 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1290 		    n > 4
1291 		) {
1292 			n >>= 1;
1293 		}
1294 		if (blk == SWAPBLK_NONE) {
1295 			for (j = 0; j < n; ++j)
1296 				rtvals[i+j] = VM_PAGER_FAIL;
1297 			splx(s);
1298 			continue;
1299 		}
1300 
1301 		/*
1302 		 * The I/O we are constructing cannot cross a physical
 1303 		 * disk boundary in the swap stripe.  Note: we are still
1304 		 * at splvm().
1305 		 */
1306 		if ((blk ^ (blk + n)) & dmmax_mask) {
1307 			j = ((blk + dmmax) & dmmax_mask) - blk;
1308 			swp_pager_freeswapspace(blk + j, n - j);
1309 			n = j;
1310 		}
1311 
1312 		/*
1313 		 * All I/O parameters have been satisfied, build the I/O
1314 		 * request and assign the swap space.
1315 		 *
1316 		 * NOTE: B_PAGING is set by pbgetvp()
1317 		 */
1318 		if (sync == TRUE) {
1319 			bp = getpbuf(&nsw_wcount_sync);
1320 		} else {
1321 			bp = getpbuf(&nsw_wcount_async);
1322 			bp->b_flags = B_ASYNC;
1323 		}
1324 		bp->b_iocmd = BIO_WRITE;
1325 		bp->b_spc = NULL;	/* not used, but NULL-out anyway */
1326 
1327 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1328 
1329 		bp->b_rcred = crhold(thread0.td_ucred);
1330 		bp->b_wcred = crhold(thread0.td_ucred);
1331 		bp->b_bcount = PAGE_SIZE * n;
1332 		bp->b_bufsize = PAGE_SIZE * n;
1333 		bp->b_blkno = blk;
1334 
1335 		pbgetvp(swapdev_vp, bp);
1336 
1337 		for (j = 0; j < n; ++j) {
1338 			vm_page_t mreq = m[i+j];
1339 
1340 			swp_pager_meta_build(
1341 			    mreq->object,
1342 			    mreq->pindex,
1343 			    blk + j
1344 			);
1345 			vm_page_dirty(mreq);
1346 			rtvals[i+j] = VM_PAGER_OK;
1347 
1348 			vm_page_flag_set(mreq, PG_SWAPINPROG);
1349 			bp->b_pages[j] = mreq;
1350 		}
1351 		bp->b_npages = n;
1352 		/*
1353 		 * Must set dirty range for NFS to work.
1354 		 */
1355 		bp->b_dirtyoff = 0;
1356 		bp->b_dirtyend = bp->b_bcount;
1357 
1358 		cnt.v_swapout++;
1359 		cnt.v_swappgsout += bp->b_npages;
1360 		swapdev_vp->v_numoutput++;
1361 
1362 		splx(s);
1363 
1364 		/*
1365 		 * asynchronous
1366 		 *
1367 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1368 		 */
1369 		if (sync == FALSE) {
1370 			bp->b_iodone = swp_pager_async_iodone;
1371 			BUF_KERNPROC(bp);
1372 			BUF_STRATEGY(bp);
1373 
1374 			for (j = 0; j < n; ++j)
1375 				rtvals[i+j] = VM_PAGER_PEND;
 1376 			/* restart outer loop */
1377 			continue;
1378 		}
1379 
1380 		/*
1381 		 * synchronous
1382 		 *
1383 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1384 		 */
1385 		bp->b_iodone = swp_pager_sync_iodone;
1386 		BUF_STRATEGY(bp);
1387 
1388 		/*
1389 		 * Wait for the sync I/O to complete, then update rtvals.
1390 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1391 		 * our async completion routine at the end, thus avoiding a
1392 		 * double-free.
1393 		 */
1394 		s = splbio();
1395 		while ((bp->b_flags & B_DONE) == 0) {
1396 			tsleep(bp, PVM, "swwrt", 0);
1397 		}
1398 		for (j = 0; j < n; ++j)
1399 			rtvals[i+j] = VM_PAGER_PEND;
1400 		/*
1401 		 * Now that we are through with the bp, we can call the
1402 		 * normal async completion, which frees everything up.
1403 		 */
1404 		swp_pager_async_iodone(bp);
1405 		splx(s);
1406 	}
1407 }
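
/*
 * Editor's note: a userland sketch of the stripe-trimming arithmetic in
 * step 3 above, not code from this file.  If an allocation of n blocks
 * at blk would cross a dmmax stripe boundary, it is cut back to end at
 * the boundary and the tail is returned to the bitmap.
 */
#include <stdio.h>

int
main(void)
{
	int dmmax = 32, dmmax_mask = ~(dmmax - 1);
	int blk = 28, n = 8;	/* blocks 28..35 straddle the 32 boundary */

	if ((blk ^ (blk + n)) & dmmax_mask) {
		int j = ((blk + dmmax) & dmmax_mask) - blk;

		/* the real code frees the trimmed tail, blk+j..blk+n-1 */
		printf("trim n from %d to %d\n", n, j);	/* 8 -> 4 */
		n = j;
	}
	return (0);
}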
1408 
1409 /*
 1410  *	swp_pager_sync_iodone:
 1411  *
 1412  *	Completion routine for synchronous reads and writes from/to swap.
 1413  *	We just mark the bp complete and wake up anyone waiting on it.
1414  *
1415  *	This routine may not block.  This routine is called at splbio() or better.
1416  */
1417 static void
1418 swp_pager_sync_iodone(bp)
1419 	struct buf *bp;
1420 {
1421 	bp->b_flags |= B_DONE;
1422 	bp->b_flags &= ~B_ASYNC;
1423 	wakeup(bp);
1424 }
1425 
1426 /*
1427  *	swp_pager_async_iodone:
1428  *
1429  *	Completion routine for asynchronous reads and writes from/to swap.
1430  *	Also called manually by synchronous code to finish up a bp.
1431  *
1432  *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
 1433  *	the pages are vm_page_t->busy'd.  For READ operations, we clear
 1434  *	PG_BUSY on all pages except the 'main' request page.  For WRITE
 1435  *	operations, we drop the busy count on all pages ( we can do this
 1436  *	because we marked them all VM_PAGER_PEND on return from putpages ).
1437  *
1438  *	This routine may not block.
1439  *	This routine is called at splbio() or better
1440  *
1441  *	We up ourselves to splvm() as required for various vm_page related
1442  *	calls.
1443  */
1444 static void
1445 swp_pager_async_iodone(bp)
1446 	struct buf *bp;
1447 {
1448 	int s;
1449 	int i;
1450 	vm_object_t object = NULL;
1451 
1452 	GIANT_REQUIRED;
1453 	bp->b_flags |= B_DONE;
1454 
1455 	/*
1456 	 * report error
1457 	 */
1458 	if (bp->b_ioflags & BIO_ERROR) {
1459 		printf(
1460 		    "swap_pager: I/O error - %s failed; blkno %ld,"
 1461 			" size %ld, error %d\n",
1462 		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1463 		    (long)bp->b_blkno,
1464 		    (long)bp->b_bcount,
1465 		    bp->b_error
1466 		);
1467 	}
1468 
1469 	/*
1470 	 * set object, raise to splvm().
1471 	 */
1472 	if (bp->b_npages)
1473 		object = bp->b_pages[0]->object;
1474 	s = splvm();
1475 
1476 	/*
1477 	 * remove the mapping for kernel virtual
1478 	 */
1479 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1480 
1481 	/*
1482 	 * cleanup pages.  If an error occurs writing to swap, we are in
1483 	 * very serious trouble.  If it happens to be a disk error, though,
1484 	 * we may be able to recover by reassigning the swap later on.  So
1485 	 * in this case we remove the m->swapblk assignment for the page
 1486 	 * but do not free it in the rlist.  The erroneous block(s) are thus
1487 	 * never reallocated as swap.  Redirty the page and continue.
1488 	 */
1489 	for (i = 0; i < bp->b_npages; ++i) {
1490 		vm_page_t m = bp->b_pages[i];
1491 
1492 		vm_page_flag_clear(m, PG_SWAPINPROG);
1493 
1494 		if (bp->b_ioflags & BIO_ERROR) {
1495 			/*
1496 			 * If an error occurs I'd love to throw the swapblk
1497 			 * away without freeing it back to swapspace, so it
1498 			 * can never be used again.  But I can't from an
1499 			 * interrupt.
1500 			 */
1501 			if (bp->b_iocmd == BIO_READ) {
1502 				/*
1503 				 * When reading, reqpage needs to stay
1504 				 * locked for the parent, but all other
1505 				 * pages can be freed.  We still want to
1506 				 * wakeup the parent waiting on the page,
1507 				 * though.  ( also: pg_reqpage can be -1 and
1508 				 * not match anything ).
1509 				 *
1510 				 * We have to wake specifically requested pages
1511 				 * up too because we cleared PG_SWAPINPROG and
1512 				 * someone may be waiting for that.
1513 				 *
1514 				 * NOTE: for reads, m->dirty will probably
1515 				 * be overridden by the original caller of
1516 				 * getpages so don't play cute tricks here.
1517 				 *
1518 				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
1519 				 * AS THIS MESSES WITH object->memq, and it is
1520 				 * not legal to mess with object->memq from an
1521 				 * interrupt.
1522 				 */
1523 				m->valid = 0;
1524 				vm_page_flag_clear(m, PG_ZERO);
1525 				if (i != bp->b_pager.pg_reqpage)
1526 					vm_page_free(m);
1527 				else
1528 					vm_page_flash(m);
1529 				/*
1530 				 * If i == bp->b_pager.pg_reqpage, do not wake
1531 				 * the page up.  The caller needs to.
1532 				 */
1533 			} else {
1534 				/*
1535 				 * If a write error occurs, reactivate page
1536 				 * so it doesn't clog the inactive list,
1537 				 * then finish the I/O.
1538 				 */
1539 				vm_page_dirty(m);
1540 				vm_page_activate(m);
1541 				vm_page_io_finish(m);
1542 			}
1543 		} else if (bp->b_iocmd == BIO_READ) {
1544 			/*
1545 			 * For read success, clear dirty bits.  Nobody should
1546 			 * have this page mapped but don't take any chances,
1547 			 * make sure the pmap modify bits are also cleared.
1548 			 *
1549 			 * NOTE: for reads, m->dirty will probably be
1550 			 * overridden by the original caller of getpages so
1551 			 * we cannot set them in order to free the underlying
1552 			 * swap in a low-swap situation.  I don't think we'd
1553 			 * want to do that anyway, but it was an optimization
1554 			 * that existed in the old swapper for a time before
1555 			 * it got ripped out due to precisely this problem.
1556 			 *
1557 			 * clear PG_ZERO in page.
1558 			 *
1559 			 * If not the requested page then deactivate it.
1560 			 *
1561 			 * Note that the requested page, reqpage, is left
1562 			 * busied, but we still have to wake it up.  The
1563 			 * other pages are released (unbusied) by
1564 			 * vm_page_wakeup().  We do not set reqpage's
1565 			 * valid bits here, it is up to the caller.
1566 			 */
1567 			pmap_clear_modify(m);
1568 			m->valid = VM_PAGE_BITS_ALL;
1569 			vm_page_undirty(m);
1570 			vm_page_flag_clear(m, PG_ZERO);
1571 
1572 			/*
1573 			 * We have to wake specifically requested pages
1574 			 * up too because we cleared PG_SWAPINPROG and
1575 			 * could be waiting for it in getpages.  However,
1576 			 * be sure to not unbusy getpages specifically
1577 			 * requested page - getpages expects it to be
1578 			 * left busy.
1579 			 */
1580 			if (i != bp->b_pager.pg_reqpage) {
1581 				vm_page_deactivate(m);
1582 				vm_page_wakeup(m);
1583 			} else {
1584 				vm_page_flash(m);
1585 			}
1586 		} else {
1587 			/*
1588 			 * For write success, clear the modify and dirty
1589 			 * status, then finish the I/O ( which decrements the
1590 			 * busy count and possibly wakes waiter's up ).
1591 			 */
1592 			pmap_clear_modify(m);
1593 			vm_page_undirty(m);
1594 			vm_page_io_finish(m);
1595 			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1596 				vm_page_protect(m, VM_PROT_READ);
1597 		}
1598 	}
1599 
1600 	/*
1601 	 * adjust pip.  NOTE: the original parent may still have its own
1602 	 * pip refs on the object.
1603 	 */
1604 	if (object)
1605 		vm_object_pip_wakeupn(object, bp->b_npages);
1606 
1607 	/*
1608 	 * release the physical I/O buffer
1609 	 */
1610 	relpbuf(
1611 	    bp,
1612 	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1613 		((bp->b_flags & B_ASYNC) ?
1614 		    &nsw_wcount_async :
1615 		    &nsw_wcount_sync
1616 		)
1617 	    )
1618 	);
1619 	splx(s);
1620 }
1621 
1622 /************************************************************************
1623  *				SWAP META DATA 				*
1624  ************************************************************************
1625  *
1626  *	These routines manipulate the swap metadata stored in the
1627  *	OBJT_SWAP object.  All swp_*() routines must be called at
1628  *	splvm() because swap can be freed up by the low level vm_page
1629  *	code which might be called from interrupts beyond what splbio() covers.
1630  *
1631  *	Swap metadata is implemented with a global hash and not directly
1632  *	linked into the object.  Instead the object simply contains
1633  *	appropriate tracking counters.
1634  */
1635 
1636 /*
1637  * SWP_PAGER_HASH() -	hash swap meta data
1638  *
1639  *	This is an inline helper function which hashes the swapblk given
1640  *	the object and page index.  It returns a pointer to a pointer
1641  *	to the object, or a pointer to a NULL pointer if it could not
1642  *	find a swapblk.
1643  *
1644  *	This routine must be called at splvm().
1645  */
1646 static __inline struct swblock **
1647 swp_pager_hash(vm_object_t object, vm_pindex_t index)
1648 {
1649 	struct swblock **pswap;
1650 	struct swblock *swap;
1651 
1652 	index &= ~SWAP_META_MASK;
1653 	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
1654 	while ((swap = *pswap) != NULL) {
1655 		if (swap->swb_object == object &&
1656 		    swap->swb_index == index
1657 		) {
1658 			break;
1659 		}
1660 		pswap = &swap->swb_hnext;
1661 	}
1662 	return (pswap);
1663 }
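
/*
 * Editor's note: a compact userland model of the swhash lookup above,
 * assuming SWAP_META_PAGES is 16 (so SWAP_META_MASK is 15) and an
 * arbitrary bucket count; the structure and helper names here are
 * illustrative only.  Keys are (object, index rounded down to a
 * SWAP_META_PAGES boundary); returning the address of the link slot
 * lets callers insert or unlink in place, exactly as the pager does.
 */
#include <stdint.h>
#include <stdio.h>

#define SWAP_META_PAGES	16
#define SWAP_META_MASK	(SWAP_META_PAGES - 1)
#define HASH_SIZE	64			/* assumed bucket count */

struct swb {
	struct swb	*next;
	void		*obj;
	long		 index;
};

static struct swb *hash[HASH_SIZE];

static struct swb **
swb_hash(void *obj, long index)
{
	struct swb **pswap, *swap;

	index &= ~(long)SWAP_META_MASK;
	pswap = &hash[(index ^ (int)(intptr_t)obj) & (HASH_SIZE - 1)];
	while ((swap = *pswap) != NULL) {
		if (swap->obj == obj && swap->index == index)
			break;
		pswap = &swap->next;
	}
	return (pswap);
}

int
main(void)
{
	static struct swb e;
	int obj;

	e.obj = &obj;
	e.index = 32;
	*swb_hash(&obj, 35) = &e;	/* insert under the 32..47 key */
	printf("found: %d\n", *swb_hash(&obj, 40) == &e);	/* 1 */
	return (0);
}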
1664 
1665 /*
1666  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
1667  *
1668  *	We first convert the object to a swap object if it is a default
1669  *	object.
1670  *
1671  *	The specified swapblk is added to the object's swap metadata.  If
1672  *	the swapblk is not valid, it is freed instead.  Any previously
1673  *	assigned swapblk is freed.
1674  *
1675  *	This routine must be called at splvm(), except when used to convert
1676  *	an OBJT_DEFAULT object into an OBJT_SWAP object.
1677  */
1678 static void
1679 swp_pager_meta_build(
1680 	vm_object_t object,
1681 	vm_pindex_t index,
1682 	daddr_t swapblk
1683 ) {
1684 	struct swblock *swap;
1685 	struct swblock **pswap;
1686 
1687 	GIANT_REQUIRED;
1688 	/*
1689 	 * Convert default object to swap object if necessary
1690 	 */
1691 	if (object->type != OBJT_SWAP) {
1692 		object->type = OBJT_SWAP;
1693 		object->un_pager.swp.swp_bcount = 0;
1694 
1695 		mtx_lock(&sw_alloc_mtx);
1696 		if (object->handle != NULL) {
1697 			TAILQ_INSERT_TAIL(
1698 			    NOBJLIST(object->handle),
1699 			    object,
1700 			    pager_object_list
1701 			);
1702 		} else {
1703 			TAILQ_INSERT_TAIL(
1704 			    &swap_pager_un_object_list,
1705 			    object,
1706 			    pager_object_list
1707 			);
1708 		}
1709 		mtx_unlock(&sw_alloc_mtx);
1710 	}
1711 
1712 	/*
1713 	 * Locate hash entry.  If not found create, but if we aren't adding
1714 	 * anything just return.  If we run out of space in the map we wait
1715 	 * and, since the hash table may have changed, retry.
1716 	 */
1717 retry:
1718 	pswap = swp_pager_hash(object, index);
1719 
1720 	if ((swap = *pswap) == NULL) {
1721 		int i;
1722 
1723 		if (swapblk == SWAPBLK_NONE)
1724 			return;
1725 
1726 		swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
1727 		if (swap == NULL) {
1728 			VM_WAIT;
1729 			goto retry;
1730 		}
1731 
1732 		swap->swb_hnext = NULL;
1733 		swap->swb_object = object;
1734 		swap->swb_index = index & ~SWAP_META_MASK;
1735 		swap->swb_count = 0;
1736 
1737 		++object->un_pager.swp.swp_bcount;
1738 
1739 		for (i = 0; i < SWAP_META_PAGES; ++i)
1740 			swap->swb_pages[i] = SWAPBLK_NONE;
1741 	}
1742 
1743 	/*
1744 	 * Delete prior contents of metadata
1745 	 */
1746 	index &= SWAP_META_MASK;
1747 
1748 	if (swap->swb_pages[index] != SWAPBLK_NONE) {
1749 		swp_pager_freeswapspace(swap->swb_pages[index], 1);
1750 		--swap->swb_count;
1751 	}
1752 
1753 	/*
1754 	 * Enter block into metadata
1755 	 */
1756 	swap->swb_pages[index] = swapblk;
1757 	if (swapblk != SWAPBLK_NONE)
1758 		++swap->swb_count;
1759 }
1760 
1761 /*
1762  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1763  *
1764  *	The requested range of blocks is freed, with any associated swap
1765  *	returned to the swap bitmap.
1766  *
1767  *	This routine will free swap metadata structures as they are cleaned
1768  *	out.  This routine does *NOT* operate on swap metadata associated
1769  *	with resident pages.
1770  *
1771  *	This routine must be called at splvm()
1772  */
1773 static void
1774 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1775 {
1776 	GIANT_REQUIRED;
1777 
1778 	if (object->type != OBJT_SWAP)
1779 		return;
1780 
1781 	while (count > 0) {
1782 		struct swblock **pswap;
1783 		struct swblock *swap;
1784 
1785 		pswap = swp_pager_hash(object, index);
1786 
1787 		if ((swap = *pswap) != NULL) {
1788 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1789 
1790 			if (v != SWAPBLK_NONE) {
1791 				swp_pager_freeswapspace(v, 1);
1792 				swap->swb_pages[index & SWAP_META_MASK] =
1793 					SWAPBLK_NONE;
1794 				if (--swap->swb_count == 0) {
1795 					*pswap = swap->swb_hnext;
1796 					uma_zfree(swap_zone, swap);
1797 					--object->un_pager.swp.swp_bcount;
1798 				}
1799 			}
1800 			--count;
1801 			++index;
1802 		} else {
1803 			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1804 			count -= n;
1805 			index += n;
1806 		}
1807 	}
1808 }
1809 
1810 /*
1811  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1812  *
1813  *	This routine locates and destroys all swap metadata associated with
1814  *	an object.
1815  *
1816  *	This routine must be called at splvm()
1817  */
1818 static void
1819 swp_pager_meta_free_all(vm_object_t object)
1820 {
1821 	daddr_t index = 0;
1822 
1823 	GIANT_REQUIRED;
1824 
1825 	if (object->type != OBJT_SWAP)
1826 		return;
1827 
1828 	while (object->un_pager.swp.swp_bcount) {
1829 		struct swblock **pswap;
1830 		struct swblock *swap;
1831 
1832 		pswap = swp_pager_hash(object, index);
1833 		if ((swap = *pswap) != NULL) {
1834 			int i;
1835 
1836 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1837 				daddr_t v = swap->swb_pages[i];
1838 				if (v != SWAPBLK_NONE) {
1839 					--swap->swb_count;
1840 					swp_pager_freeswapspace(v, 1);
1841 				}
1842 			}
1843 			if (swap->swb_count != 0)
 1844 				panic("swp_pager_meta_free_all: swb_count != 0");
1845 			*pswap = swap->swb_hnext;
1846 			uma_zfree(swap_zone, swap);
1847 			--object->un_pager.swp.swp_bcount;
1848 		}
1849 		index += SWAP_META_PAGES;
1850 		if (index > 0x20000000)
1851 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1852 	}
1853 }
1854 
1855 /*
1856  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
1857  *
1858  *	This routine is capable of looking up, popping, or freeing
1859  *	swapblk assignments in the swap meta data or in the vm_page_t.
1860  *	The routine typically returns the swapblk being looked-up, or popped,
1861  *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
1862  *	was invalid.  This routine will automatically free any invalid
1863  *	meta-data swapblks.
1864  *
1865  *	It is not possible to store invalid swapblks in the swap meta data
 1866  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1867  *
1868  *	When acting on a busy resident page and paging is in progress, we
1869  *	have to wait until paging is complete but otherwise can act on the
1870  *	busy page.
1871  *
1872  *	This routine must be called at splvm().
1873  *
1874  *	SWM_FREE	remove and free swap block from metadata
1875  *	SWM_POP		remove from meta data but do not free.. pop it out
1876  */
1877 static daddr_t
1878 swp_pager_meta_ctl(
1879 	vm_object_t object,
1880 	vm_pindex_t index,
1881 	int flags
1882 ) {
1883 	struct swblock **pswap;
1884 	struct swblock *swap;
1885 	daddr_t r1;
1886 
1887 	GIANT_REQUIRED;
1888 	/*
 1889 	 * The meta data only exists if the object is OBJT_SWAP
1890 	 * and even then might not be allocated yet.
1891 	 */
1892 	if (object->type != OBJT_SWAP)
1893 		return (SWAPBLK_NONE);
1894 
1895 	r1 = SWAPBLK_NONE;
1896 	pswap = swp_pager_hash(object, index);
1897 
1898 	if ((swap = *pswap) != NULL) {
1899 		index &= SWAP_META_MASK;
1900 		r1 = swap->swb_pages[index];
1901 
1902 		if (r1 != SWAPBLK_NONE) {
1903 			if (flags & SWM_FREE) {
1904 				swp_pager_freeswapspace(r1, 1);
1905 				r1 = SWAPBLK_NONE;
1906 			}
1907 			if (flags & (SWM_FREE|SWM_POP)) {
1908 				swap->swb_pages[index] = SWAPBLK_NONE;
1909 				if (--swap->swb_count == 0) {
1910 					*pswap = swap->swb_hnext;
1911 					uma_zfree(swap_zone, swap);
1912 					--object->un_pager.swp.swp_bcount;
1913 				}
1914 			}
1915 		}
1916 	}
1917 	return (r1);
1918 }
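
/*
 * Editor's note: an illustrative summary of the flag semantics above
 * (descriptive only, not new code):
 *
 *	swp_pager_meta_ctl(obj, idx, 0)        - look up, leave metadata intact
 *	swp_pager_meta_ctl(obj, idx, SWM_POP)  - remove and return the swapblk
 *	swp_pager_meta_ctl(obj, idx, SWM_FREE) - remove, free the swapblk,
 *						 and return SWAPBLK_NONE
 */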
1919 
1920 /********************************************************
1921  *		CHAINING FUNCTIONS			*
1922  ********************************************************
1923  *
1924  *	These functions support recursion of I/O operations
1925  *	on bp's, typically by chaining one or more 'child' bp's
1926  *	to the parent.  Synchronous, asynchronous, and semi-synchronous
1927  *	chaining is possible.
1928  */
1929 
1930 /*
1931  *	vm_pager_chain_iodone:
1932  *
1933  *	io completion routine for child bp.  Currently we fudge a bit
1934  *	on dealing with b_resid.   Since users of these routines may issue
1935  *	multiple children simultaneously, sequencing of the error can be lost.
1936  */
1937 static void
1938 vm_pager_chain_iodone(struct buf *nbp)
1939 {
1940 	struct bio *bp;
1941 	u_int *count;
1942 
1943 	bp = nbp->b_caller1;
1944 	count = (u_int *)&(bp->bio_driver1);
1945 	if (bp != NULL) {
1946 		if (nbp->b_ioflags & BIO_ERROR) {
1947 			bp->bio_flags |= BIO_ERROR;
1948 			bp->bio_error = nbp->b_error;
1949 		} else if (nbp->b_resid != 0) {
1950 			bp->bio_flags |= BIO_ERROR;
1951 			bp->bio_error = EINVAL;
1952 		} else {
1953 			bp->bio_resid -= nbp->b_bcount;
1954 		}
1955 		nbp->b_caller1 = NULL;
1956 		--(*count);
1957 		if (bp->bio_flags & BIO_FLAG1) {
1958 			bp->bio_flags &= ~BIO_FLAG1;
1959 			wakeup(bp);
1960 		}
1961 	}
1962 	nbp->b_flags |= B_DONE;
1963 	nbp->b_flags &= ~B_ASYNC;
1964 	relpbuf(nbp, NULL);
1965 }
1966 
1967 /*
1968  *	getchainbuf:
1969  *
1970  *	Obtain a physical buffer and chain it to its parent buffer.  When
1971  *	I/O completes, the parent buffer will be B_SIGNAL'd.  Errors are
1972  *	automatically propagated to the parent
1973  */
1974 struct buf *
1975 getchainbuf(struct bio *bp, struct vnode *vp, int flags)
1976 {
1977 	struct buf *nbp;
1978 	u_int *count;
1979 
1980 	GIANT_REQUIRED;
1981 	nbp = getpbuf(NULL);
1982 	count = (u_int *)&(bp->bio_driver1);
1983 
1984 	nbp->b_caller1 = bp;
1985 	++(*count);
1986 
1987 	if (*count > 4)
1988 		waitchainbuf(bp, 4, 0);
1989 
1990 	nbp->b_iocmd = bp->bio_cmd;
1991 	nbp->b_ioflags = 0;
1992 	nbp->b_flags = flags;
1993 	nbp->b_rcred = crhold(thread0.td_ucred);
1994 	nbp->b_wcred = crhold(thread0.td_ucred);
1995 	nbp->b_iodone = vm_pager_chain_iodone;
1996 
1997 	if (vp)
1998 		pbgetvp(vp, nbp);
1999 	return (nbp);
2000 }
2001 
2002 void
2003 flushchainbuf(struct buf *nbp)
2004 {
2005 	GIANT_REQUIRED;
2006 	if (nbp->b_bcount) {
2007 		nbp->b_bufsize = nbp->b_bcount;
2008 		if (nbp->b_iocmd == BIO_WRITE)
2009 			nbp->b_dirtyend = nbp->b_bcount;
2010 		BUF_KERNPROC(nbp);
2011 		BUF_STRATEGY(nbp);
2012 	} else {
2013 		bufdone(nbp);
2014 	}
2015 }
2016 
2017 static void
2018 waitchainbuf(struct bio *bp, int limit, int done)
2019 {
 2020 	int s;
2021 	u_int *count;
2022 
2023 	GIANT_REQUIRED;
2024 	count = (u_int *)&(bp->bio_driver1);
2025 	s = splbio();
2026 	while (*count > limit) {
2027 		bp->bio_flags |= BIO_FLAG1;
2028 		tsleep(bp, PRIBIO + 4, "bpchain", 0);
2029 	}
2030 	if (done) {
2031 		if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
2032 			bp->bio_flags |= BIO_ERROR;
2033 			bp->bio_error = EINVAL;
2034 		}
2035 		biodone(bp);
2036 	}
2037 	splx(s);
2038 }
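
/*
 * Editor's note: the typical chaining pattern, as exercised by
 * swap_pager_strategy() above (a descriptive summary, not new code):
 *
 *	nbp = getchainbuf(bp, vp, B_ASYNC);	// child linked to parent bp
 *	... set nbp->b_blkno, b_data, b_bcount ...
 *	flushchainbuf(nbp);			// start the child I/O
 *	waitchainbuf(bp, 0, 1);			// drain children, biodone(bp)
 */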
2039 
2040