xref: /freebsd/sys/vm/swap_pager.c (revision 936524aa02cdcfc3c7e153dd3147b4e5a013c62d)
1df8bae1dSRodney W. Grimes /*
21c7c3c6aSMatthew Dillon  * Copyright (c) 1998 Matthew Dillon,
326f9a767SRodney W. Grimes  * Copyright (c) 1994 John S. Dyson
4df8bae1dSRodney W. Grimes  * Copyright (c) 1990 University of Utah.
5df8bae1dSRodney W. Grimes  * Copyright (c) 1991, 1993
6df8bae1dSRodney W. Grimes  *	The Regents of the University of California.  All rights reserved.
7df8bae1dSRodney W. Grimes  *
8df8bae1dSRodney W. Grimes  * This code is derived from software contributed to Berkeley by
9df8bae1dSRodney W. Grimes  * the Systems Programming Group of the University of Utah Computer
10df8bae1dSRodney W. Grimes  * Science Department.
11df8bae1dSRodney W. Grimes  *
12df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
13df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
14df8bae1dSRodney W. Grimes  * are met:
15df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
16df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
17df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
18df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
19df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
20df8bae1dSRodney W. Grimes  * 3. All advertising materials mentioning features or use of this software
215929bcfaSPhilippe Charnier  *    must display the following acknowledgement:
22df8bae1dSRodney W. Grimes  *	This product includes software developed by the University of
23df8bae1dSRodney W. Grimes  *	California, Berkeley and its contributors.
24df8bae1dSRodney W. Grimes  * 4. Neither the name of the University nor the names of its contributors
25df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
26df8bae1dSRodney W. Grimes  *    without specific prior written permission.
27df8bae1dSRodney W. Grimes  *
28df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
39df8bae1dSRodney W. Grimes  *
401c7c3c6aSMatthew Dillon  *				New Swap System
411c7c3c6aSMatthew Dillon  *				Matthew Dillon
421c7c3c6aSMatthew Dillon  *
431c7c3c6aSMatthew Dillon  * Radix Bitmap 'blists'.
441c7c3c6aSMatthew Dillon  *
451c7c3c6aSMatthew Dillon  *	- The new swapper uses the new radix bitmap code.  This should scale
461c7c3c6aSMatthew Dillon  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
471c7c3c6aSMatthew Dillon  *	  arbitrary degree of fragmentation.
481c7c3c6aSMatthew Dillon  *
491c7c3c6aSMatthew Dillon  * Features:
501c7c3c6aSMatthew Dillon  *
511c7c3c6aSMatthew Dillon  *	- on the fly reallocation of swap during putpages.  The new system
521c7c3c6aSMatthew Dillon  *	  does not try to keep previously allocated swap blocks for dirty
531c7c3c6aSMatthew Dillon  *	  pages.
541c7c3c6aSMatthew Dillon  *
551c7c3c6aSMatthew Dillon  *	- on the fly deallocation of swap
561c7c3c6aSMatthew Dillon  *
571c7c3c6aSMatthew Dillon  *	- No more garbage collection required.  Unnecessarily allocated swap
581c7c3c6aSMatthew Dillon  *	  blocks only exist for dirty vm_page_t's now and these are already
591c7c3c6aSMatthew Dillon  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
601c7c3c6aSMatthew Dillon  *	  removal of invalidated swap blocks when a page is destroyed
611c7c3c6aSMatthew Dillon  *	  or renamed.
621c7c3c6aSMatthew Dillon  *
63df8bae1dSRodney W. Grimes  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64df8bae1dSRodney W. Grimes  *
65df8bae1dSRodney W. Grimes  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
661c7c3c6aSMatthew Dillon  *
67c3aac50fSPeter Wemm  * $FreeBSD$
68df8bae1dSRodney W. Grimes  */
69df8bae1dSRodney W. Grimes 
70df8bae1dSRodney W. Grimes #include <sys/param.h>
71df8bae1dSRodney W. Grimes #include <sys/systm.h>
72af647ddeSBruce Evans #include <sys/conf.h>
7364abb5a5SDavid Greenman #include <sys/kernel.h>
74df8bae1dSRodney W. Grimes #include <sys/proc.h>
759626b608SPoul-Henning Kamp #include <sys/bio.h>
76df8bae1dSRodney W. Grimes #include <sys/buf.h>
77df8bae1dSRodney W. Grimes #include <sys/vnode.h>
78df8bae1dSRodney W. Grimes #include <sys/malloc.h>
79efeaf95aSDavid Greenman #include <sys/vmmeter.h>
80327f4e83SMatthew Dillon #include <sys/sysctl.h>
811c7c3c6aSMatthew Dillon #include <sys/blist.h>
821c7c3c6aSMatthew Dillon #include <sys/lock.h>
83936524aaSMatthew Dillon #include <sys/vmmeter.h>
84df8bae1dSRodney W. Grimes 
85e47ed70bSJohn Dyson #ifndef MAX_PAGEOUT_CLUSTER
86ffc82b0aSJohn Dyson #define MAX_PAGEOUT_CLUSTER 16
87e47ed70bSJohn Dyson #endif
88e47ed70bSJohn Dyson 
89e47ed70bSJohn Dyson #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
90e47ed70bSJohn Dyson 
911c7c3c6aSMatthew Dillon #include "opt_swap.h"
92df8bae1dSRodney W. Grimes #include <vm/vm.h>
93efeaf95aSDavid Greenman #include <vm/vm_object.h>
94df8bae1dSRodney W. Grimes #include <vm/vm_page.h>
95efeaf95aSDavid Greenman #include <vm/vm_pager.h>
96df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h>
97df8bae1dSRodney W. Grimes #include <vm/swap_pager.h>
98efeaf95aSDavid Greenman #include <vm/vm_extern.h>
991c7c3c6aSMatthew Dillon #include <vm/vm_zone.h>
100df8bae1dSRodney W. Grimes 
1011c7c3c6aSMatthew Dillon #define SWM_FREE	0x02	/* free, period			*/
1021c7c3c6aSMatthew Dillon #define SWM_POP		0x04	/* pop out			*/
10326f9a767SRodney W. Grimes 
10424a1cce3SDavid Greenman /*
1051c7c3c6aSMatthew Dillon  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
1061c7c3c6aSMatthew Dillon  * in the old system.
10724a1cce3SDavid Greenman  */
1081c7c3c6aSMatthew Dillon 
1091c7c3c6aSMatthew Dillon extern int vm_swap_size;	/* number of free swap blocks, in pages */
1101c7c3c6aSMatthew Dillon 
11120d3034fSMatthew Dillon int swap_pager_full;		/* swap space exhaustion (task killing) */
11220d3034fSMatthew Dillon static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
1131c7c3c6aSMatthew Dillon static int nsw_rcount;		/* free read buffers			*/
114327f4e83SMatthew Dillon static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
115327f4e83SMatthew Dillon static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
116327f4e83SMatthew Dillon static int nsw_wcount_async_max;/* assigned maximum			*/
117327f4e83SMatthew Dillon static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
1181c7c3c6aSMatthew Dillon static int sw_alloc_interlock;	/* swap pager allocation interlock	*/
1191c7c3c6aSMatthew Dillon 
1201c7c3c6aSMatthew Dillon struct blist *swapblist;
1211c7c3c6aSMatthew Dillon static struct swblock **swhash;
1221c7c3c6aSMatthew Dillon static int swhash_mask;
123327f4e83SMatthew Dillon static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
124327f4e83SMatthew Dillon 
125ea3aecf5SPeter Wemm extern struct vnode *swapdev_vp;	/* from vm_swap.c */
12624e7ab7cSPoul-Henning Kamp 
127327f4e83SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
128327f4e83SMatthew Dillon         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
1291c7c3c6aSMatthew Dillon 
1301c7c3c6aSMatthew Dillon /*
1311c7c3c6aSMatthew Dillon  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
1321c7c3c6aSMatthew Dillon  * of searching a named list by hashing it just a little.
1331c7c3c6aSMatthew Dillon  */
1341c7c3c6aSMatthew Dillon 
1351c7c3c6aSMatthew Dillon #define NOBJLISTS		8
1361c7c3c6aSMatthew Dillon 
1371c7c3c6aSMatthew Dillon #define NOBJLIST(handle)	\
138af647ddeSBruce Evans 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
1391c7c3c6aSMatthew Dillon 
1401c7c3c6aSMatthew Dillon static struct pagerlst	swap_pager_object_list[NOBJLISTS];
1411c7c3c6aSMatthew Dillon struct pagerlst		swap_pager_un_object_list;
1421c7c3c6aSMatthew Dillon vm_zone_t		swap_zone;
1431c7c3c6aSMatthew Dillon 
1441c7c3c6aSMatthew Dillon /*
1451c7c3c6aSMatthew Dillon  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
1461c7c3c6aSMatthew Dillon  * calls hooked from other parts of the VM system and do not appear here.
1471c7c3c6aSMatthew Dillon  * (see vm/swap_pager.h).
1481c7c3c6aSMatthew Dillon  */
1491c7c3c6aSMatthew Dillon 
150ff98689dSBruce Evans static vm_object_t
1516cde7a16SDavid Greenman 		swap_pager_alloc __P((void *handle, vm_ooffset_t size,
152a316d390SJohn Dyson 				      vm_prot_t prot, vm_ooffset_t offset));
153ff98689dSBruce Evans static void	swap_pager_dealloc __P((vm_object_t object));
154f708ef1bSPoul-Henning Kamp static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
155ff98689dSBruce Evans static void	swap_pager_init __P((void));
1561c7c3c6aSMatthew Dillon static void	swap_pager_unswapped __P((vm_page_t));
1570b441832SPoul-Henning Kamp static void	swap_pager_strategy __P((vm_object_t, struct bio *));
158f708ef1bSPoul-Henning Kamp 
159df8bae1dSRodney W. Grimes struct pagerops swappagerops = {
1601c7c3c6aSMatthew Dillon 	swap_pager_init,	/* early system initialization of pager	*/
1611c7c3c6aSMatthew Dillon 	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
1621c7c3c6aSMatthew Dillon 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
1631c7c3c6aSMatthew Dillon 	swap_pager_getpages,	/* pagein				*/
1641c7c3c6aSMatthew Dillon 	swap_pager_putpages,	/* pageout				*/
1651c7c3c6aSMatthew Dillon 	swap_pager_haspage,	/* get backing store status for page	*/
166a5296b05SJulian Elischer 	swap_pager_unswapped,	/* remove swap related to page		*/
167a5296b05SJulian Elischer 	swap_pager_strategy	/* pager strategy call			*/
168df8bae1dSRodney W. Grimes };
169df8bae1dSRodney W. Grimes 
1700b441832SPoul-Henning Kamp static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
171e4057dbdSPoul-Henning Kamp static void flushchainbuf(struct buf *nbp);
1720b441832SPoul-Henning Kamp static void waitchainbuf(struct bio *bp, int count, int done);
173e4057dbdSPoul-Henning Kamp 
1741c7c3c6aSMatthew Dillon /*
1751c7c3c6aSMatthew Dillon  * dmmax is in page-sized chunks with the new swap system.  It was
17664bcb9c8SMatthew Dillon  * dev-bsized chunks in the old.  dmmax is always a power of 2.
1771c7c3c6aSMatthew Dillon  *
1781c7c3c6aSMatthew Dillon  * swap_*() routines are externally accessible.  swp_*() routines are
1791c7c3c6aSMatthew Dillon  * internal.
1801c7c3c6aSMatthew Dillon  */
1811c7c3c6aSMatthew Dillon 
182f708ef1bSPoul-Henning Kamp int dmmax;
1831c7c3c6aSMatthew Dillon static int dmmax_mask;
18420d3034fSMatthew Dillon int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
18520d3034fSMatthew Dillon int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
18626f9a767SRodney W. Grimes 
1871c7c3c6aSMatthew Dillon static __inline void	swp_sizecheck __P((void));
1881c7c3c6aSMatthew Dillon static void	swp_pager_sync_iodone __P((struct buf *bp));
1891c7c3c6aSMatthew Dillon static void	swp_pager_async_iodone __P((struct buf *bp));
19024a1cce3SDavid Greenman 
1911c7c3c6aSMatthew Dillon /*
1921c7c3c6aSMatthew Dillon  * Swap bitmap functions
1931c7c3c6aSMatthew Dillon  */
1941c7c3c6aSMatthew Dillon 
1951c7c3c6aSMatthew Dillon static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
1961c7c3c6aSMatthew Dillon static __inline daddr_t	swp_pager_getswapspace __P((int npages));
1971c7c3c6aSMatthew Dillon 
1981c7c3c6aSMatthew Dillon /*
1991c7c3c6aSMatthew Dillon  * Metadata functions
2001c7c3c6aSMatthew Dillon  */
2011c7c3c6aSMatthew Dillon 
2024dcc5c2dSMatthew Dillon static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
2034dcc5c2dSMatthew Dillon static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
2041c7c3c6aSMatthew Dillon static void swp_pager_meta_free_all __P((vm_object_t));
2051c7c3c6aSMatthew Dillon static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
2061c7c3c6aSMatthew Dillon 
2071c7c3c6aSMatthew Dillon /*
2081c7c3c6aSMatthew Dillon  * SWP_SIZECHECK() -	update swap_pager_full indication
2091c7c3c6aSMatthew Dillon  *
21020d3034fSMatthew Dillon  *	update the swap_pager_almost_full indication and warn when we are
21120d3034fSMatthew Dillon  *	about to run out of swap space, using lowat/hiwat hysteresis.
21220d3034fSMatthew Dillon  *
21320d3034fSMatthew Dillon  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
2141c7c3c6aSMatthew Dillon  *
2151c7c3c6aSMatthew Dillon  *	No restrictions on call
2161c7c3c6aSMatthew Dillon  *	This routine may not block.
2171c7c3c6aSMatthew Dillon  *	This routine must be called at splvm()
2181c7c3c6aSMatthew Dillon  */
219de5f6a77SJohn Dyson 
220c1087c13SBruce Evans static __inline void
2211c7c3c6aSMatthew Dillon swp_sizecheck()
2220d94caffSDavid Greenman {
2231c7c3c6aSMatthew Dillon 	if (vm_swap_size < nswap_lowat) {
22420d3034fSMatthew Dillon 		if (swap_pager_almost_full == 0) {
2251af87c92SDavid Greenman 			printf("swap_pager: out of swap space\n");
22620d3034fSMatthew Dillon 			swap_pager_almost_full = 1;
2272b0d37a4SMatthew Dillon 		}
22820d3034fSMatthew Dillon 	} else {
22926f9a767SRodney W. Grimes 		swap_pager_full = 0;
23020d3034fSMatthew Dillon 		if (vm_swap_size > nswap_hiwat)
23120d3034fSMatthew Dillon 			swap_pager_almost_full = 0;
23226f9a767SRodney W. Grimes 	}
2331c7c3c6aSMatthew Dillon }
2341c7c3c6aSMatthew Dillon 
2351c7c3c6aSMatthew Dillon /*
2361c7c3c6aSMatthew Dillon  * SWAP_PAGER_INIT() -	initialize the swap pager!
2371c7c3c6aSMatthew Dillon  *
2381c7c3c6aSMatthew Dillon  *	Expected to be started from system init.  NOTE:  This code is run
2391c7c3c6aSMatthew Dillon  *	before much else so be careful what you depend on.  Most of the VM
2401c7c3c6aSMatthew Dillon  *	system has yet to be initialized at this point.
2411c7c3c6aSMatthew Dillon  */
24226f9a767SRodney W. Grimes 
243f5a12711SPoul-Henning Kamp static void
244df8bae1dSRodney W. Grimes swap_pager_init()
245df8bae1dSRodney W. Grimes {
2461c7c3c6aSMatthew Dillon 	/*
2471c7c3c6aSMatthew Dillon 	 * Initialize object lists
2481c7c3c6aSMatthew Dillon 	 */
2491c7c3c6aSMatthew Dillon 	int i;
2501c7c3c6aSMatthew Dillon 
2511c7c3c6aSMatthew Dillon 	for (i = 0; i < NOBJLISTS; ++i)
2521c7c3c6aSMatthew Dillon 		TAILQ_INIT(&swap_pager_object_list[i]);
25324a1cce3SDavid Greenman 	TAILQ_INIT(&swap_pager_un_object_list);
254df8bae1dSRodney W. Grimes 
255df8bae1dSRodney W. Grimes 	/*
2561c7c3c6aSMatthew Dillon 	 * Device Stripe, in PAGE_SIZE'd blocks
257df8bae1dSRodney W. Grimes 	 */
2581c7c3c6aSMatthew Dillon 
2591c7c3c6aSMatthew Dillon 	dmmax = SWB_NPAGES * 2;
2601c7c3c6aSMatthew Dillon 	dmmax_mask = ~(dmmax - 1);
2611c7c3c6aSMatthew Dillon }
26226f9a767SRodney W. Grimes 
263df8bae1dSRodney W. Grimes /*
2641c7c3c6aSMatthew Dillon  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
2651c7c3c6aSMatthew Dillon  *
2661c7c3c6aSMatthew Dillon  *	Expected to be started from pageout process once, prior to entering
2671c7c3c6aSMatthew Dillon  *	its main loop.
268df8bae1dSRodney W. Grimes  */
269df8bae1dSRodney W. Grimes 
27024a1cce3SDavid Greenman void
27124a1cce3SDavid Greenman swap_pager_swap_init()
272df8bae1dSRodney W. Grimes {
2731c7c3c6aSMatthew Dillon 	int n;
2740d94caffSDavid Greenman 
27526f9a767SRodney W. Grimes 	/*
2761c7c3c6aSMatthew Dillon 	 * Number of in-transit swap bp operations.  Don't
2771c7c3c6aSMatthew Dillon 	 * exhaust the pbufs completely.  Make sure we
2781c7c3c6aSMatthew Dillon 	 * initialize workable values (0 will work for hysteresis
2791c7c3c6aSMatthew Dillon 	 * but it isn't very efficient).
2801c7c3c6aSMatthew Dillon 	 *
281327f4e83SMatthew Dillon 	 * The nsw_cluster_max is constrained by the bp->b_pages[]
2821c7c3c6aSMatthew Dillon 	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
2831c7c3c6aSMatthew Dillon 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
2841c7c3c6aSMatthew Dillon 	 * constrained by the swap device interleave stripe size.
285327f4e83SMatthew Dillon 	 *
286327f4e83SMatthew Dillon 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
287327f4e83SMatthew Dillon 	 * designed to prevent other I/O from having high latencies due to
288327f4e83SMatthew Dillon 	 * our pageout I/O.  The value 4 works well for one or two active swap
289327f4e83SMatthew Dillon 	 * devices but is probably a little low if you have more.  Even so,
290327f4e83SMatthew Dillon 	 * a higher value would probably generate only a limited improvement
291327f4e83SMatthew Dillon 	 * with three or four active swap devices since the system does not
292327f4e83SMatthew Dillon 	 * typically have to pageout at extreme bandwidths.   We will want
293327f4e83SMatthew Dillon 	 * at least 2 per swap devices, and 4 is a pretty good value if you
294327f4e83SMatthew Dillon 	 * have one NFS swap device due to the command/ack latency over NFS.
295327f4e83SMatthew Dillon 	 * So it all works out pretty well.
29626f9a767SRodney W. Grimes 	 */
29724a1cce3SDavid Greenman 
298ad3cce20SMatthew Dillon 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
299327f4e83SMatthew Dillon 
3001c7c3c6aSMatthew Dillon 	nsw_rcount = (nswbuf + 1) / 2;
301327f4e83SMatthew Dillon 	nsw_wcount_sync = (nswbuf + 3) / 4;
302327f4e83SMatthew Dillon 	nsw_wcount_async = 4;
303327f4e83SMatthew Dillon 	nsw_wcount_async_max = nsw_wcount_async;
30424a1cce3SDavid Greenman 
3051c7c3c6aSMatthew Dillon 	/*
3061c7c3c6aSMatthew Dillon 	 * Initialize our zone.  Right now I'm just guessing on the number
3071c7c3c6aSMatthew Dillon 	 * we need based on the number of pages in the system.  Each swblock
3081c7c3c6aSMatthew Dillon 	 * can hold 16 pages, so this is probably overkill.
3091c7c3c6aSMatthew Dillon 	 */
31024a1cce3SDavid Greenman 
3111c7c3c6aSMatthew Dillon 	n = cnt.v_page_count * 2;
31226f9a767SRodney W. Grimes 
3131c7c3c6aSMatthew Dillon 	swap_zone = zinit(
3141c7c3c6aSMatthew Dillon 	    "SWAPMETA",
3151c7c3c6aSMatthew Dillon 	    sizeof(struct swblock),
3161c7c3c6aSMatthew Dillon 	    n,
3171c7c3c6aSMatthew Dillon 	    ZONE_INTERRUPT,
3181c7c3c6aSMatthew Dillon 	    1
3191c7c3c6aSMatthew Dillon 	);
32024a1cce3SDavid Greenman 
3211c7c3c6aSMatthew Dillon 	/*
3221c7c3c6aSMatthew Dillon 	 * Initialize our meta-data hash table.  The swapper does not need to
3231c7c3c6aSMatthew Dillon 	 * be quite as efficient as the VM system, so we do not use an
3241c7c3c6aSMatthew Dillon 	 * oversized hash table.
3251c7c3c6aSMatthew Dillon 	 *
3261c7c3c6aSMatthew Dillon 	 * 	n: 		size of hash table, must be power of 2
3271c7c3c6aSMatthew Dillon 	 *	swhash_mask:	hash table index mask
3281c7c3c6aSMatthew Dillon 	 */
329df8bae1dSRodney W. Grimes 
3301c7c3c6aSMatthew Dillon 	for (n = 1; n < cnt.v_page_count / 4; n <<= 1)
3311c7c3c6aSMatthew Dillon 		;
3321c7c3c6aSMatthew Dillon 
3331c7c3c6aSMatthew Dillon 	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
3341c7c3c6aSMatthew Dillon 	bzero(swhash, sizeof(struct swblock *) * n);
3351c7c3c6aSMatthew Dillon 
3361c7c3c6aSMatthew Dillon 	swhash_mask = n - 1;
33724a1cce3SDavid Greenman }
33824a1cce3SDavid Greenman 
33924a1cce3SDavid Greenman /*
3401c7c3c6aSMatthew Dillon  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
3411c7c3c6aSMatthew Dillon  *			its metadata structures.
3421c7c3c6aSMatthew Dillon  *
3431c7c3c6aSMatthew Dillon  *	This routine is called from the mmap and fork code to create a new
3441c7c3c6aSMatthew Dillon  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
3451c7c3c6aSMatthew Dillon  *	and then converting it with swp_pager_meta_build().
3461c7c3c6aSMatthew Dillon  *
3471c7c3c6aSMatthew Dillon  *	This routine may block in vm_object_allocate() and create a named
3481c7c3c6aSMatthew Dillon  *	object lookup race, so we must interlock.   We must also run at
3491c7c3c6aSMatthew Dillon  *	splvm() for the object lookup to handle races with interrupts, but
3501c7c3c6aSMatthew Dillon  *	we do not have to maintain splvm() in between the lookup and the
3511c7c3c6aSMatthew Dillon  *	add because (I believe) it is not possible to attempt to create
3521c7c3c6aSMatthew Dillon  *	a new swap object w/handle when a default object with that handle
3531c7c3c6aSMatthew Dillon  *	already exists.
35424a1cce3SDavid Greenman  */
3551c7c3c6aSMatthew Dillon 
356f5a12711SPoul-Henning Kamp static vm_object_t
3576cde7a16SDavid Greenman swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
358b9dcd593SBruce Evans 		 vm_ooffset_t offset)
35924a1cce3SDavid Greenman {
36024a1cce3SDavid Greenman 	vm_object_t object;
36124a1cce3SDavid Greenman 
36224a1cce3SDavid Greenman 	if (handle) {
3631c7c3c6aSMatthew Dillon 		/*
3641c7c3c6aSMatthew Dillon 		 * Reference existing named region or allocate new one.  There
3651c7c3c6aSMatthew Dillon 		 * should not be a race here against swp_pager_meta_build()
3661c7c3c6aSMatthew Dillon 		 * as called from vm_page_remove() in regards to the lookup
3671c7c3c6aSMatthew Dillon 		 * of the handle.
3681c7c3c6aSMatthew Dillon 		 */
3691c7c3c6aSMatthew Dillon 
3701c7c3c6aSMatthew Dillon 		while (sw_alloc_interlock) {
3711c7c3c6aSMatthew Dillon 			sw_alloc_interlock = -1;
3721c7c3c6aSMatthew Dillon 			tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
3731c7c3c6aSMatthew Dillon 		}
3741c7c3c6aSMatthew Dillon 		sw_alloc_interlock = 1;
3751c7c3c6aSMatthew Dillon 
3761c7c3c6aSMatthew Dillon 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
3771c7c3c6aSMatthew Dillon 
37824a1cce3SDavid Greenman 		if (object != NULL) {
37924a1cce3SDavid Greenman 			vm_object_reference(object);
38024a1cce3SDavid Greenman 		} else {
3811c7c3c6aSMatthew Dillon 			object = vm_object_allocate(OBJT_DEFAULT,
3826cde7a16SDavid Greenman 				OFF_TO_IDX(offset + PAGE_MASK + size));
38324a1cce3SDavid Greenman 			object->handle = handle;
3841c7c3c6aSMatthew Dillon 
3854dcc5c2dSMatthew Dillon 			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
38624a1cce3SDavid Greenman 		}
3871c7c3c6aSMatthew Dillon 
3881c7c3c6aSMatthew Dillon 		if (sw_alloc_interlock < 0)
3891c7c3c6aSMatthew Dillon 			wakeup(&sw_alloc_interlock);
3901c7c3c6aSMatthew Dillon 
3911c7c3c6aSMatthew Dillon 		sw_alloc_interlock = 0;
39224a1cce3SDavid Greenman 	} else {
3931c7c3c6aSMatthew Dillon 		object = vm_object_allocate(OBJT_DEFAULT,
3946cde7a16SDavid Greenman 			OFF_TO_IDX(offset + PAGE_MASK + size));
3951c7c3c6aSMatthew Dillon 
3964dcc5c2dSMatthew Dillon 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
39724a1cce3SDavid Greenman 	}
39824a1cce3SDavid Greenman 
39924a1cce3SDavid Greenman 	return (object);
400df8bae1dSRodney W. Grimes }
401df8bae1dSRodney W. Grimes 
40226f9a767SRodney W. Grimes /*
4031c7c3c6aSMatthew Dillon  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
4041c7c3c6aSMatthew Dillon  *
4051c7c3c6aSMatthew Dillon  *	The swap backing for the object is destroyed.  The code is
4061c7c3c6aSMatthew Dillon  *	designed such that we can reinstantiate it later, but this
4071c7c3c6aSMatthew Dillon  *	routine is typically called only when the entire object is
4081c7c3c6aSMatthew Dillon  *	about to be destroyed.
4091c7c3c6aSMatthew Dillon  *
4101c7c3c6aSMatthew Dillon  *	This routine may block, but no longer does.
4111c7c3c6aSMatthew Dillon  *
4121c7c3c6aSMatthew Dillon  *	The object must be locked or unreferenceable.
41326f9a767SRodney W. Grimes  */
41426f9a767SRodney W. Grimes 
415df8bae1dSRodney W. Grimes static void
4161c7c3c6aSMatthew Dillon swap_pager_dealloc(object)
4172a4895f4SDavid Greenman 	vm_object_t object;
41826f9a767SRodney W. Grimes {
4194dcc5c2dSMatthew Dillon 	int s;
4204dcc5c2dSMatthew Dillon 
42126f9a767SRodney W. Grimes 	/*
4221c7c3c6aSMatthew Dillon 	 * Remove from list right away so lookups will fail if we block for
4231c7c3c6aSMatthew Dillon 	 * pageout completion.
42426f9a767SRodney W. Grimes 	 */
425b44e4b7aSJohn Dyson 
4261c7c3c6aSMatthew Dillon 	if (object->handle == NULL) {
4271c7c3c6aSMatthew Dillon 		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
42824ea4a96SDavid Greenman 	} else {
4291c7c3c6aSMatthew Dillon 		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
43026f9a767SRodney W. Grimes 	}
4311c7c3c6aSMatthew Dillon 
4321c7c3c6aSMatthew Dillon 	vm_object_pip_wait(object, "swpdea");
4331c7c3c6aSMatthew Dillon 
4341c7c3c6aSMatthew Dillon 	/*
4351c7c3c6aSMatthew Dillon 	 * Free all remaining metadata.  We only bother to free it from
4361c7c3c6aSMatthew Dillon 	 * the swap meta data.  We do not attempt to free swapblk's still
4371c7c3c6aSMatthew Dillon 	 * associated with vm_page_t's for this object.  We do not care
4381c7c3c6aSMatthew Dillon 	 * if paging is still in progress on some objects.
4391c7c3c6aSMatthew Dillon 	 */
4404dcc5c2dSMatthew Dillon 	s = splvm();
4411c7c3c6aSMatthew Dillon 	swp_pager_meta_free_all(object);
4424dcc5c2dSMatthew Dillon 	splx(s);
4431c7c3c6aSMatthew Dillon }
4441c7c3c6aSMatthew Dillon 
4451c7c3c6aSMatthew Dillon /************************************************************************
4461c7c3c6aSMatthew Dillon  *			SWAP PAGER BITMAP ROUTINES			*
4471c7c3c6aSMatthew Dillon  ************************************************************************/
4481c7c3c6aSMatthew Dillon 
4491c7c3c6aSMatthew Dillon /*
4501c7c3c6aSMatthew Dillon  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
4511c7c3c6aSMatthew Dillon  *
4521c7c3c6aSMatthew Dillon  *	Allocate swap for the requested number of pages.  The starting
4531c7c3c6aSMatthew Dillon  *	swap block number (a page index) is returned or SWAPBLK_NONE
4541c7c3c6aSMatthew Dillon  *	if the allocation failed.
4551c7c3c6aSMatthew Dillon  *
4561c7c3c6aSMatthew Dillon  *	Also has the side effect of advising that somebody made a mistake
4571c7c3c6aSMatthew Dillon  *	when they configured swap and didn't configure enough.
4581c7c3c6aSMatthew Dillon  *
4591c7c3c6aSMatthew Dillon  *	Must be called at splvm() to avoid races with bitmap frees from
4601c7c3c6aSMatthew Dillon  *	vm_page_remove() aka swap_pager_page_removed().
4611c7c3c6aSMatthew Dillon  *
4621c7c3c6aSMatthew Dillon  *	This routine may not block
4631c7c3c6aSMatthew Dillon  *	This routine must be called at splvm().
4641c7c3c6aSMatthew Dillon  */
4651c7c3c6aSMatthew Dillon 
4661c7c3c6aSMatthew Dillon static __inline daddr_t
4671c7c3c6aSMatthew Dillon swp_pager_getswapspace(npages)
4681c7c3c6aSMatthew Dillon 	int npages;
4691c7c3c6aSMatthew Dillon {
4701c7c3c6aSMatthew Dillon 	daddr_t blk;
4711c7c3c6aSMatthew Dillon 
4721c7c3c6aSMatthew Dillon 	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
4732b0d37a4SMatthew Dillon 		if (swap_pager_full != 2) {
4741c7c3c6aSMatthew Dillon 			printf("swap_pager_getswapspace: failed\n");
4752b0d37a4SMatthew Dillon 			swap_pager_full = 2;
47620d3034fSMatthew Dillon 			swap_pager_almost_full = 1;
4772b0d37a4SMatthew Dillon 		}
4781c7c3c6aSMatthew Dillon 	} else {
4791c7c3c6aSMatthew Dillon 		vm_swap_size -= npages;
4801c7c3c6aSMatthew Dillon 		swp_sizecheck();
4811c7c3c6aSMatthew Dillon 	}
4821c7c3c6aSMatthew Dillon 	return(blk);
48326f9a767SRodney W. Grimes }
48426f9a767SRodney W. Grimes 
48526f9a767SRodney W. Grimes /*
4861c7c3c6aSMatthew Dillon  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
4871c7c3c6aSMatthew Dillon  *
4881c7c3c6aSMatthew Dillon  *	This routine returns the specified swap blocks back to the bitmap.
4891c7c3c6aSMatthew Dillon  *
4901c7c3c6aSMatthew Dillon  *	Note:  This routine may not block (it could in the old swap code),
4911c7c3c6aSMatthew Dillon  *	and through the use of the new blist routines it does not block.
4921c7c3c6aSMatthew Dillon  *
4931c7c3c6aSMatthew Dillon  *	We must be called at splvm() to avoid races with bitmap frees from
4941c7c3c6aSMatthew Dillon  *	vm_page_remove() aka swap_pager_page_removed().
4951c7c3c6aSMatthew Dillon  *
4961c7c3c6aSMatthew Dillon  *	This routine may not block
4971c7c3c6aSMatthew Dillon  *	This routine must be called at splvm().
49826f9a767SRodney W. Grimes  */
4991c7c3c6aSMatthew Dillon 
5001c7c3c6aSMatthew Dillon static __inline void
5011c7c3c6aSMatthew Dillon swp_pager_freeswapspace(blk, npages)
5021c7c3c6aSMatthew Dillon 	daddr_t blk;
5031c7c3c6aSMatthew Dillon 	int npages;
5040d94caffSDavid Greenman {
5051c7c3c6aSMatthew Dillon 	blist_free(swapblist, blk, npages);
5061c7c3c6aSMatthew Dillon 	vm_swap_size += npages;
5071c7c3c6aSMatthew Dillon 	swp_sizecheck();
50826f9a767SRodney W. Grimes }
5091c7c3c6aSMatthew Dillon 
51026f9a767SRodney W. Grimes /*
5111c7c3c6aSMatthew Dillon  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
5121c7c3c6aSMatthew Dillon  *				range within an object.
5131c7c3c6aSMatthew Dillon  *
5141c7c3c6aSMatthew Dillon  *	This is a globally accessible routine.
5151c7c3c6aSMatthew Dillon  *
5161c7c3c6aSMatthew Dillon  *	This routine removes swapblk assignments from swap metadata.
5171c7c3c6aSMatthew Dillon  *
5181c7c3c6aSMatthew Dillon  *	The external callers of this routine typically have already destroyed
5191c7c3c6aSMatthew Dillon  *	or renamed vm_page_t's associated with this range in the object so
5201c7c3c6aSMatthew Dillon  *	we should be ok.
5214dcc5c2dSMatthew Dillon  *
5224dcc5c2dSMatthew Dillon  *	This routine may be called at any spl.  We up our spl to splvm temporarily
5234dcc5c2dSMatthew Dillon  *	in order to perform the metadata removal.
52426f9a767SRodney W. Grimes  */
5251c7c3c6aSMatthew Dillon 
52626f9a767SRodney W. Grimes void
52724a1cce3SDavid Greenman swap_pager_freespace(object, start, size)
52824a1cce3SDavid Greenman 	vm_object_t object;
529a316d390SJohn Dyson 	vm_pindex_t start;
530a316d390SJohn Dyson 	vm_size_t size;
53126f9a767SRodney W. Grimes {
5324dcc5c2dSMatthew Dillon 	int s = splvm();
5331c7c3c6aSMatthew Dillon 	swp_pager_meta_free(object, start, size);
5344dcc5c2dSMatthew Dillon 	splx(s);
5354dcc5c2dSMatthew Dillon }
5364dcc5c2dSMatthew Dillon 
5374dcc5c2dSMatthew Dillon /*
5384dcc5c2dSMatthew Dillon  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
5394dcc5c2dSMatthew Dillon  *
5404dcc5c2dSMatthew Dillon  *	Assigns swap blocks to the specified range within the object.  The
5414dcc5c2dSMatthew Dillon  *	swap blocks are not zerod.  Any previous swap assignment is destroyed.
5424dcc5c2dSMatthew Dillon  *
5434dcc5c2dSMatthew Dillon  *	Returns 0 on success, -1 on failure.
5444dcc5c2dSMatthew Dillon  */
5454dcc5c2dSMatthew Dillon 
5464dcc5c2dSMatthew Dillon int
5474dcc5c2dSMatthew Dillon swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
5484dcc5c2dSMatthew Dillon {
5494dcc5c2dSMatthew Dillon 	int s;
5504dcc5c2dSMatthew Dillon 	int n = 0;
5514dcc5c2dSMatthew Dillon 	daddr_t blk = SWAPBLK_NONE;
5524dcc5c2dSMatthew Dillon 	vm_pindex_t beg = start;	/* save start index */
5534dcc5c2dSMatthew Dillon 
5544dcc5c2dSMatthew Dillon 	s = splvm();
5554dcc5c2dSMatthew Dillon 	while (size) {
5564dcc5c2dSMatthew Dillon 		if (n == 0) {
5574dcc5c2dSMatthew Dillon 			n = BLIST_MAX_ALLOC;
5584dcc5c2dSMatthew Dillon 			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
5594dcc5c2dSMatthew Dillon 				n >>= 1;
5604dcc5c2dSMatthew Dillon 				if (n == 0) {
5614dcc5c2dSMatthew Dillon 					swp_pager_meta_free(object, beg, start - beg);
5624dcc5c2dSMatthew Dillon 					splx(s);
5634dcc5c2dSMatthew Dillon 					return(-1);
5644dcc5c2dSMatthew Dillon 				}
5654dcc5c2dSMatthew Dillon 			}
5664dcc5c2dSMatthew Dillon 		}
5674dcc5c2dSMatthew Dillon 		swp_pager_meta_build(object, start, blk);
5684dcc5c2dSMatthew Dillon 		--size;
5694dcc5c2dSMatthew Dillon 		++start;
5704dcc5c2dSMatthew Dillon 		++blk;
5714dcc5c2dSMatthew Dillon 		--n;
5724dcc5c2dSMatthew Dillon 	}
5734dcc5c2dSMatthew Dillon 	swp_pager_meta_free(object, start, n);
5744dcc5c2dSMatthew Dillon 	splx(s);
5754dcc5c2dSMatthew Dillon 	return(0);
57626f9a767SRodney W. Grimes }
57726f9a767SRodney W. Grimes 
5780a47b48bSJohn Dyson /*
5791c7c3c6aSMatthew Dillon  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
5801c7c3c6aSMatthew Dillon  *			and destroy the source.
5811c7c3c6aSMatthew Dillon  *
5821c7c3c6aSMatthew Dillon  *	Copy any valid swapblks from the source to the destination.  In
5831c7c3c6aSMatthew Dillon  *	cases where both the source and destination have a valid swapblk,
5841c7c3c6aSMatthew Dillon  *	we keep the destination's.
5851c7c3c6aSMatthew Dillon  *
5861c7c3c6aSMatthew Dillon  *	This routine is allowed to block.  It may block allocating metadata
5871c7c3c6aSMatthew Dillon  *	indirectly through swp_pager_meta_build() or if paging is still in
5881c7c3c6aSMatthew Dillon  *	progress on the source.
5891c7c3c6aSMatthew Dillon  *
5904dcc5c2dSMatthew Dillon  *	This routine can be called at any spl
5914dcc5c2dSMatthew Dillon  *
5921c7c3c6aSMatthew Dillon  *	XXX vm_page_collapse() kinda expects us not to block because we
5931c7c3c6aSMatthew Dillon  *	supposedly do not need to allocate memory, but for the moment we
5941c7c3c6aSMatthew Dillon  *	*may* have to get a little memory from the zone allocator, but
5951c7c3c6aSMatthew Dillon  *	it is taken from the interrupt memory.  We should be ok.
5961c7c3c6aSMatthew Dillon  *
5971c7c3c6aSMatthew Dillon  *	The source object contains no vm_page_t's (which is just as well)
5981c7c3c6aSMatthew Dillon  *
5991c7c3c6aSMatthew Dillon  *	The source object is of type OBJT_SWAP.
6001c7c3c6aSMatthew Dillon  *
6014dcc5c2dSMatthew Dillon  *	The source and destination objects must be locked or
6024dcc5c2dSMatthew Dillon  *	inaccessible (XXX are they ?)
60326f9a767SRodney W. Grimes  */
60426f9a767SRodney W. Grimes 
60526f9a767SRodney W. Grimes void
6061c7c3c6aSMatthew Dillon swap_pager_copy(srcobject, dstobject, offset, destroysource)
60724a1cce3SDavid Greenman 	vm_object_t srcobject;
60824a1cce3SDavid Greenman 	vm_object_t dstobject;
609a316d390SJohn Dyson 	vm_pindex_t offset;
610c0877f10SJohn Dyson 	int destroysource;
61126f9a767SRodney W. Grimes {
612a316d390SJohn Dyson 	vm_pindex_t i;
6134dcc5c2dSMatthew Dillon 	int s;
6144dcc5c2dSMatthew Dillon 
6154dcc5c2dSMatthew Dillon 	s = splvm();
61626f9a767SRodney W. Grimes 
61726f9a767SRodney W. Grimes 	/*
6181c7c3c6aSMatthew Dillon 	 * If destroysource is set, we remove the source object from the
6191c7c3c6aSMatthew Dillon 	 * swap_pager internal queue now.
62026f9a767SRodney W. Grimes 	 */
6211c7c3c6aSMatthew Dillon 
622cbd8ec09SJohn Dyson 	if (destroysource) {
62324a1cce3SDavid Greenman 		if (srcobject->handle == NULL) {
6241c7c3c6aSMatthew Dillon 			TAILQ_REMOVE(
6251c7c3c6aSMatthew Dillon 			    &swap_pager_un_object_list,
6261c7c3c6aSMatthew Dillon 			    srcobject,
6271c7c3c6aSMatthew Dillon 			    pager_object_list
6281c7c3c6aSMatthew Dillon 			);
62926f9a767SRodney W. Grimes 		} else {
6301c7c3c6aSMatthew Dillon 			TAILQ_REMOVE(
6311c7c3c6aSMatthew Dillon 			    NOBJLIST(srcobject->handle),
6321c7c3c6aSMatthew Dillon 			    srcobject,
6331c7c3c6aSMatthew Dillon 			    pager_object_list
6341c7c3c6aSMatthew Dillon 			);
63526f9a767SRodney W. Grimes 		}
636cbd8ec09SJohn Dyson 	}
63726f9a767SRodney W. Grimes 
6381c7c3c6aSMatthew Dillon 	/*
6391c7c3c6aSMatthew Dillon 	 * transfer source to destination.
6401c7c3c6aSMatthew Dillon 	 */
6411c7c3c6aSMatthew Dillon 
6421c7c3c6aSMatthew Dillon 	for (i = 0; i < dstobject->size; ++i) {
6431c7c3c6aSMatthew Dillon 		daddr_t dstaddr;
6441c7c3c6aSMatthew Dillon 
6451c7c3c6aSMatthew Dillon 		/*
6461c7c3c6aSMatthew Dillon 		 * Locate (without changing) the swapblk on the destination,
6471c7c3c6aSMatthew Dillon 		 * unless it is invalid in which case free it silently, or
6481c7c3c6aSMatthew Dillon 		 * if the destination is a resident page, in which case the
6491c7c3c6aSMatthew Dillon 		 * source is thrown away.
6501c7c3c6aSMatthew Dillon 		 */
6511c7c3c6aSMatthew Dillon 
6521c7c3c6aSMatthew Dillon 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
6531c7c3c6aSMatthew Dillon 
6541c7c3c6aSMatthew Dillon 		if (dstaddr == SWAPBLK_NONE) {
6551c7c3c6aSMatthew Dillon 			/*
6561c7c3c6aSMatthew Dillon 			 * Destination has no swapblk and is not resident,
6571c7c3c6aSMatthew Dillon 			 * copy source.
6581c7c3c6aSMatthew Dillon 			 */
6591c7c3c6aSMatthew Dillon 			daddr_t srcaddr;
6601c7c3c6aSMatthew Dillon 
6611c7c3c6aSMatthew Dillon 			srcaddr = swp_pager_meta_ctl(
6621c7c3c6aSMatthew Dillon 			    srcobject,
6631c7c3c6aSMatthew Dillon 			    i + offset,
6641c7c3c6aSMatthew Dillon 			    SWM_POP
6651c7c3c6aSMatthew Dillon 			);
6661c7c3c6aSMatthew Dillon 
6671c7c3c6aSMatthew Dillon 			if (srcaddr != SWAPBLK_NONE)
6684dcc5c2dSMatthew Dillon 				swp_pager_meta_build(dstobject, i, srcaddr);
6691c7c3c6aSMatthew Dillon 		} else {
6701c7c3c6aSMatthew Dillon 			/*
6711c7c3c6aSMatthew Dillon 			 * Destination has valid swapblk or it is represented
6721c7c3c6aSMatthew Dillon 			 * by a resident page.  We destroy the sourceblock.
6731c7c3c6aSMatthew Dillon 			 */
6741c7c3c6aSMatthew Dillon 
6751c7c3c6aSMatthew Dillon 			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
6761c7c3c6aSMatthew Dillon 		}
67726f9a767SRodney W. Grimes 	}
67826f9a767SRodney W. Grimes 
67926f9a767SRodney W. Grimes 	/*
6801c7c3c6aSMatthew Dillon 	 * Free left over swap blocks in source.
6811c7c3c6aSMatthew Dillon 	 *
6821c7c3c6aSMatthew Dillon 	 * We have to revert the type to OBJT_DEFAULT so we do not accidently
6831c7c3c6aSMatthew Dillon 	 * double-remove the object from the swap queues.
68426f9a767SRodney W. Grimes 	 */
68526f9a767SRodney W. Grimes 
686c0877f10SJohn Dyson 	if (destroysource) {
6871c7c3c6aSMatthew Dillon 		swp_pager_meta_free_all(srcobject);
6881c7c3c6aSMatthew Dillon 		/*
6891c7c3c6aSMatthew Dillon 		 * Reverting the type is not necessary, the caller is going
6901c7c3c6aSMatthew Dillon 		 * to destroy srcobject directly, but I'm doing it here
691956f3135SPhilippe Charnier 		 * for consistency since we've removed the object from its
6921c7c3c6aSMatthew Dillon 		 * queues.
6931c7c3c6aSMatthew Dillon 		 */
6941c7c3c6aSMatthew Dillon 		srcobject->type = OBJT_DEFAULT;
695c0877f10SJohn Dyson 	}
6964dcc5c2dSMatthew Dillon 	splx(s);
69726f9a767SRodney W. Grimes }
69826f9a767SRodney W. Grimes 
699df8bae1dSRodney W. Grimes /*
7001c7c3c6aSMatthew Dillon  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
7011c7c3c6aSMatthew Dillon  *				the requested page.
7021c7c3c6aSMatthew Dillon  *
7031c7c3c6aSMatthew Dillon  *	We determine whether good backing store exists for the requested
7041c7c3c6aSMatthew Dillon  *	page and return TRUE if it does, FALSE if it doesn't.
7051c7c3c6aSMatthew Dillon  *
7061c7c3c6aSMatthew Dillon  *	If TRUE, we also try to determine how much valid, contiguous backing
7071c7c3c6aSMatthew Dillon  *	store exists before and after the requested page within a reasonable
7081c7c3c6aSMatthew Dillon  *	distance.  We do not try to restrict it to the swap device stripe
7091c7c3c6aSMatthew Dillon  *	(that is handled in getpages/putpages).  It probably isn't worth
7101c7c3c6aSMatthew Dillon  *	doing here.
711df8bae1dSRodney W. Grimes  */
71226f9a767SRodney W. Grimes 
7131c7c3c6aSMatthew Dillon boolean_t
714a316d390SJohn Dyson swap_pager_haspage(object, pindex, before, after)
71524a1cce3SDavid Greenman 	vm_object_t object;
716a316d390SJohn Dyson 	vm_pindex_t pindex;
71724a1cce3SDavid Greenman 	int *before;
71824a1cce3SDavid Greenman 	int *after;
71926f9a767SRodney W. Grimes {
7201c7c3c6aSMatthew Dillon 	daddr_t blk0;
72125db2c54SMatthew Dillon 	int s;
72226f9a767SRodney W. Grimes 
7231c7c3c6aSMatthew Dillon 	/*
7241c7c3c6aSMatthew Dillon 	 * do we have good backing store at the requested index ?
7251c7c3c6aSMatthew Dillon 	 */
7261c7c3c6aSMatthew Dillon 
72725db2c54SMatthew Dillon 	s = splvm();
7281c7c3c6aSMatthew Dillon 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
7291c7c3c6aSMatthew Dillon 
7304dcc5c2dSMatthew Dillon 	if (blk0 == SWAPBLK_NONE) {
73125db2c54SMatthew Dillon 		splx(s);
7321c7c3c6aSMatthew Dillon 		if (before)
73324a1cce3SDavid Greenman 			*before = 0;
7341c7c3c6aSMatthew Dillon 		if (after)
73524a1cce3SDavid Greenman 			*after = 0;
73626f9a767SRodney W. Grimes 		return (FALSE);
73726f9a767SRodney W. Grimes 	}
73826f9a767SRodney W. Grimes 
73926f9a767SRodney W. Grimes 	/*
7401c7c3c6aSMatthew Dillon 	 * find backwards-looking contiguous good backing store
741e47ed70bSJohn Dyson 	 */
742e47ed70bSJohn Dyson 
7431c7c3c6aSMatthew Dillon 	if (before != NULL) {
74426f9a767SRodney W. Grimes 		int i;
7450d94caffSDavid Greenman 
7461c7c3c6aSMatthew Dillon 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
7471c7c3c6aSMatthew Dillon 			daddr_t blk;
7481c7c3c6aSMatthew Dillon 
7491c7c3c6aSMatthew Dillon 			if (i > pindex)
7501c7c3c6aSMatthew Dillon 				break;
7511c7c3c6aSMatthew Dillon 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
7521c7c3c6aSMatthew Dillon 			if (blk != blk0 - i)
7531c7c3c6aSMatthew Dillon 				break;
754ffc82b0aSJohn Dyson 		}
7551c7c3c6aSMatthew Dillon 		*before = (i - 1);
75626f9a767SRodney W. Grimes 	}
75726f9a767SRodney W. Grimes 
75826f9a767SRodney W. Grimes 	/*
7591c7c3c6aSMatthew Dillon 	 * find forward-looking contiguous good backing store
76026f9a767SRodney W. Grimes 	 */
7611c7c3c6aSMatthew Dillon 
7621c7c3c6aSMatthew Dillon 	if (after != NULL) {
7631c7c3c6aSMatthew Dillon 		int i;
7641c7c3c6aSMatthew Dillon 
7651c7c3c6aSMatthew Dillon 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
7661c7c3c6aSMatthew Dillon 			daddr_t blk;
7671c7c3c6aSMatthew Dillon 
7681c7c3c6aSMatthew Dillon 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
7691c7c3c6aSMatthew Dillon 			if (blk != blk0 + i)
7701c7c3c6aSMatthew Dillon 				break;
77126f9a767SRodney W. Grimes 		}
7721c7c3c6aSMatthew Dillon 		*after = (i - 1);
7731c7c3c6aSMatthew Dillon 	}
77425db2c54SMatthew Dillon 	splx(s);
7751c7c3c6aSMatthew Dillon 	return (TRUE);
7761c7c3c6aSMatthew Dillon }
7771c7c3c6aSMatthew Dillon 
7781c7c3c6aSMatthew Dillon /*
7791c7c3c6aSMatthew Dillon  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
7801c7c3c6aSMatthew Dillon  *
7811c7c3c6aSMatthew Dillon  *	This removes any associated swap backing store, whether valid or
7821c7c3c6aSMatthew Dillon  *	not, from the page.
7831c7c3c6aSMatthew Dillon  *
7841c7c3c6aSMatthew Dillon  *	This routine is typically called when a page is made dirty, at
7851c7c3c6aSMatthew Dillon  *	which point any associated swap can be freed.  MADV_FREE also
7861c7c3c6aSMatthew Dillon  *	calls us in a special-case situation
7871c7c3c6aSMatthew Dillon  *
7881c7c3c6aSMatthew Dillon  *	NOTE!!!  If the page is clean and the swap was valid, the caller
7891c7c3c6aSMatthew Dillon  *	should make the page dirty before calling this routine.  This routine
7901c7c3c6aSMatthew Dillon  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
7911c7c3c6aSMatthew Dillon  *	depends on it.
7921c7c3c6aSMatthew Dillon  *
7931c7c3c6aSMatthew Dillon  *	This routine may not block
7944dcc5c2dSMatthew Dillon  *	This routine must be called at splvm()
7951c7c3c6aSMatthew Dillon  */
7961c7c3c6aSMatthew Dillon 
7971c7c3c6aSMatthew Dillon static void
7981c7c3c6aSMatthew Dillon swap_pager_unswapped(m)
7991c7c3c6aSMatthew Dillon 	vm_page_t m;
8001c7c3c6aSMatthew Dillon {
8011c7c3c6aSMatthew Dillon 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
8021c7c3c6aSMatthew Dillon }
8031c7c3c6aSMatthew Dillon 
8041c7c3c6aSMatthew Dillon /*
805a5296b05SJulian Elischer  * SWAP_PAGER_STRATEGY() - read, write, free blocks
806a5296b05SJulian Elischer  *
807a5296b05SJulian Elischer  *	This implements the vm_pager_strategy() interface to swap and allows
808a5296b05SJulian Elischer  *	other parts of the system to directly access swap as backing store
809a5296b05SJulian Elischer  *	through vm_objects of type OBJT_SWAP.  This is intended to be a
810a5296b05SJulian Elischer  *	cacheless interface ( i.e. caching occurs at higher levels ).
811a5296b05SJulian Elischer  *	Therefore we do not maintain any resident pages.  All I/O goes
8124dcc5c2dSMatthew Dillon  *	directly to and from the swap device.
813a5296b05SJulian Elischer  *
814a5296b05SJulian Elischer  *	Note that b_blkno is scaled for PAGE_SIZE
815a5296b05SJulian Elischer  *
816a5296b05SJulian Elischer  *	We currently attempt to run I/O synchronously or asynchronously as
817a5296b05SJulian Elischer  *	the caller requests.  This isn't perfect because we loose error
818a5296b05SJulian Elischer  *	sequencing when we run multiple ops in parallel to satisfy a request.
819a5296b05SJulian Elischer  *	But this is swap, so we let it all hang out.
820a5296b05SJulian Elischer  */
821a5296b05SJulian Elischer 
822a5296b05SJulian Elischer static void
8230b441832SPoul-Henning Kamp swap_pager_strategy(vm_object_t object, struct bio *bp)
824a5296b05SJulian Elischer {
825a5296b05SJulian Elischer 	vm_pindex_t start;
826a5296b05SJulian Elischer 	int count;
8274dcc5c2dSMatthew Dillon 	int s;
828a5296b05SJulian Elischer 	char *data;
829a5296b05SJulian Elischer 	struct buf *nbp = NULL;
830a5296b05SJulian Elischer 
8310b441832SPoul-Henning Kamp 	/* XXX: KASSERT instead ? */
8320b441832SPoul-Henning Kamp 	if (bp->bio_bcount & PAGE_MASK) {
8330b441832SPoul-Henning Kamp 		bp->bio_error = EINVAL;
8340b441832SPoul-Henning Kamp 		bp->bio_flags |= BIO_ERROR;
8350b441832SPoul-Henning Kamp 		biodone(bp);
8360b441832SPoul-Henning Kamp 		printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
837a5296b05SJulian Elischer 		return;
838a5296b05SJulian Elischer 	}
839a5296b05SJulian Elischer 
840a5296b05SJulian Elischer 	/*
841a5296b05SJulian Elischer 	 * Clear error indication, initialize page index, count, data pointer.
842a5296b05SJulian Elischer 	 */
843a5296b05SJulian Elischer 
8440b441832SPoul-Henning Kamp 	bp->bio_error = 0;
8450b441832SPoul-Henning Kamp 	bp->bio_flags &= ~BIO_ERROR;
8460b441832SPoul-Henning Kamp 	bp->bio_resid = bp->bio_bcount;
847a5296b05SJulian Elischer 
8480b441832SPoul-Henning Kamp 	start = bp->bio_pblkno;
8490b441832SPoul-Henning Kamp 	count = howmany(bp->bio_bcount, PAGE_SIZE);
8500b441832SPoul-Henning Kamp 	data = bp->bio_data;
851a5296b05SJulian Elischer 
8524dcc5c2dSMatthew Dillon 	s = splvm();
8534dcc5c2dSMatthew Dillon 
854a5296b05SJulian Elischer 	/*
85521144e3bSPoul-Henning Kamp 	 * Deal with BIO_DELETE
856a5296b05SJulian Elischer 	 */
857a5296b05SJulian Elischer 
8580b441832SPoul-Henning Kamp 	if (bp->bio_cmd == BIO_DELETE) {
859a5296b05SJulian Elischer 		/*
860a5296b05SJulian Elischer 		 * FREE PAGE(s) - destroy underlying swap that is no longer
861a5296b05SJulian Elischer 		 *		  needed.
862a5296b05SJulian Elischer 		 */
863a5296b05SJulian Elischer 		swp_pager_meta_free(object, start, count);
864a5296b05SJulian Elischer 		splx(s);
8650b441832SPoul-Henning Kamp 		bp->bio_resid = 0;
8660b441832SPoul-Henning Kamp 		biodone(bp);
8674dcc5c2dSMatthew Dillon 		return;
8684dcc5c2dSMatthew Dillon 	}
8694dcc5c2dSMatthew Dillon 
870a5296b05SJulian Elischer 	/*
8714dcc5c2dSMatthew Dillon 	 * Execute read or write
872a5296b05SJulian Elischer 	 */
873a5296b05SJulian Elischer 
874a5296b05SJulian Elischer 	while (count > 0) {
875a5296b05SJulian Elischer 		daddr_t blk;
876a5296b05SJulian Elischer 
877a5296b05SJulian Elischer 		/*
8784dcc5c2dSMatthew Dillon 		 * Obtain block.  If block not found and writing, allocate a
8794dcc5c2dSMatthew Dillon 		 * new block and build it into the object.
8804dcc5c2dSMatthew Dillon 		 */
8814dcc5c2dSMatthew Dillon 
8824dcc5c2dSMatthew Dillon 		blk = swp_pager_meta_ctl(object, start, 0);
8830b441832SPoul-Henning Kamp 		if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
8844dcc5c2dSMatthew Dillon 			blk = swp_pager_getswapspace(1);
8854dcc5c2dSMatthew Dillon 			if (blk == SWAPBLK_NONE) {
8860b441832SPoul-Henning Kamp 				bp->bio_error = ENOMEM;
8870b441832SPoul-Henning Kamp 				bp->bio_flags |= BIO_ERROR;
8884dcc5c2dSMatthew Dillon 				break;
8894dcc5c2dSMatthew Dillon 			}
8904dcc5c2dSMatthew Dillon 			swp_pager_meta_build(object, start, blk);
8914dcc5c2dSMatthew Dillon 		}
8924dcc5c2dSMatthew Dillon 
8934dcc5c2dSMatthew Dillon 		/*
8944dcc5c2dSMatthew Dillon 		 * Do we have to flush our current collection?  Yes if:
8954dcc5c2dSMatthew Dillon 		 *
8964dcc5c2dSMatthew Dillon 		 *	- no swap block at this index
8974dcc5c2dSMatthew Dillon 		 *	- swap block is not contiguous
8984dcc5c2dSMatthew Dillon 		 *	- we cross a physical disk boundry in the
8994dcc5c2dSMatthew Dillon 		 *	  stripe.
900a5296b05SJulian Elischer 		 */
901a5296b05SJulian Elischer 
902a5296b05SJulian Elischer 		if (
9034dcc5c2dSMatthew Dillon 		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
9044dcc5c2dSMatthew Dillon 		     ((nbp->b_blkno ^ blk) & dmmax_mask)
905a5296b05SJulian Elischer 		    )
906a5296b05SJulian Elischer 		) {
9074dcc5c2dSMatthew Dillon 			splx(s);
9080b441832SPoul-Henning Kamp 			if (bp->bio_cmd == BIO_READ) {
909a5296b05SJulian Elischer 				++cnt.v_swapin;
910a5296b05SJulian Elischer 				cnt.v_swappgsin += btoc(nbp->b_bcount);
9114dcc5c2dSMatthew Dillon 			} else {
9124dcc5c2dSMatthew Dillon 				++cnt.v_swapout;
9134dcc5c2dSMatthew Dillon 				cnt.v_swappgsout += btoc(nbp->b_bcount);
9144dcc5c2dSMatthew Dillon 				nbp->b_dirtyend = nbp->b_bcount;
9154dcc5c2dSMatthew Dillon 			}
916a5296b05SJulian Elischer 			flushchainbuf(nbp);
9174dcc5c2dSMatthew Dillon 			s = splvm();
918a5296b05SJulian Elischer 			nbp = NULL;
919a5296b05SJulian Elischer 		}
920a5296b05SJulian Elischer 
921a5296b05SJulian Elischer 		/*
9224dcc5c2dSMatthew Dillon 		 * Add new swapblk to nbp, instantiating nbp if necessary.
9234dcc5c2dSMatthew Dillon 		 * Zero-fill reads are able to take a shortcut.
924a5296b05SJulian Elischer 		 */
9254dcc5c2dSMatthew Dillon 
9264dcc5c2dSMatthew Dillon 		if (blk == SWAPBLK_NONE) {
9274dcc5c2dSMatthew Dillon 			/*
9284dcc5c2dSMatthew Dillon 			 * We can only get here if we are reading.  Since
9294dcc5c2dSMatthew Dillon 			 * we are at splvm() we can safely modify b_resid,
9304dcc5c2dSMatthew Dillon 			 * even if chain ops are in progress.
9314dcc5c2dSMatthew Dillon 			 */
932a5296b05SJulian Elischer 			bzero(data, PAGE_SIZE);
9330b441832SPoul-Henning Kamp 			bp->bio_resid -= PAGE_SIZE;
934a5296b05SJulian Elischer 		} else {
935a5296b05SJulian Elischer 			if (nbp == NULL) {
9360b441832SPoul-Henning Kamp 				nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
937a5296b05SJulian Elischer 				nbp->b_blkno = blk;
9384dcc5c2dSMatthew Dillon 				nbp->b_bcount = 0;
939a5296b05SJulian Elischer 				nbp->b_data = data;
940a5296b05SJulian Elischer 			}
941a5296b05SJulian Elischer 			nbp->b_bcount += PAGE_SIZE;
942a5296b05SJulian Elischer 		}
943a5296b05SJulian Elischer 		--count;
944a5296b05SJulian Elischer 		++start;
945a5296b05SJulian Elischer 		data += PAGE_SIZE;
946a5296b05SJulian Elischer 	}
947a5296b05SJulian Elischer 
948a5296b05SJulian Elischer 	/*
9494dcc5c2dSMatthew Dillon 	 *  Flush out last buffer
950a5296b05SJulian Elischer 	 */
951a5296b05SJulian Elischer 
952a5296b05SJulian Elischer 	splx(s);
953a5296b05SJulian Elischer 
954a5296b05SJulian Elischer 	if (nbp) {
95521144e3bSPoul-Henning Kamp 		if (nbp->b_iocmd == BIO_READ) {
956a5296b05SJulian Elischer 			++cnt.v_swapin;
957a5296b05SJulian Elischer 			cnt.v_swappgsin += btoc(nbp->b_bcount);
958a5296b05SJulian Elischer 		} else {
959a5296b05SJulian Elischer 			++cnt.v_swapout;
960a5296b05SJulian Elischer 			cnt.v_swappgsout += btoc(nbp->b_bcount);
9614dcc5c2dSMatthew Dillon 			nbp->b_dirtyend = nbp->b_bcount;
962a5296b05SJulian Elischer 		}
963a5296b05SJulian Elischer 		flushchainbuf(nbp);
9644dcc5c2dSMatthew Dillon 		/* nbp = NULL; */
965a5296b05SJulian Elischer 	}
9664dcc5c2dSMatthew Dillon 
9674dcc5c2dSMatthew Dillon 	/*
9684dcc5c2dSMatthew Dillon 	 * Wait for completion.
9694dcc5c2dSMatthew Dillon 	 */
9704dcc5c2dSMatthew Dillon 
971a5296b05SJulian Elischer 	waitchainbuf(bp, 0, 1);
972a5296b05SJulian Elischer }
973a5296b05SJulian Elischer 
974a5296b05SJulian Elischer /*
9751c7c3c6aSMatthew Dillon  * SWAP_PAGER_GETPAGES() - bring pages in from swap
9761c7c3c6aSMatthew Dillon  *
9771c7c3c6aSMatthew Dillon  *	Attempt to retrieve (m, count) pages from backing store, but make
9781c7c3c6aSMatthew Dillon  *	sure we retrieve at least m[reqpage].  We try to load in as large
9791c7c3c6aSMatthew Dillon  *	a chunk surrounding m[reqpage] as is contiguous in swap and which
9801c7c3c6aSMatthew Dillon  *	belongs to the same object.
9811c7c3c6aSMatthew Dillon  *
9821c7c3c6aSMatthew Dillon  *	The code is designed for asynchronous operation and
9831c7c3c6aSMatthew Dillon  *	immediate-notification of 'reqpage' but tends not to be
9841c7c3c6aSMatthew Dillon  *	used that way.  Please do not optimize-out this algorithmic
9851c7c3c6aSMatthew Dillon  *	feature, I intend to improve on it in the future.
9861c7c3c6aSMatthew Dillon  *
9871c7c3c6aSMatthew Dillon  *	The parent has a single vm_object_pip_add() reference prior to
9881c7c3c6aSMatthew Dillon  *	calling us and we should return with the same.
9891c7c3c6aSMatthew Dillon  *
9901c7c3c6aSMatthew Dillon  *	The parent has BUSY'd the pages.  We should return with 'm'
9911c7c3c6aSMatthew Dillon  *	left busy, but the others adjusted.
9921c7c3c6aSMatthew Dillon  */
99326f9a767SRodney W. Grimes 
994f708ef1bSPoul-Henning Kamp static int
99524a1cce3SDavid Greenman swap_pager_getpages(object, m, count, reqpage)
99624a1cce3SDavid Greenman 	vm_object_t object;
99726f9a767SRodney W. Grimes 	vm_page_t *m;
99826f9a767SRodney W. Grimes 	int count, reqpage;
999df8bae1dSRodney W. Grimes {
10001c7c3c6aSMatthew Dillon 	struct buf *bp;
10011c7c3c6aSMatthew Dillon 	vm_page_t mreq;
10021c7c3c6aSMatthew Dillon 	int s;
100326f9a767SRodney W. Grimes 	int i;
100426f9a767SRodney W. Grimes 	int j;
10051c7c3c6aSMatthew Dillon 	daddr_t blk;
10061c7c3c6aSMatthew Dillon 	vm_offset_t kva;
10071c7c3c6aSMatthew Dillon 	vm_pindex_t lastpindex;
10080d94caffSDavid Greenman 
10091c7c3c6aSMatthew Dillon 	mreq = m[reqpage];
10101c7c3c6aSMatthew Dillon 
10111c7c3c6aSMatthew Dillon 	if (mreq->object != object) {
10121c7c3c6aSMatthew Dillon 		panic("swap_pager_getpages: object mismatch %p/%p",
10131c7c3c6aSMatthew Dillon 		    object,
10141c7c3c6aSMatthew Dillon 		    mreq->object
10151c7c3c6aSMatthew Dillon 		);
101626f9a767SRodney W. Grimes 	}
10171c7c3c6aSMatthew Dillon 	/*
10181c7c3c6aSMatthew Dillon 	 * Calculate range to retrieve.  The pages have already been assigned
10191c7c3c6aSMatthew Dillon 	 * their swapblks.  We require a *contiguous* range that falls entirely
10201c7c3c6aSMatthew Dillon 	 * within a single device stripe.   If we do not supply it, bad things
10214dcc5c2dSMatthew Dillon 	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
10224dcc5c2dSMatthew Dillon 	 * loops are set up such that the case(s) are handled implicitly.
10234dcc5c2dSMatthew Dillon 	 *
10244dcc5c2dSMatthew Dillon 	 * The swp_*() calls must be made at splvm().  vm_page_free() does
10254dcc5c2dSMatthew Dillon 	 * not need to be, but it will go a little faster if it is.
10261c7c3c6aSMatthew Dillon 	 */
10271c7c3c6aSMatthew Dillon 
10284dcc5c2dSMatthew Dillon 	s = splvm();
10291c7c3c6aSMatthew Dillon 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
10301c7c3c6aSMatthew Dillon 
10311c7c3c6aSMatthew Dillon 	for (i = reqpage - 1; i >= 0; --i) {
10321c7c3c6aSMatthew Dillon 		daddr_t iblk;
10331c7c3c6aSMatthew Dillon 
10341c7c3c6aSMatthew Dillon 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
10351c7c3c6aSMatthew Dillon 		if (blk != iblk + (reqpage - i))
103626f9a767SRodney W. Grimes 			break;
10374dcc5c2dSMatthew Dillon 		if ((blk ^ iblk) & dmmax_mask)
10384dcc5c2dSMatthew Dillon 			break;
103926f9a767SRodney W. Grimes 	}
10401c7c3c6aSMatthew Dillon 	++i;
10411c7c3c6aSMatthew Dillon 
10421c7c3c6aSMatthew Dillon 	for (j = reqpage + 1; j < count; ++j) {
10431c7c3c6aSMatthew Dillon 		daddr_t jblk;
10441c7c3c6aSMatthew Dillon 
10451c7c3c6aSMatthew Dillon 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
10461c7c3c6aSMatthew Dillon 		if (blk != jblk - (j - reqpage))
10471c7c3c6aSMatthew Dillon 			break;
10484dcc5c2dSMatthew Dillon 		if ((blk ^ jblk) & dmmax_mask)
10494dcc5c2dSMatthew Dillon 			break;
10501c7c3c6aSMatthew Dillon 	}
10511c7c3c6aSMatthew Dillon 
10521c7c3c6aSMatthew Dillon 	/*
10531c7c3c6aSMatthew Dillon 	 * free pages outside our collection range.   Note: we never free
10541c7c3c6aSMatthew Dillon 	 * mreq, it must remain busy throughout.
10551c7c3c6aSMatthew Dillon 	 */
10561c7c3c6aSMatthew Dillon 
10571c7c3c6aSMatthew Dillon 	{
10581c7c3c6aSMatthew Dillon 		int k;
10591c7c3c6aSMatthew Dillon 
10604dcc5c2dSMatthew Dillon 		for (k = 0; k < i; ++k)
10614dcc5c2dSMatthew Dillon 			vm_page_free(m[k]);
10624dcc5c2dSMatthew Dillon 		for (k = j; k < count; ++k)
10631c7c3c6aSMatthew Dillon 			vm_page_free(m[k]);
10641c7c3c6aSMatthew Dillon 	}
10654dcc5c2dSMatthew Dillon 	splx(s);
10664dcc5c2dSMatthew Dillon 
10671c7c3c6aSMatthew Dillon 
10681c7c3c6aSMatthew Dillon 	/*
10694dcc5c2dSMatthew Dillon 	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
10704dcc5c2dSMatthew Dillon 	 * still busy, but the others unbusied.
10711c7c3c6aSMatthew Dillon 	 */
10721c7c3c6aSMatthew Dillon 
10734dcc5c2dSMatthew Dillon 	if (blk == SWAPBLK_NONE)
107426f9a767SRodney W. Grimes 		return(VM_PAGER_FAIL);
1075df8bae1dSRodney W. Grimes 
107616f62314SDavid Greenman 	/*
107716f62314SDavid Greenman 	 * Get a swap buffer header to perform the IO
107816f62314SDavid Greenman 	 */
10791c7c3c6aSMatthew Dillon 
10801c7c3c6aSMatthew Dillon 	bp = getpbuf(&nsw_rcount);
108116f62314SDavid Greenman 	kva = (vm_offset_t) bp->b_data;
108226f9a767SRodney W. Grimes 
108316f62314SDavid Greenman 	/*
108416f62314SDavid Greenman 	 * map our page(s) into kva for input
10851c7c3c6aSMatthew Dillon 	 *
10861c7c3c6aSMatthew Dillon 	 * NOTE: B_PAGING is set by pbgetvp()
108716f62314SDavid Greenman 	 */
108816f62314SDavid Greenman 
10891c7c3c6aSMatthew Dillon 	pmap_qenter(kva, m + i, j - i);
10901c7c3c6aSMatthew Dillon 
109121144e3bSPoul-Henning Kamp 	bp->b_iocmd = BIO_READ;
10921c7c3c6aSMatthew Dillon 	bp->b_iodone = swp_pager_async_iodone;
1093b0eeea20SPoul-Henning Kamp 	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
1094a5296b05SJulian Elischer 	bp->b_data = (caddr_t) kva;
109526f9a767SRodney W. Grimes 	crhold(bp->b_rcred);
109626f9a767SRodney W. Grimes 	crhold(bp->b_wcred);
10971c7c3c6aSMatthew Dillon 	bp->b_blkno = blk - (reqpage - i);
10981c7c3c6aSMatthew Dillon 	bp->b_bcount = PAGE_SIZE * (j - i);
10991c7c3c6aSMatthew Dillon 	bp->b_bufsize = PAGE_SIZE * (j - i);
11001c7c3c6aSMatthew Dillon 	bp->b_pager.pg_reqpage = reqpage - i;
11011c7c3c6aSMatthew Dillon 
11021c7c3c6aSMatthew Dillon 	{
11031c7c3c6aSMatthew Dillon 		int k;
11041c7c3c6aSMatthew Dillon 
11051c7c3c6aSMatthew Dillon 		for (k = i; k < j; ++k) {
11061c7c3c6aSMatthew Dillon 			bp->b_pages[k - i] = m[k];
11071c7c3c6aSMatthew Dillon 			vm_page_flag_set(m[k], PG_SWAPINPROG);
11081c7c3c6aSMatthew Dillon 		}
11091c7c3c6aSMatthew Dillon 	}
11101c7c3c6aSMatthew Dillon 	bp->b_npages = j - i;
111126f9a767SRodney W. Grimes 
11120d94caffSDavid Greenman 	pbgetvp(swapdev_vp, bp);
1113df8bae1dSRodney W. Grimes 
1114976e77fcSDavid Greenman 	cnt.v_swapin++;
11151c7c3c6aSMatthew Dillon 	cnt.v_swappgsin += bp->b_npages;
11161c7c3c6aSMatthew Dillon 
1117df8bae1dSRodney W. Grimes 	/*
11181c7c3c6aSMatthew Dillon 	 * We still hold the lock on mreq, and our automatic completion routine
11191c7c3c6aSMatthew Dillon 	 * does not remove it.
1120df8bae1dSRodney W. Grimes 	 */
11211c7c3c6aSMatthew Dillon 
11221c7c3c6aSMatthew Dillon 	vm_object_pip_add(mreq->object, bp->b_npages);
11231c7c3c6aSMatthew Dillon 	lastpindex = m[j-1]->pindex;
11241c7c3c6aSMatthew Dillon 
11251c7c3c6aSMatthew Dillon 	/*
11261c7c3c6aSMatthew Dillon 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
11271c7c3c6aSMatthew Dillon 	 * this point because we automatically release it on completion.
11281c7c3c6aSMatthew Dillon 	 * Instead, we look at the one page we are interested in which we
11291c7c3c6aSMatthew Dillon 	 * still hold a lock on even through the I/O completion.
11301c7c3c6aSMatthew Dillon 	 *
11311c7c3c6aSMatthew Dillon 	 * The other pages in our m[] array are also released on completion,
11321c7c3c6aSMatthew Dillon 	 * so we cannot assume they are valid anymore either.
11331c7c3c6aSMatthew Dillon 	 *
1134ea3aecf5SPeter Wemm 	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
11351c7c3c6aSMatthew Dillon 	 */
11361c7c3c6aSMatthew Dillon 
1137b890cb2cSPeter Wemm 	BUF_KERNPROC(bp);
1138b99c307aSPoul-Henning Kamp 	BUF_STRATEGY(bp);
113926f9a767SRodney W. Grimes 
114026f9a767SRodney W. Grimes 	/*
11411c7c3c6aSMatthew Dillon 	 * wait for the page we want to complete.  PG_SWAPINPROG is always
11421c7c3c6aSMatthew Dillon 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
11431c7c3c6aSMatthew Dillon 	 * is set in the meta-data.
114426f9a767SRodney W. Grimes 	 */
11451b119d9dSDavid Greenman 
11461c7c3c6aSMatthew Dillon 	s = splvm();
11471c7c3c6aSMatthew Dillon 
11481c7c3c6aSMatthew Dillon 	while ((mreq->flags & PG_SWAPINPROG) != 0) {
11491c7c3c6aSMatthew Dillon 		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
11501c7c3c6aSMatthew Dillon 		cnt.v_intrans++;
11511c7c3c6aSMatthew Dillon 		if (tsleep(mreq, PSWP, "swread", hz*20)) {
1152ac1e407bSBruce Evans 			printf(
11531c7c3c6aSMatthew Dillon 			    "swap_pager: indefinite wait buffer: device:"
1154af647ddeSBruce Evans 				" %s, blkno: %ld, size: %ld\n",
1155af647ddeSBruce Evans 			    devtoname(bp->b_dev), (long)bp->b_blkno,
1156af647ddeSBruce Evans 			    bp->b_bcount
11571c7c3c6aSMatthew Dillon 			);
11581c7c3c6aSMatthew Dillon 		}
11591b119d9dSDavid Greenman 	}
116026f9a767SRodney W. Grimes 
1161df8bae1dSRodney W. Grimes 	splx(s);
116226f9a767SRodney W. Grimes 
116326f9a767SRodney W. Grimes 	/*
11641c7c3c6aSMatthew Dillon 	 * mreq is left bussied after completion, but all the other pages
11651c7c3c6aSMatthew Dillon 	 * are freed.  If we had an unrecoverable read error the page will
11661c7c3c6aSMatthew Dillon 	 * not be valid.
116726f9a767SRodney W. Grimes 	 */
116826f9a767SRodney W. Grimes 
11691c7c3c6aSMatthew Dillon 	if (mreq->valid != VM_PAGE_BITS_ALL) {
11701c7c3c6aSMatthew Dillon 		return(VM_PAGER_ERROR);
117126f9a767SRodney W. Grimes 	} else {
11721c7c3c6aSMatthew Dillon 		return(VM_PAGER_OK);
117326f9a767SRodney W. Grimes 	}
11741c7c3c6aSMatthew Dillon 
11751c7c3c6aSMatthew Dillon 	/*
11761c7c3c6aSMatthew Dillon 	 * A final note: in a low swap situation, we cannot deallocate swap
11771c7c3c6aSMatthew Dillon 	 * and mark a page dirty here because the caller is likely to mark
11781c7c3c6aSMatthew Dillon 	 * the page clean when we return, causing the page to possibly revert
11791c7c3c6aSMatthew Dillon 	 * to all-zero's later.
11801c7c3c6aSMatthew Dillon 	 */
1181df8bae1dSRodney W. Grimes }
1182df8bae1dSRodney W. Grimes 
11831c7c3c6aSMatthew Dillon /*
11841c7c3c6aSMatthew Dillon  *	swap_pager_putpages:
11851c7c3c6aSMatthew Dillon  *
11861c7c3c6aSMatthew Dillon  *	Assign swap (if necessary) and initiate I/O on the specified pages.
11871c7c3c6aSMatthew Dillon  *
11881c7c3c6aSMatthew Dillon  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
11891c7c3c6aSMatthew Dillon  *	are automatically converted to SWAP objects.
11901c7c3c6aSMatthew Dillon  *
1191ea3aecf5SPeter Wemm  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
11921c7c3c6aSMatthew Dillon  *	vm_page reservation system coupled with properly written VFS devices
11931c7c3c6aSMatthew Dillon  *	should ensure that no low-memory deadlock occurs.  This is an area
11941c7c3c6aSMatthew Dillon  *	which needs work.
11951c7c3c6aSMatthew Dillon  *
11961c7c3c6aSMatthew Dillon  *	The parent has N vm_object_pip_add() references prior to
11971c7c3c6aSMatthew Dillon  *	calling us and will remove references for rtvals[] that are
11981c7c3c6aSMatthew Dillon  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
11991c7c3c6aSMatthew Dillon  *	completion.
12001c7c3c6aSMatthew Dillon  *
12011c7c3c6aSMatthew Dillon  *	The parent has soft-busy'd the pages it passes us and will unbusy
12021c7c3c6aSMatthew Dillon  *	those whos rtvals[] entry is not set to VM_PAGER_PEND on return.
12031c7c3c6aSMatthew Dillon  *	We need to unbusy the rest on I/O completion.
12041c7c3c6aSMatthew Dillon  */
12051c7c3c6aSMatthew Dillon 
1206e4542174SMatthew Dillon void
120724a1cce3SDavid Greenman swap_pager_putpages(object, m, count, sync, rtvals)
120824a1cce3SDavid Greenman 	vm_object_t object;
120926f9a767SRodney W. Grimes 	vm_page_t *m;
121026f9a767SRodney W. Grimes 	int count;
121124a1cce3SDavid Greenman 	boolean_t sync;
121226f9a767SRodney W. Grimes 	int *rtvals;
1213df8bae1dSRodney W. Grimes {
12141c7c3c6aSMatthew Dillon 	int i;
12151c7c3c6aSMatthew Dillon 	int n = 0;
1216df8bae1dSRodney W. Grimes 
12171c7c3c6aSMatthew Dillon 	if (count && m[0]->object != object) {
12181c7c3c6aSMatthew Dillon 		panic("swap_pager_getpages: object mismatch %p/%p",
12191c7c3c6aSMatthew Dillon 		    object,
12201c7c3c6aSMatthew Dillon 		    m[0]->object
12211c7c3c6aSMatthew Dillon 		);
12221c7c3c6aSMatthew Dillon 	}
12231c7c3c6aSMatthew Dillon 	/*
12241c7c3c6aSMatthew Dillon 	 * Step 1
12251c7c3c6aSMatthew Dillon 	 *
12261c7c3c6aSMatthew Dillon 	 * Turn object into OBJT_SWAP
12271c7c3c6aSMatthew Dillon 	 * check for bogus sysops
12281c7c3c6aSMatthew Dillon 	 * force sync if not pageout process
12291c7c3c6aSMatthew Dillon 	 */
1230e736cd05SJohn Dyson 
12314dcc5c2dSMatthew Dillon 	if (object->type != OBJT_SWAP)
12324dcc5c2dSMatthew Dillon 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1233e47ed70bSJohn Dyson 
1234e47ed70bSJohn Dyson 	if (curproc != pageproc)
1235e47ed70bSJohn Dyson 		sync = TRUE;
123626f9a767SRodney W. Grimes 
12371c7c3c6aSMatthew Dillon 	/*
12381c7c3c6aSMatthew Dillon 	 * Step 2
12391c7c3c6aSMatthew Dillon 	 *
1240ad3cce20SMatthew Dillon 	 * Update nsw parameters from swap_async_max sysctl values.
1241ad3cce20SMatthew Dillon 	 * Do not let the sysop crash the machine with bogus numbers.
1242327f4e83SMatthew Dillon 	 */
1243327f4e83SMatthew Dillon 
1244327f4e83SMatthew Dillon 	if (swap_async_max != nsw_wcount_async_max) {
1245327f4e83SMatthew Dillon 		int n;
1246327f4e83SMatthew Dillon 		int s;
1247327f4e83SMatthew Dillon 
1248327f4e83SMatthew Dillon 		/*
1249327f4e83SMatthew Dillon 		 * limit range
1250327f4e83SMatthew Dillon 		 */
1251327f4e83SMatthew Dillon 		if ((n = swap_async_max) > nswbuf / 2)
1252327f4e83SMatthew Dillon 			n = nswbuf / 2;
1253327f4e83SMatthew Dillon 		if (n < 1)
1254327f4e83SMatthew Dillon 			n = 1;
1255327f4e83SMatthew Dillon 		swap_async_max = n;
1256327f4e83SMatthew Dillon 
1257327f4e83SMatthew Dillon 		/*
1258327f4e83SMatthew Dillon 		 * Adjust difference ( if possible ).  If the current async
1259327f4e83SMatthew Dillon 		 * count is too low, we may not be able to make the adjustment
1260327f4e83SMatthew Dillon 		 * at this time.
1261327f4e83SMatthew Dillon 		 */
1262327f4e83SMatthew Dillon 		s = splvm();
1263327f4e83SMatthew Dillon 		n -= nsw_wcount_async_max;
1264327f4e83SMatthew Dillon 		if (nsw_wcount_async + n >= 0) {
1265327f4e83SMatthew Dillon 			nsw_wcount_async += n;
1266327f4e83SMatthew Dillon 			nsw_wcount_async_max += n;
1267327f4e83SMatthew Dillon 			wakeup(&nsw_wcount_async);
1268327f4e83SMatthew Dillon 		}
1269327f4e83SMatthew Dillon 		splx(s);
1270327f4e83SMatthew Dillon 	}
1271327f4e83SMatthew Dillon 
1272327f4e83SMatthew Dillon 	/*
1273327f4e83SMatthew Dillon 	 * Step 3
1274327f4e83SMatthew Dillon 	 *
12751c7c3c6aSMatthew Dillon 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
12761c7c3c6aSMatthew Dillon 	 * The page is left dirty until the pageout operation completes
12771c7c3c6aSMatthew Dillon 	 * successfully.
12781c7c3c6aSMatthew Dillon 	 */
127926f9a767SRodney W. Grimes 
12801c7c3c6aSMatthew Dillon 	for (i = 0; i < count; i += n) {
12811c7c3c6aSMatthew Dillon 		int s;
12821c7c3c6aSMatthew Dillon 		int j;
12831c7c3c6aSMatthew Dillon 		struct buf *bp;
1284a316d390SJohn Dyson 		daddr_t blk;
128526f9a767SRodney W. Grimes 
1286df8bae1dSRodney W. Grimes 		/*
12871c7c3c6aSMatthew Dillon 		 * Maximum I/O size is limited by a number of factors.
1288df8bae1dSRodney W. Grimes 		 */
128926f9a767SRodney W. Grimes 
12901c7c3c6aSMatthew Dillon 		n = min(BLIST_MAX_ALLOC, count - i);
1291327f4e83SMatthew Dillon 		n = min(n, nsw_cluster_max);
12921c7c3c6aSMatthew Dillon 
12934dcc5c2dSMatthew Dillon 		s = splvm();
12944dcc5c2dSMatthew Dillon 
129526f9a767SRodney W. Grimes 		/*
12961c7c3c6aSMatthew Dillon 		 * Get biggest block of swap we can.  If we fail, fall
12971c7c3c6aSMatthew Dillon 		 * back and try to allocate a smaller block.  Don't go
12981c7c3c6aSMatthew Dillon 		 * overboard trying to allocate space if it would overly
12991c7c3c6aSMatthew Dillon 		 * fragment swap.
130026f9a767SRodney W. Grimes 		 */
13011c7c3c6aSMatthew Dillon 		while (
13021c7c3c6aSMatthew Dillon 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
13031c7c3c6aSMatthew Dillon 		    n > 4
13041c7c3c6aSMatthew Dillon 		) {
13051c7c3c6aSMatthew Dillon 			n >>= 1;
130626f9a767SRodney W. Grimes 		}
13071c7c3c6aSMatthew Dillon 		if (blk == SWAPBLK_NONE) {
13084dcc5c2dSMatthew Dillon 			for (j = 0; j < n; ++j)
13091c7c3c6aSMatthew Dillon 				rtvals[i+j] = VM_PAGER_FAIL;
13104dcc5c2dSMatthew Dillon 			splx(s);
13111c7c3c6aSMatthew Dillon 			continue;
131226f9a767SRodney W. Grimes 		}
131326f9a767SRodney W. Grimes 
131426f9a767SRodney W. Grimes 		/*
13154dcc5c2dSMatthew Dillon 		 * The I/O we are constructing cannot cross a physical
13164dcc5c2dSMatthew Dillon 		 * disk boundry in the swap stripe.  Note: we are still
13174dcc5c2dSMatthew Dillon 		 * at splvm().
131826f9a767SRodney W. Grimes 		 */
13191c7c3c6aSMatthew Dillon 		if ((blk ^ (blk + n)) & dmmax_mask) {
13201c7c3c6aSMatthew Dillon 			j = ((blk + dmmax) & dmmax_mask) - blk;
13211c7c3c6aSMatthew Dillon 			swp_pager_freeswapspace(blk + j, n - j);
13221c7c3c6aSMatthew Dillon 			n = j;
1323e47ed70bSJohn Dyson 		}
132426f9a767SRodney W. Grimes 
132526f9a767SRodney W. Grimes 		/*
13261c7c3c6aSMatthew Dillon 		 * All I/O parameters have been satisfied, build the I/O
13271c7c3c6aSMatthew Dillon 		 * request and assign the swap space.
13281c7c3c6aSMatthew Dillon 		 *
13291c7c3c6aSMatthew Dillon 		 * NOTE: B_PAGING is set by pbgetvp()
133026f9a767SRodney W. Grimes 		 */
133126f9a767SRodney W. Grimes 
1332327f4e83SMatthew Dillon 		if (sync == TRUE) {
1333327f4e83SMatthew Dillon 			bp = getpbuf(&nsw_wcount_sync);
1334327f4e83SMatthew Dillon 		} else {
1335327f4e83SMatthew Dillon 			bp = getpbuf(&nsw_wcount_async);
133621144e3bSPoul-Henning Kamp 			bp->b_flags = B_ASYNC;
1337327f4e83SMatthew Dillon 		}
1338912e4ae9SPoul-Henning Kamp 		bp->b_iocmd = BIO_WRITE;
13391c7c3c6aSMatthew Dillon 		bp->b_spc = NULL;	/* not used, but NULL-out anyway */
134026f9a767SRodney W. Grimes 
13411c7c3c6aSMatthew Dillon 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
13421c7c3c6aSMatthew Dillon 
1343b0eeea20SPoul-Henning Kamp 		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
13441c7c3c6aSMatthew Dillon 		bp->b_bcount = PAGE_SIZE * n;
13451c7c3c6aSMatthew Dillon 		bp->b_bufsize = PAGE_SIZE * n;
13461c7c3c6aSMatthew Dillon 		bp->b_blkno = blk;
1347e47ed70bSJohn Dyson 
1348a5296b05SJulian Elischer 		crhold(bp->b_rcred);
1349a5296b05SJulian Elischer 		crhold(bp->b_wcred);
1350a5296b05SJulian Elischer 
1351a5296b05SJulian Elischer 		pbgetvp(swapdev_vp, bp);
1352a5296b05SJulian Elischer 
13531c7c3c6aSMatthew Dillon 		for (j = 0; j < n; ++j) {
13541c7c3c6aSMatthew Dillon 			vm_page_t mreq = m[i+j];
13551c7c3c6aSMatthew Dillon 
13561c7c3c6aSMatthew Dillon 			swp_pager_meta_build(
13571c7c3c6aSMatthew Dillon 			    mreq->object,
13581c7c3c6aSMatthew Dillon 			    mreq->pindex,
13594dcc5c2dSMatthew Dillon 			    blk + j
13601c7c3c6aSMatthew Dillon 			);
13617dbf82dcSMatthew Dillon 			vm_page_dirty(mreq);
13621c7c3c6aSMatthew Dillon 			rtvals[i+j] = VM_PAGER_OK;
13631c7c3c6aSMatthew Dillon 
13641c7c3c6aSMatthew Dillon 			vm_page_flag_set(mreq, PG_SWAPINPROG);
13651c7c3c6aSMatthew Dillon 			bp->b_pages[j] = mreq;
13661c7c3c6aSMatthew Dillon 		}
13671c7c3c6aSMatthew Dillon 		bp->b_npages = n;
1368a5296b05SJulian Elischer 		/*
1369a5296b05SJulian Elischer 		 * Must set dirty range for NFS to work.
1370a5296b05SJulian Elischer 		 */
1371a5296b05SJulian Elischer 		bp->b_dirtyoff = 0;
1372a5296b05SJulian Elischer 		bp->b_dirtyend = bp->b_bcount;
13731c7c3c6aSMatthew Dillon 
13741c7c3c6aSMatthew Dillon 		cnt.v_swapout++;
13751c7c3c6aSMatthew Dillon 		cnt.v_swappgsout += bp->b_npages;
137626f9a767SRodney W. Grimes 		swapdev_vp->v_numoutput++;
137726f9a767SRodney W. Grimes 
13784dcc5c2dSMatthew Dillon 		splx(s);
13794dcc5c2dSMatthew Dillon 
138026f9a767SRodney W. Grimes 		/*
13811c7c3c6aSMatthew Dillon 		 * asynchronous
13821c7c3c6aSMatthew Dillon 		 *
1383ea3aecf5SPeter Wemm 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
138426f9a767SRodney W. Grimes 		 */
1385e47ed70bSJohn Dyson 
13861c7c3c6aSMatthew Dillon 		if (sync == FALSE) {
13871c7c3c6aSMatthew Dillon 			bp->b_iodone = swp_pager_async_iodone;
138867812eacSKirk McKusick 			BUF_KERNPROC(bp);
1389b99c307aSPoul-Henning Kamp 			BUF_STRATEGY(bp);
13901c7c3c6aSMatthew Dillon 
13911c7c3c6aSMatthew Dillon 			for (j = 0; j < n; ++j)
13921c7c3c6aSMatthew Dillon 				rtvals[i+j] = VM_PAGER_PEND;
13931c7c3c6aSMatthew Dillon 			continue;
139426f9a767SRodney W. Grimes 		}
1395e47ed70bSJohn Dyson 
139626f9a767SRodney W. Grimes 		/*
13971c7c3c6aSMatthew Dillon 		 * synchronous
13981c7c3c6aSMatthew Dillon 		 *
1399ea3aecf5SPeter Wemm 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
14001c7c3c6aSMatthew Dillon 		 */
14011c7c3c6aSMatthew Dillon 
14021c7c3c6aSMatthew Dillon 		bp->b_iodone = swp_pager_sync_iodone;
1403b99c307aSPoul-Henning Kamp 		BUF_STRATEGY(bp);
14041c7c3c6aSMatthew Dillon 
14051c7c3c6aSMatthew Dillon 		/*
14061c7c3c6aSMatthew Dillon 		 * Wait for the sync I/O to complete, then update rtvals.
14071c7c3c6aSMatthew Dillon 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
14081c7c3c6aSMatthew Dillon 		 * our async completion routine at the end, thus avoiding a
14091c7c3c6aSMatthew Dillon 		 * double-free.
141026f9a767SRodney W. Grimes 		 */
14114dcc5c2dSMatthew Dillon 		s = splbio();
14124dcc5c2dSMatthew Dillon 
141326f9a767SRodney W. Grimes 		while ((bp->b_flags & B_DONE) == 0) {
141424a1cce3SDavid Greenman 			tsleep(bp, PVM, "swwrt", 0);
141526f9a767SRodney W. Grimes 		}
1416e47ed70bSJohn Dyson 
14171c7c3c6aSMatthew Dillon 		for (j = 0; j < n; ++j)
14181c7c3c6aSMatthew Dillon 			rtvals[i+j] = VM_PAGER_PEND;
141926f9a767SRodney W. Grimes 
14201c7c3c6aSMatthew Dillon 		/*
14211c7c3c6aSMatthew Dillon 		 * Now that we are through with the bp, we can call the
14221c7c3c6aSMatthew Dillon 		 * normal async completion, which frees everything up.
14231c7c3c6aSMatthew Dillon 		 */
14241c7c3c6aSMatthew Dillon 
14251c7c3c6aSMatthew Dillon 		swp_pager_async_iodone(bp);
142626f9a767SRodney W. Grimes 
142726f9a767SRodney W. Grimes 		splx(s);
14281c7c3c6aSMatthew Dillon 	}
14291c7c3c6aSMatthew Dillon }
14301c7c3c6aSMatthew Dillon 
14311c7c3c6aSMatthew Dillon /*
14321c7c3c6aSMatthew Dillon  *	swap_pager_sync_iodone:
14331c7c3c6aSMatthew Dillon  *
14341c7c3c6aSMatthew Dillon  *	Completion routine for synchronous reads and writes from/to swap.
14351c7c3c6aSMatthew Dillon  *	We just mark the bp is complete and wake up anyone waiting on it.
14361c7c3c6aSMatthew Dillon  *
14374dcc5c2dSMatthew Dillon  *	This routine may not block.  This routine is called at splbio() or better.
14381c7c3c6aSMatthew Dillon  */
14391c7c3c6aSMatthew Dillon 
14401c7c3c6aSMatthew Dillon static void
14411c7c3c6aSMatthew Dillon swp_pager_sync_iodone(bp)
14421c7c3c6aSMatthew Dillon 	struct buf *bp;
14431c7c3c6aSMatthew Dillon {
14441c7c3c6aSMatthew Dillon 	bp->b_flags |= B_DONE;
14451c7c3c6aSMatthew Dillon 	bp->b_flags &= ~B_ASYNC;
14461c7c3c6aSMatthew Dillon 	wakeup(bp);
14471c7c3c6aSMatthew Dillon }
14481c7c3c6aSMatthew Dillon 
14491c7c3c6aSMatthew Dillon /*
14501c7c3c6aSMatthew Dillon  *	swp_pager_async_iodone:
14511c7c3c6aSMatthew Dillon  *
14521c7c3c6aSMatthew Dillon  *	Completion routine for asynchronous reads and writes from/to swap.
14531c7c3c6aSMatthew Dillon  *	Also called manually by synchronous code to finish up a bp.
14541c7c3c6aSMatthew Dillon  *
14551c7c3c6aSMatthew Dillon  *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
14561c7c3c6aSMatthew Dillon  *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
14571c7c3c6aSMatthew Dillon  *	unbusy all pages except the 'main' request page.  For WRITE
14581c7c3c6aSMatthew Dillon  *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
14591c7c3c6aSMatthew Dillon  *	because we marked them all VM_PAGER_PEND on return from putpages ).
14601c7c3c6aSMatthew Dillon  *
14611c7c3c6aSMatthew Dillon  *	This routine may not block.
14624dcc5c2dSMatthew Dillon  *	This routine is called at splbio() or better
14634dcc5c2dSMatthew Dillon  *
14644dcc5c2dSMatthew Dillon  *	We up ourselves to splvm() as required for various vm_page related
14654dcc5c2dSMatthew Dillon  *	calls.
14661c7c3c6aSMatthew Dillon  */
14671c7c3c6aSMatthew Dillon 
14681c7c3c6aSMatthew Dillon static void
14691c7c3c6aSMatthew Dillon swp_pager_async_iodone(bp)
14701c7c3c6aSMatthew Dillon 	register struct buf *bp;
14711c7c3c6aSMatthew Dillon {
14721c7c3c6aSMatthew Dillon 	int s;
14731c7c3c6aSMatthew Dillon 	int i;
14741c7c3c6aSMatthew Dillon 	vm_object_t object = NULL;
14751c7c3c6aSMatthew Dillon 
14761c7c3c6aSMatthew Dillon 	bp->b_flags |= B_DONE;
14771c7c3c6aSMatthew Dillon 
14781c7c3c6aSMatthew Dillon 	/*
14791c7c3c6aSMatthew Dillon 	 * report error
14801c7c3c6aSMatthew Dillon 	 */
14811c7c3c6aSMatthew Dillon 
1482c244d2deSPoul-Henning Kamp 	if (bp->b_ioflags & BIO_ERROR) {
14831c7c3c6aSMatthew Dillon 		printf(
14841c7c3c6aSMatthew Dillon 		    "swap_pager: I/O error - %s failed; blkno %ld,"
14851c7c3c6aSMatthew Dillon 			"size %ld, error %d\n",
148621144e3bSPoul-Henning Kamp 		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
14871c7c3c6aSMatthew Dillon 		    (long)bp->b_blkno,
14881c7c3c6aSMatthew Dillon 		    (long)bp->b_bcount,
14891c7c3c6aSMatthew Dillon 		    bp->b_error
14901c7c3c6aSMatthew Dillon 		);
14911c7c3c6aSMatthew Dillon 	}
14921c7c3c6aSMatthew Dillon 
14931c7c3c6aSMatthew Dillon 	/*
14944dcc5c2dSMatthew Dillon 	 * set object, raise to splvm().
14951c7c3c6aSMatthew Dillon 	 */
14961c7c3c6aSMatthew Dillon 
14971c7c3c6aSMatthew Dillon 	if (bp->b_npages)
14981c7c3c6aSMatthew Dillon 		object = bp->b_pages[0]->object;
14994dcc5c2dSMatthew Dillon 	s = splvm();
150026f9a767SRodney W. Grimes 
150126f9a767SRodney W. Grimes 	/*
150226f9a767SRodney W. Grimes 	 * remove the mapping for kernel virtual
150326f9a767SRodney W. Grimes 	 */
15041c7c3c6aSMatthew Dillon 
15051c7c3c6aSMatthew Dillon 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
150626f9a767SRodney W. Grimes 
150726f9a767SRodney W. Grimes 	/*
15081c7c3c6aSMatthew Dillon 	 * cleanup pages.  If an error occurs writing to swap, we are in
15091c7c3c6aSMatthew Dillon 	 * very serious trouble.  If it happens to be a disk error, though,
15101c7c3c6aSMatthew Dillon 	 * we may be able to recover by reassigning the swap later on.  So
15111c7c3c6aSMatthew Dillon 	 * in this case we remove the m->swapblk assignment for the page
15121c7c3c6aSMatthew Dillon 	 * but do not free it in the rlist.  The errornous block(s) are thus
15131c7c3c6aSMatthew Dillon 	 * never reallocated as swap.  Redirty the page and continue.
151426f9a767SRodney W. Grimes 	 */
151526f9a767SRodney W. Grimes 
15161c7c3c6aSMatthew Dillon 	for (i = 0; i < bp->b_npages; ++i) {
15171c7c3c6aSMatthew Dillon 		vm_page_t m = bp->b_pages[i];
1518e47ed70bSJohn Dyson 
15191c7c3c6aSMatthew Dillon 		vm_page_flag_clear(m, PG_SWAPINPROG);
1520e47ed70bSJohn Dyson 
1521c244d2deSPoul-Henning Kamp 		if (bp->b_ioflags & BIO_ERROR) {
1522ffc82b0aSJohn Dyson 			/*
15231c7c3c6aSMatthew Dillon 			 * If an error occurs I'd love to throw the swapblk
15241c7c3c6aSMatthew Dillon 			 * away without freeing it back to swapspace, so it
15251c7c3c6aSMatthew Dillon 			 * can never be used again.  But I can't from an
15261c7c3c6aSMatthew Dillon 			 * interrupt.
1527ffc82b0aSJohn Dyson 			 */
15281c7c3c6aSMatthew Dillon 
152921144e3bSPoul-Henning Kamp 			if (bp->b_iocmd == BIO_READ) {
15301c7c3c6aSMatthew Dillon 				/*
15311c7c3c6aSMatthew Dillon 				 * When reading, reqpage needs to stay
15321c7c3c6aSMatthew Dillon 				 * locked for the parent, but all other
15331c7c3c6aSMatthew Dillon 				 * pages can be freed.  We still want to
15341c7c3c6aSMatthew Dillon 				 * wakeup the parent waiting on the page,
15351c7c3c6aSMatthew Dillon 				 * though.  ( also: pg_reqpage can be -1 and
15361c7c3c6aSMatthew Dillon 				 * not match anything ).
15371c7c3c6aSMatthew Dillon 				 *
15381c7c3c6aSMatthew Dillon 				 * We have to wake specifically requested pages
15391c7c3c6aSMatthew Dillon 				 * up too because we cleared PG_SWAPINPROG and
15401c7c3c6aSMatthew Dillon 				 * someone may be waiting for that.
15411c7c3c6aSMatthew Dillon 				 *
15421c7c3c6aSMatthew Dillon 				 * NOTE: for reads, m->dirty will probably
1543956f3135SPhilippe Charnier 				 * be overridden by the original caller of
15441c7c3c6aSMatthew Dillon 				 * getpages so don't play cute tricks here.
15451c7c3c6aSMatthew Dillon 				 *
1546279d7226SMatthew Dillon 				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
1547279d7226SMatthew Dillon 				 * AS THIS MESSES WITH object->memq, and it is
1548279d7226SMatthew Dillon 				 * not legal to mess with object->memq from an
1549279d7226SMatthew Dillon 				 * interrupt.
15501c7c3c6aSMatthew Dillon 				 */
15511c7c3c6aSMatthew Dillon 
15521c7c3c6aSMatthew Dillon 				m->valid = 0;
15531c7c3c6aSMatthew Dillon 				vm_page_flag_clear(m, PG_ZERO);
15541c7c3c6aSMatthew Dillon 
15551c7c3c6aSMatthew Dillon 				if (i != bp->b_pager.pg_reqpage)
15561c7c3c6aSMatthew Dillon 					vm_page_free(m);
15571c7c3c6aSMatthew Dillon 				else
15581c7c3c6aSMatthew Dillon 					vm_page_flash(m);
15591c7c3c6aSMatthew Dillon 				/*
15601c7c3c6aSMatthew Dillon 				 * If i == bp->b_pager.pg_reqpage, do not wake
15611c7c3c6aSMatthew Dillon 				 * the page up.  The caller needs to.
15621c7c3c6aSMatthew Dillon 				 */
15631c7c3c6aSMatthew Dillon 			} else {
15641c7c3c6aSMatthew Dillon 				/*
15651c7c3c6aSMatthew Dillon 				 * If a write error occurs, reactivate page
15661c7c3c6aSMatthew Dillon 				 * so it doesn't clog the inactive list,
15671c7c3c6aSMatthew Dillon 				 * then finish the I/O.
15681c7c3c6aSMatthew Dillon 				 */
15697dbf82dcSMatthew Dillon 				vm_page_dirty(m);
15701c7c3c6aSMatthew Dillon 				vm_page_activate(m);
15711c7c3c6aSMatthew Dillon 				vm_page_io_finish(m);
15721c7c3c6aSMatthew Dillon 			}
157321144e3bSPoul-Henning Kamp 		} else if (bp->b_iocmd == BIO_READ) {
15741c7c3c6aSMatthew Dillon 			/*
15751c7c3c6aSMatthew Dillon 			 * For read success, clear dirty bits.  Nobody should
15761c7c3c6aSMatthew Dillon 			 * have this page mapped but don't take any chances,
15771c7c3c6aSMatthew Dillon 			 * make sure the pmap modify bits are also cleared.
15781c7c3c6aSMatthew Dillon 			 *
15791c7c3c6aSMatthew Dillon 			 * NOTE: for reads, m->dirty will probably be
1580956f3135SPhilippe Charnier 			 * overridden by the original caller of getpages so
15811c7c3c6aSMatthew Dillon 			 * we cannot set them in order to free the underlying
15821c7c3c6aSMatthew Dillon 			 * swap in a low-swap situation.  I don't think we'd
15831c7c3c6aSMatthew Dillon 			 * want to do that anyway, but it was an optimization
15841c7c3c6aSMatthew Dillon 			 * that existed in the old swapper for a time before
15851c7c3c6aSMatthew Dillon 			 * it got ripped out due to precisely this problem.
15861c7c3c6aSMatthew Dillon 			 *
15871c7c3c6aSMatthew Dillon 			 * clear PG_ZERO in page.
15881c7c3c6aSMatthew Dillon 			 *
15891c7c3c6aSMatthew Dillon 			 * If not the requested page then deactivate it.
15901c7c3c6aSMatthew Dillon 			 *
15911c7c3c6aSMatthew Dillon 			 * Note that the requested page, reqpage, is left
15921c7c3c6aSMatthew Dillon 			 * busied, but we still have to wake it up.  The
15931c7c3c6aSMatthew Dillon 			 * other pages are released (unbusied) by
15941c7c3c6aSMatthew Dillon 			 * vm_page_wakeup().  We do not set reqpage's
15951c7c3c6aSMatthew Dillon 			 * valid bits here, it is up to the caller.
15961c7c3c6aSMatthew Dillon 			 */
15971c7c3c6aSMatthew Dillon 
15980385347cSPeter Wemm 			pmap_clear_modify(m);
15991c7c3c6aSMatthew Dillon 			m->valid = VM_PAGE_BITS_ALL;
16002c28a105SAlan Cox 			vm_page_undirty(m);
16011c7c3c6aSMatthew Dillon 			vm_page_flag_clear(m, PG_ZERO);
16021c7c3c6aSMatthew Dillon 
16031c7c3c6aSMatthew Dillon 			/*
16041c7c3c6aSMatthew Dillon 			 * We have to wake specifically requested pages
16051c7c3c6aSMatthew Dillon 			 * up too because we cleared PG_SWAPINPROG and
16061c7c3c6aSMatthew Dillon 			 * could be waiting for it in getpages.  However,
16071c7c3c6aSMatthew Dillon 			 * be sure to not unbusy getpages specifically
16081c7c3c6aSMatthew Dillon 			 * requested page - getpages expects it to be
16091c7c3c6aSMatthew Dillon 			 * left busy.
16101c7c3c6aSMatthew Dillon 			 */
16111c7c3c6aSMatthew Dillon 			if (i != bp->b_pager.pg_reqpage) {
16121c7c3c6aSMatthew Dillon 				vm_page_deactivate(m);
16131c7c3c6aSMatthew Dillon 				vm_page_wakeup(m);
16141c7c3c6aSMatthew Dillon 			} else {
16151c7c3c6aSMatthew Dillon 				vm_page_flash(m);
16161c7c3c6aSMatthew Dillon 			}
16171c7c3c6aSMatthew Dillon 		} else {
16181c7c3c6aSMatthew Dillon 			/*
16191c7c3c6aSMatthew Dillon 			 * For write success, clear the modify and dirty
16201c7c3c6aSMatthew Dillon 			 * status, then finish the I/O ( which decrements the
16211c7c3c6aSMatthew Dillon 			 * busy count and possibly wakes waiter's up ).
16221c7c3c6aSMatthew Dillon 			 */
16230385347cSPeter Wemm 			pmap_clear_modify(m);
1624c52e7044SAlan Cox 			vm_page_undirty(m);
16251c7c3c6aSMatthew Dillon 			vm_page_io_finish(m);
1626936524aaSMatthew Dillon 			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1627936524aaSMatthew Dillon 				vm_page_protect(m, VM_PROT_READ);
1628ffc82b0aSJohn Dyson 		}
1629df8bae1dSRodney W. Grimes 	}
163026f9a767SRodney W. Grimes 
16311c7c3c6aSMatthew Dillon 	/*
16321c7c3c6aSMatthew Dillon 	 * adjust pip.  NOTE: the original parent may still have its own
16331c7c3c6aSMatthew Dillon 	 * pip refs on the object.
16341c7c3c6aSMatthew Dillon 	 */
16350d94caffSDavid Greenman 
16361c7c3c6aSMatthew Dillon 	if (object)
16371c7c3c6aSMatthew Dillon 		vm_object_pip_wakeupn(object, bp->b_npages);
163826f9a767SRodney W. Grimes 
16391c7c3c6aSMatthew Dillon 	/*
16401c7c3c6aSMatthew Dillon 	 * release the physical I/O buffer
16411c7c3c6aSMatthew Dillon 	 */
1642e47ed70bSJohn Dyson 
1643327f4e83SMatthew Dillon 	relpbuf(
1644327f4e83SMatthew Dillon 	    bp,
164521144e3bSPoul-Henning Kamp 	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1646327f4e83SMatthew Dillon 		((bp->b_flags & B_ASYNC) ?
1647327f4e83SMatthew Dillon 		    &nsw_wcount_async :
1648327f4e83SMatthew Dillon 		    &nsw_wcount_sync
1649327f4e83SMatthew Dillon 		)
1650327f4e83SMatthew Dillon 	    )
1651327f4e83SMatthew Dillon 	);
165226f9a767SRodney W. Grimes 	splx(s);
165326f9a767SRodney W. Grimes }
16541c7c3c6aSMatthew Dillon 
16551c7c3c6aSMatthew Dillon /************************************************************************
16561c7c3c6aSMatthew Dillon  *				SWAP META DATA 				*
16571c7c3c6aSMatthew Dillon  ************************************************************************
16581c7c3c6aSMatthew Dillon  *
16591c7c3c6aSMatthew Dillon  *	These routines manipulate the swap metadata stored in the
16604dcc5c2dSMatthew Dillon  *	OBJT_SWAP object.  All swp_*() routines must be called at
16614dcc5c2dSMatthew Dillon  *	splvm() because swap can be freed up by the low level vm_page
16624dcc5c2dSMatthew Dillon  *	code which might be called from interrupts beyond what splbio() covers.
16631c7c3c6aSMatthew Dillon  *
16644dcc5c2dSMatthew Dillon  *	Swap metadata is implemented with a global hash and not directly
16654dcc5c2dSMatthew Dillon  *	linked into the object.  Instead the object simply contains
16664dcc5c2dSMatthew Dillon  *	appropriate tracking counters.
16671c7c3c6aSMatthew Dillon  */
16681c7c3c6aSMatthew Dillon 
16691c7c3c6aSMatthew Dillon /*
16701c7c3c6aSMatthew Dillon  * SWP_PAGER_HASH() -	hash swap meta data
16711c7c3c6aSMatthew Dillon  *
16724dcc5c2dSMatthew Dillon  *	This is an inline helper function which hashes the swapblk given
16731c7c3c6aSMatthew Dillon  *	the object and page index.  It returns a pointer to a pointer
16741c7c3c6aSMatthew Dillon  *	to the object, or a pointer to a NULL pointer if it could not
16751c7c3c6aSMatthew Dillon  *	find a swapblk.
16764dcc5c2dSMatthew Dillon  *
16774dcc5c2dSMatthew Dillon  *	This routine must be called at splvm().
16781c7c3c6aSMatthew Dillon  */
16791c7c3c6aSMatthew Dillon 
16801c7c3c6aSMatthew Dillon static __inline struct swblock **
16814dcc5c2dSMatthew Dillon swp_pager_hash(vm_object_t object, vm_pindex_t index)
16821c7c3c6aSMatthew Dillon {
16831c7c3c6aSMatthew Dillon 	struct swblock **pswap;
16841c7c3c6aSMatthew Dillon 	struct swblock *swap;
16851c7c3c6aSMatthew Dillon 
16861c7c3c6aSMatthew Dillon 	index &= ~SWAP_META_MASK;
1687af647ddeSBruce Evans 	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
16881c7c3c6aSMatthew Dillon 
16891c7c3c6aSMatthew Dillon 	while ((swap = *pswap) != NULL) {
16901c7c3c6aSMatthew Dillon 		if (swap->swb_object == object &&
16911c7c3c6aSMatthew Dillon 		    swap->swb_index == index
16921c7c3c6aSMatthew Dillon 		) {
16931c7c3c6aSMatthew Dillon 			break;
16941c7c3c6aSMatthew Dillon 		}
16951c7c3c6aSMatthew Dillon 		pswap = &swap->swb_hnext;
16961c7c3c6aSMatthew Dillon 	}
16971c7c3c6aSMatthew Dillon 	return(pswap);
16981c7c3c6aSMatthew Dillon }
16991c7c3c6aSMatthew Dillon 
17001c7c3c6aSMatthew Dillon /*
17011c7c3c6aSMatthew Dillon  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
17021c7c3c6aSMatthew Dillon  *
17031c7c3c6aSMatthew Dillon  *	We first convert the object to a swap object if it is a default
17041c7c3c6aSMatthew Dillon  *	object.
17051c7c3c6aSMatthew Dillon  *
17061c7c3c6aSMatthew Dillon  *	The specified swapblk is added to the object's swap metadata.  If
17071c7c3c6aSMatthew Dillon  *	the swapblk is not valid, it is freed instead.  Any previously
17081c7c3c6aSMatthew Dillon  *	assigned swapblk is freed.
17094dcc5c2dSMatthew Dillon  *
17104dcc5c2dSMatthew Dillon  *	This routine must be called at splvm(), except when used to convert
17114dcc5c2dSMatthew Dillon  *	an OBJT_DEFAULT object into an OBJT_SWAP object.
17124dcc5c2dSMatthew Dillon 
17131c7c3c6aSMatthew Dillon  */
17141c7c3c6aSMatthew Dillon 
17151c7c3c6aSMatthew Dillon static void
17161c7c3c6aSMatthew Dillon swp_pager_meta_build(
17171c7c3c6aSMatthew Dillon 	vm_object_t object,
17184dcc5c2dSMatthew Dillon 	vm_pindex_t index,
17194dcc5c2dSMatthew Dillon 	daddr_t swapblk
17201c7c3c6aSMatthew Dillon ) {
17211c7c3c6aSMatthew Dillon 	struct swblock *swap;
17221c7c3c6aSMatthew Dillon 	struct swblock **pswap;
17231c7c3c6aSMatthew Dillon 
17241c7c3c6aSMatthew Dillon 	/*
17251c7c3c6aSMatthew Dillon 	 * Convert default object to swap object if necessary
17261c7c3c6aSMatthew Dillon 	 */
17271c7c3c6aSMatthew Dillon 
17281c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP) {
17291c7c3c6aSMatthew Dillon 		object->type = OBJT_SWAP;
17301c7c3c6aSMatthew Dillon 		object->un_pager.swp.swp_bcount = 0;
17311c7c3c6aSMatthew Dillon 
17321c7c3c6aSMatthew Dillon 		if (object->handle != NULL) {
17331c7c3c6aSMatthew Dillon 			TAILQ_INSERT_TAIL(
17341c7c3c6aSMatthew Dillon 			    NOBJLIST(object->handle),
17351c7c3c6aSMatthew Dillon 			    object,
17361c7c3c6aSMatthew Dillon 			    pager_object_list
17371c7c3c6aSMatthew Dillon 			);
17381c7c3c6aSMatthew Dillon 		} else {
17391c7c3c6aSMatthew Dillon 			TAILQ_INSERT_TAIL(
17401c7c3c6aSMatthew Dillon 			    &swap_pager_un_object_list,
17411c7c3c6aSMatthew Dillon 			    object,
17421c7c3c6aSMatthew Dillon 			    pager_object_list
17431c7c3c6aSMatthew Dillon 			);
17441c7c3c6aSMatthew Dillon 		}
17451c7c3c6aSMatthew Dillon 	}
17461c7c3c6aSMatthew Dillon 
17471c7c3c6aSMatthew Dillon 	/*
17481c7c3c6aSMatthew Dillon 	 * Locate hash entry.  If not found create, but if we aren't adding
17494dcc5c2dSMatthew Dillon 	 * anything just return.  If we run out of space in the map we wait
17504dcc5c2dSMatthew Dillon 	 * and, since the hash table may have changed, retry.
17511c7c3c6aSMatthew Dillon 	 */
17521c7c3c6aSMatthew Dillon 
17534dcc5c2dSMatthew Dillon retry:
17541c7c3c6aSMatthew Dillon 	pswap = swp_pager_hash(object, index);
17551c7c3c6aSMatthew Dillon 
17561c7c3c6aSMatthew Dillon 	if ((swap = *pswap) == NULL) {
17571c7c3c6aSMatthew Dillon 		int i;
17581c7c3c6aSMatthew Dillon 
17591c7c3c6aSMatthew Dillon 		if (swapblk == SWAPBLK_NONE)
17601c7c3c6aSMatthew Dillon 			return;
17611c7c3c6aSMatthew Dillon 
17621c7c3c6aSMatthew Dillon 		swap = *pswap = zalloc(swap_zone);
17634dcc5c2dSMatthew Dillon 		if (swap == NULL) {
17644dcc5c2dSMatthew Dillon 			VM_WAIT;
17654dcc5c2dSMatthew Dillon 			goto retry;
17664dcc5c2dSMatthew Dillon 		}
17671c7c3c6aSMatthew Dillon 		swap->swb_hnext = NULL;
17681c7c3c6aSMatthew Dillon 		swap->swb_object = object;
17691c7c3c6aSMatthew Dillon 		swap->swb_index = index & ~SWAP_META_MASK;
17701c7c3c6aSMatthew Dillon 		swap->swb_count = 0;
17711c7c3c6aSMatthew Dillon 
17721c7c3c6aSMatthew Dillon 		++object->un_pager.swp.swp_bcount;
17731c7c3c6aSMatthew Dillon 
17741c7c3c6aSMatthew Dillon 		for (i = 0; i < SWAP_META_PAGES; ++i)
17751c7c3c6aSMatthew Dillon 			swap->swb_pages[i] = SWAPBLK_NONE;
17761c7c3c6aSMatthew Dillon 	}
17771c7c3c6aSMatthew Dillon 
17781c7c3c6aSMatthew Dillon 	/*
17791c7c3c6aSMatthew Dillon 	 * Delete prior contents of metadata
17801c7c3c6aSMatthew Dillon 	 */
17811c7c3c6aSMatthew Dillon 
17821c7c3c6aSMatthew Dillon 	index &= SWAP_META_MASK;
17831c7c3c6aSMatthew Dillon 
17841c7c3c6aSMatthew Dillon 	if (swap->swb_pages[index] != SWAPBLK_NONE) {
17854dcc5c2dSMatthew Dillon 		swp_pager_freeswapspace(swap->swb_pages[index], 1);
17861c7c3c6aSMatthew Dillon 		--swap->swb_count;
17871c7c3c6aSMatthew Dillon 	}
17881c7c3c6aSMatthew Dillon 
17891c7c3c6aSMatthew Dillon 	/*
17901c7c3c6aSMatthew Dillon 	 * Enter block into metadata
17911c7c3c6aSMatthew Dillon 	 */
17921c7c3c6aSMatthew Dillon 
17931c7c3c6aSMatthew Dillon 	swap->swb_pages[index] = swapblk;
17944dcc5c2dSMatthew Dillon 	if (swapblk != SWAPBLK_NONE)
17951c7c3c6aSMatthew Dillon 		++swap->swb_count;
17961c7c3c6aSMatthew Dillon }
17971c7c3c6aSMatthew Dillon 
17981c7c3c6aSMatthew Dillon /*
17991c7c3c6aSMatthew Dillon  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
18001c7c3c6aSMatthew Dillon  *
18011c7c3c6aSMatthew Dillon  *	The requested range of blocks is freed, with any associated swap
18021c7c3c6aSMatthew Dillon  *	returned to the swap bitmap.
18031c7c3c6aSMatthew Dillon  *
18041c7c3c6aSMatthew Dillon  *	This routine will free swap metadata structures as they are cleaned
18051c7c3c6aSMatthew Dillon  *	out.  This routine does *NOT* operate on swap metadata associated
18061c7c3c6aSMatthew Dillon  *	with resident pages.
18071c7c3c6aSMatthew Dillon  *
18081c7c3c6aSMatthew Dillon  *	This routine must be called at splvm()
18091c7c3c6aSMatthew Dillon  */
18101c7c3c6aSMatthew Dillon 
18111c7c3c6aSMatthew Dillon static void
18124dcc5c2dSMatthew Dillon swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
18131c7c3c6aSMatthew Dillon {
18141c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP)
18151c7c3c6aSMatthew Dillon 		return;
18161c7c3c6aSMatthew Dillon 
18171c7c3c6aSMatthew Dillon 	while (count > 0) {
18181c7c3c6aSMatthew Dillon 		struct swblock **pswap;
18191c7c3c6aSMatthew Dillon 		struct swblock *swap;
18201c7c3c6aSMatthew Dillon 
18211c7c3c6aSMatthew Dillon 		pswap = swp_pager_hash(object, index);
18221c7c3c6aSMatthew Dillon 
18231c7c3c6aSMatthew Dillon 		if ((swap = *pswap) != NULL) {
18241c7c3c6aSMatthew Dillon 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
18251c7c3c6aSMatthew Dillon 
18261c7c3c6aSMatthew Dillon 			if (v != SWAPBLK_NONE) {
18271c7c3c6aSMatthew Dillon 				swp_pager_freeswapspace(v, 1);
18281c7c3c6aSMatthew Dillon 				swap->swb_pages[index & SWAP_META_MASK] =
18291c7c3c6aSMatthew Dillon 					SWAPBLK_NONE;
18301c7c3c6aSMatthew Dillon 				if (--swap->swb_count == 0) {
18311c7c3c6aSMatthew Dillon 					*pswap = swap->swb_hnext;
18321c7c3c6aSMatthew Dillon 					zfree(swap_zone, swap);
18331c7c3c6aSMatthew Dillon 					--object->un_pager.swp.swp_bcount;
18341c7c3c6aSMatthew Dillon 				}
18351c7c3c6aSMatthew Dillon 			}
18361c7c3c6aSMatthew Dillon 			--count;
18371c7c3c6aSMatthew Dillon 			++index;
18381c7c3c6aSMatthew Dillon 		} else {
18394dcc5c2dSMatthew Dillon 			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
18401c7c3c6aSMatthew Dillon 			count -= n;
18411c7c3c6aSMatthew Dillon 			index += n;
18421c7c3c6aSMatthew Dillon 		}
18431c7c3c6aSMatthew Dillon 	}
18441c7c3c6aSMatthew Dillon }
18451c7c3c6aSMatthew Dillon 
18461c7c3c6aSMatthew Dillon /*
18471c7c3c6aSMatthew Dillon  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
18481c7c3c6aSMatthew Dillon  *
18491c7c3c6aSMatthew Dillon  *	This routine locates and destroys all swap metadata associated with
18501c7c3c6aSMatthew Dillon  *	an object.
18514dcc5c2dSMatthew Dillon  *
18524dcc5c2dSMatthew Dillon  *	This routine must be called at splvm()
18531c7c3c6aSMatthew Dillon  */
18541c7c3c6aSMatthew Dillon 
18551c7c3c6aSMatthew Dillon static void
18561c7c3c6aSMatthew Dillon swp_pager_meta_free_all(vm_object_t object)
18571c7c3c6aSMatthew Dillon {
18581c7c3c6aSMatthew Dillon 	daddr_t index = 0;
18591c7c3c6aSMatthew Dillon 
18601c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP)
18611c7c3c6aSMatthew Dillon 		return;
18621c7c3c6aSMatthew Dillon 
18631c7c3c6aSMatthew Dillon 	while (object->un_pager.swp.swp_bcount) {
18641c7c3c6aSMatthew Dillon 		struct swblock **pswap;
18651c7c3c6aSMatthew Dillon 		struct swblock *swap;
18661c7c3c6aSMatthew Dillon 
18671c7c3c6aSMatthew Dillon 		pswap = swp_pager_hash(object, index);
18681c7c3c6aSMatthew Dillon 		if ((swap = *pswap) != NULL) {
18691c7c3c6aSMatthew Dillon 			int i;
18701c7c3c6aSMatthew Dillon 
18711c7c3c6aSMatthew Dillon 			for (i = 0; i < SWAP_META_PAGES; ++i) {
18721c7c3c6aSMatthew Dillon 				daddr_t v = swap->swb_pages[i];
18731c7c3c6aSMatthew Dillon 				if (v != SWAPBLK_NONE) {
18741c7c3c6aSMatthew Dillon 					--swap->swb_count;
18754dcc5c2dSMatthew Dillon 					swp_pager_freeswapspace(v, 1);
18761c7c3c6aSMatthew Dillon 				}
18771c7c3c6aSMatthew Dillon 			}
18781c7c3c6aSMatthew Dillon 			if (swap->swb_count != 0)
18791c7c3c6aSMatthew Dillon 				panic("swap_pager_meta_free_all: swb_count != 0");
18801c7c3c6aSMatthew Dillon 			*pswap = swap->swb_hnext;
18811c7c3c6aSMatthew Dillon 			zfree(swap_zone, swap);
18821c7c3c6aSMatthew Dillon 			--object->un_pager.swp.swp_bcount;
18831c7c3c6aSMatthew Dillon 		}
18841c7c3c6aSMatthew Dillon 		index += SWAP_META_PAGES;
18851c7c3c6aSMatthew Dillon 		if (index > 0x20000000)
18861c7c3c6aSMatthew Dillon 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
18871c7c3c6aSMatthew Dillon 	}
18881c7c3c6aSMatthew Dillon }
18891c7c3c6aSMatthew Dillon 
18901c7c3c6aSMatthew Dillon /*
18911c7c3c6aSMatthew Dillon  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
18921c7c3c6aSMatthew Dillon  *
18931c7c3c6aSMatthew Dillon  *	This routine is capable of looking up, popping, or freeing
18941c7c3c6aSMatthew Dillon  *	swapblk assignments in the swap meta data or in the vm_page_t.
18951c7c3c6aSMatthew Dillon  *	The routine typically returns the swapblk being looked-up, or popped,
18961c7c3c6aSMatthew Dillon  *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
18971c7c3c6aSMatthew Dillon  *	was invalid.  This routine will automatically free any invalid
18981c7c3c6aSMatthew Dillon  *	meta-data swapblks.
18991c7c3c6aSMatthew Dillon  *
19001c7c3c6aSMatthew Dillon  *	It is not possible to store invalid swapblks in the swap meta data
19011c7c3c6aSMatthew Dillon  *	(other then a literal 'SWAPBLK_NONE'), so we don't bother checking.
19021c7c3c6aSMatthew Dillon  *
19031c7c3c6aSMatthew Dillon  *	When acting on a busy resident page and paging is in progress, we
19041c7c3c6aSMatthew Dillon  *	have to wait until paging is complete but otherwise can act on the
19051c7c3c6aSMatthew Dillon  *	busy page.
19061c7c3c6aSMatthew Dillon  *
19074dcc5c2dSMatthew Dillon  *	This routine must be called at splvm().
19081c7c3c6aSMatthew Dillon  *
19094dcc5c2dSMatthew Dillon  *	SWM_FREE	remove and free swap block from metadata
19101c7c3c6aSMatthew Dillon  *	SWM_POP		remove from meta data but do not free.. pop it out
19111c7c3c6aSMatthew Dillon  */
19121c7c3c6aSMatthew Dillon 
19131c7c3c6aSMatthew Dillon static daddr_t
19141c7c3c6aSMatthew Dillon swp_pager_meta_ctl(
19151c7c3c6aSMatthew Dillon 	vm_object_t object,
19161c7c3c6aSMatthew Dillon 	vm_pindex_t index,
19171c7c3c6aSMatthew Dillon 	int flags
19181c7c3c6aSMatthew Dillon ) {
19194dcc5c2dSMatthew Dillon 	struct swblock **pswap;
19204dcc5c2dSMatthew Dillon 	struct swblock *swap;
19214dcc5c2dSMatthew Dillon 	daddr_t r1;
19224dcc5c2dSMatthew Dillon 
19231c7c3c6aSMatthew Dillon 	/*
19241c7c3c6aSMatthew Dillon 	 * The meta data only exists of the object is OBJT_SWAP
19251c7c3c6aSMatthew Dillon 	 * and even then might not be allocated yet.
19261c7c3c6aSMatthew Dillon 	 */
19271c7c3c6aSMatthew Dillon 
19284dcc5c2dSMatthew Dillon 	if (object->type != OBJT_SWAP)
19291c7c3c6aSMatthew Dillon 		return(SWAPBLK_NONE);
19301c7c3c6aSMatthew Dillon 
19314dcc5c2dSMatthew Dillon 	r1 = SWAPBLK_NONE;
19321c7c3c6aSMatthew Dillon 	pswap = swp_pager_hash(object, index);
19331c7c3c6aSMatthew Dillon 
19341c7c3c6aSMatthew Dillon 	if ((swap = *pswap) != NULL) {
19354dcc5c2dSMatthew Dillon 		index &= SWAP_META_MASK;
19361c7c3c6aSMatthew Dillon 		r1 = swap->swb_pages[index];
19371c7c3c6aSMatthew Dillon 
19381c7c3c6aSMatthew Dillon 		if (r1 != SWAPBLK_NONE) {
19391c7c3c6aSMatthew Dillon 			if (flags & SWM_FREE) {
19404dcc5c2dSMatthew Dillon 				swp_pager_freeswapspace(r1, 1);
19411c7c3c6aSMatthew Dillon 				r1 = SWAPBLK_NONE;
19421c7c3c6aSMatthew Dillon 			}
19431c7c3c6aSMatthew Dillon 			if (flags & (SWM_FREE|SWM_POP)) {
19441c7c3c6aSMatthew Dillon 				swap->swb_pages[index] = SWAPBLK_NONE;
19451c7c3c6aSMatthew Dillon 				if (--swap->swb_count == 0) {
19461c7c3c6aSMatthew Dillon 					*pswap = swap->swb_hnext;
19471c7c3c6aSMatthew Dillon 					zfree(swap_zone, swap);
19481c7c3c6aSMatthew Dillon 					--object->un_pager.swp.swp_bcount;
19491c7c3c6aSMatthew Dillon 				}
19501c7c3c6aSMatthew Dillon 			}
19511c7c3c6aSMatthew Dillon 		}
19521c7c3c6aSMatthew Dillon 	}
19531c7c3c6aSMatthew Dillon 	return(r1);
19541c7c3c6aSMatthew Dillon }
19551c7c3c6aSMatthew Dillon 
1956e4057dbdSPoul-Henning Kamp /********************************************************
1957e4057dbdSPoul-Henning Kamp  *		CHAINING FUNCTIONS			*
1958e4057dbdSPoul-Henning Kamp  ********************************************************
1959e4057dbdSPoul-Henning Kamp  *
1960e4057dbdSPoul-Henning Kamp  *	These functions support recursion of I/O operations
1961e4057dbdSPoul-Henning Kamp  *	on bp's, typically by chaining one or more 'child' bp's
1962e4057dbdSPoul-Henning Kamp  *	to the parent.  Synchronous, asynchronous, and semi-synchronous
1963e4057dbdSPoul-Henning Kamp  *	chaining is possible.
1964e4057dbdSPoul-Henning Kamp  */
1965e4057dbdSPoul-Henning Kamp 
1966e4057dbdSPoul-Henning Kamp /*
1967e4057dbdSPoul-Henning Kamp  *	vm_pager_chain_iodone:
1968e4057dbdSPoul-Henning Kamp  *
1969e4057dbdSPoul-Henning Kamp  *	io completion routine for child bp.  Currently we fudge a bit
1970e4057dbdSPoul-Henning Kamp  *	on dealing with b_resid.   Since users of these routines may issue
1971e4057dbdSPoul-Henning Kamp  *	multiple children simultaneously, sequencing of the error can be lost.
1972e4057dbdSPoul-Henning Kamp  */
1973e4057dbdSPoul-Henning Kamp 
1974e4057dbdSPoul-Henning Kamp static void
1975e4057dbdSPoul-Henning Kamp vm_pager_chain_iodone(struct buf *nbp)
1976e4057dbdSPoul-Henning Kamp {
19770b441832SPoul-Henning Kamp 	struct bio *bp;
19780b441832SPoul-Henning Kamp 	u_int *count;
1979e4057dbdSPoul-Henning Kamp 
19800b441832SPoul-Henning Kamp 	bp = nbp->b_caller1;
19810b441832SPoul-Henning Kamp 	count = (u_int *)&(bp->bio_caller1);
19820b441832SPoul-Henning Kamp 	if (bp != NULL) {
1983e4057dbdSPoul-Henning Kamp 		if (nbp->b_ioflags & BIO_ERROR) {
19840b441832SPoul-Henning Kamp 			bp->bio_flags |= BIO_ERROR;
19850b441832SPoul-Henning Kamp 			bp->bio_error = nbp->b_error;
1986e4057dbdSPoul-Henning Kamp 		} else if (nbp->b_resid != 0) {
19870b441832SPoul-Henning Kamp 			bp->bio_flags |= BIO_ERROR;
19880b441832SPoul-Henning Kamp 			bp->bio_error = EINVAL;
1989e4057dbdSPoul-Henning Kamp 		} else {
19900b441832SPoul-Henning Kamp 			bp->bio_resid -= nbp->b_bcount;
1991e4057dbdSPoul-Henning Kamp 		}
19920b441832SPoul-Henning Kamp 		nbp->b_caller1 = NULL;
19930b441832SPoul-Henning Kamp 		--(*count);
19940b441832SPoul-Henning Kamp 		if (bp->bio_flags & BIO_FLAG1) {
19950b441832SPoul-Henning Kamp 			bp->bio_flags &= ~BIO_FLAG1;
1996e4057dbdSPoul-Henning Kamp 			wakeup(bp);
1997e4057dbdSPoul-Henning Kamp 		}
1998e4057dbdSPoul-Henning Kamp 	}
1999e4057dbdSPoul-Henning Kamp 	nbp->b_flags |= B_DONE;
2000e4057dbdSPoul-Henning Kamp 	nbp->b_flags &= ~B_ASYNC;
2001e4057dbdSPoul-Henning Kamp 	relpbuf(nbp, NULL);
2002e4057dbdSPoul-Henning Kamp }
2003e4057dbdSPoul-Henning Kamp 
2004e4057dbdSPoul-Henning Kamp /*
2005e4057dbdSPoul-Henning Kamp  *	getchainbuf:
2006e4057dbdSPoul-Henning Kamp  *
2007e4057dbdSPoul-Henning Kamp  *	Obtain a physical buffer and chain it to its parent buffer.  When
2008e4057dbdSPoul-Henning Kamp  *	I/O completes, the parent buffer will be B_SIGNAL'd.  Errors are
2009e4057dbdSPoul-Henning Kamp  *	automatically propagated to the parent
2010e4057dbdSPoul-Henning Kamp  */
2011e4057dbdSPoul-Henning Kamp 
2012e4057dbdSPoul-Henning Kamp struct buf *
20130b441832SPoul-Henning Kamp getchainbuf(struct bio *bp, struct vnode *vp, int flags)
2014e4057dbdSPoul-Henning Kamp {
2015e4057dbdSPoul-Henning Kamp 	struct buf *nbp = getpbuf(NULL);
20160b441832SPoul-Henning Kamp 	u_int *count = (u_int *)&(bp->bio_caller1);
2017e4057dbdSPoul-Henning Kamp 
20180b441832SPoul-Henning Kamp 	nbp->b_caller1 = bp;
20190b441832SPoul-Henning Kamp 	++(*count);
2020e4057dbdSPoul-Henning Kamp 
20210b441832SPoul-Henning Kamp 	if (*count > 4)
2022e4057dbdSPoul-Henning Kamp 		waitchainbuf(bp, 4, 0);
2023e4057dbdSPoul-Henning Kamp 
20240b441832SPoul-Henning Kamp 	nbp->b_iocmd = bp->bio_cmd;
20250b441832SPoul-Henning Kamp 	nbp->b_ioflags = bp->bio_flags & BIO_ORDERED;
2026e4057dbdSPoul-Henning Kamp 	nbp->b_flags = flags;
2027e4057dbdSPoul-Henning Kamp 	nbp->b_rcred = nbp->b_wcred = proc0.p_ucred;
2028e4057dbdSPoul-Henning Kamp 	nbp->b_iodone = vm_pager_chain_iodone;
2029e4057dbdSPoul-Henning Kamp 
2030e4057dbdSPoul-Henning Kamp 	crhold(nbp->b_rcred);
2031e4057dbdSPoul-Henning Kamp 	crhold(nbp->b_wcred);
2032e4057dbdSPoul-Henning Kamp 
2033e4057dbdSPoul-Henning Kamp 	if (vp)
2034e4057dbdSPoul-Henning Kamp 		pbgetvp(vp, nbp);
2035e4057dbdSPoul-Henning Kamp 	return(nbp);
2036e4057dbdSPoul-Henning Kamp }
2037e4057dbdSPoul-Henning Kamp 
2038e4057dbdSPoul-Henning Kamp void
2039e4057dbdSPoul-Henning Kamp flushchainbuf(struct buf *nbp)
2040e4057dbdSPoul-Henning Kamp {
2041e4057dbdSPoul-Henning Kamp 	if (nbp->b_bcount) {
2042e4057dbdSPoul-Henning Kamp 		nbp->b_bufsize = nbp->b_bcount;
2043e4057dbdSPoul-Henning Kamp 		if (nbp->b_iocmd == BIO_WRITE)
2044e4057dbdSPoul-Henning Kamp 			nbp->b_dirtyend = nbp->b_bcount;
2045e4057dbdSPoul-Henning Kamp 		BUF_KERNPROC(nbp);
2046e4057dbdSPoul-Henning Kamp 		BUF_STRATEGY(nbp);
2047e4057dbdSPoul-Henning Kamp 	} else {
2048e4057dbdSPoul-Henning Kamp 		bufdone(nbp);
2049e4057dbdSPoul-Henning Kamp 	}
2050e4057dbdSPoul-Henning Kamp }
2051e4057dbdSPoul-Henning Kamp 
2052e4057dbdSPoul-Henning Kamp void
20530b441832SPoul-Henning Kamp waitchainbuf(struct bio *bp, int limit, int done)
2054e4057dbdSPoul-Henning Kamp {
2055e4057dbdSPoul-Henning Kamp  	int s;
20560b441832SPoul-Henning Kamp 	u_int *count = (u_int *)&(bp->bio_caller1);
2057e4057dbdSPoul-Henning Kamp 
2058e4057dbdSPoul-Henning Kamp 	s = splbio();
20590b441832SPoul-Henning Kamp 	while (*count > limit) {
20600b441832SPoul-Henning Kamp 		bp->bio_flags |= BIO_FLAG1;
2061e4057dbdSPoul-Henning Kamp 		tsleep(bp, PRIBIO + 4, "bpchain", 0);
2062e4057dbdSPoul-Henning Kamp 	}
2063e4057dbdSPoul-Henning Kamp 	if (done) {
20640b441832SPoul-Henning Kamp 		if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
20650b441832SPoul-Henning Kamp 			bp->bio_flags |= BIO_ERROR;
20660b441832SPoul-Henning Kamp 			bp->bio_error = EINVAL;
2067e4057dbdSPoul-Henning Kamp 		}
20680b441832SPoul-Henning Kamp 		biodone(bp);
2069e4057dbdSPoul-Henning Kamp 	}
2070e4057dbdSPoul-Henning Kamp 	splx(s);
2071e4057dbdSPoul-Henning Kamp }
2072e4057dbdSPoul-Henning Kamp 
2073