xref: /freebsd/sys/vm/swap_pager.c (revision 35918c55e5e78fdbd85347302af1668d68c356bc)
/*-
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 */
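
/*
 * Illustrative sketch (not part of this file) of the radix bitmap
 * ("blist") API from kern/subr_blist.c that the new swapper builds on.
 * The entry points are real for this era of the tree; the sizes are
 * made up and error handling is elided:
 *
 *	blist_t bl = blist_create(2048);	// manage 2048 swap blocks
 *	daddr_t blk = blist_alloc(bl, 16);	// 16 contiguous blocks
 *	if (blk != SWAPBLK_NONE)		// SWAPBLK_NONE on failure
 *		blist_free(bl, blk, 16);	// return them to the bitmap
 *	blist_destroy(bl);
 *
 * swp_pager_getswapspace() and swp_pager_freeswapspace() below are the
 * in-tree consumers of this interface.
 */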

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_mac.h"
#include "opt_swap.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * SWB_NPAGES must be a power of 2.  It may be set to 1, 2, 4, 8, or 16
 * pages per allocation.  The default below works out to
 * MAX_PAGEOUT_CLUSTER (16) pages.  The 16-page limit is due to the
 * radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 16
#endif

#if !defined(SWB_NPAGES)
#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
#endif

/*
 * Piecemeal swap metadata structure.  Swap is stored in a radix tree.
 *
 * If SWB_NPAGES is 8 and sizeof(char *) == sizeof(daddr_t), our radix
 * is basically 8.  Assuming PAGE_SIZE == 4096, one tree level represents
 * 32K worth of data, two levels represent 256K, three levels represent
 * 2 MBytes.   This is acceptable.
 *
 * Overall memory utilization is about the same as the old swap structure.
 */
#define SWCORRECT(n) (sizeof(void *) * (n) / sizeof(daddr_t))
#define SWAP_META_PAGES		(SWB_NPAGES * 2)
#define SWAP_META_MASK		(SWAP_META_PAGES - 1)

struct swblock {
	struct swblock	*swb_hnext;
	vm_object_t	swb_object;
	vm_pindex_t	swb_index;
	int		swb_count;
	daddr_t		swb_pages[SWAP_META_PAGES];
};
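
/*
 * Minimal sketch of how an (object, index) pair resolves to a slot
 * (variable names here are hypothetical; the masks are the real ones
 * defined above):
 *
 *	// One swblock covers SWAP_META_PAGES consecutive page indices,
 *	// so the low bits select the slot within the swblock.
 *	struct swblock *swap = *swp_pager_hash(object, pindex);
 *	daddr_t blk = swap->swb_pages[pindex & SWAP_META_MASK];
 *
 * A slot holds SWAPBLK_NONE when no swap is assigned to that page.
 */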

static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;	/* Allocate from here next */
static int nswapdev;		/* Number of swap devices */
int swap_pager_avail;
static int swdev_syscall_active = 0; /* serialize swap(on|off) */

static void swapdev_strategy(struct buf *, struct swdevt *sw);

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/

static struct swblock **swhash;
static int swhash_mask;
static struct mtx swhash_mtx;

static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
static struct sx sw_alloc_sx;

SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
	CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
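
/*
 * Worked example of the hash above: with NOBJLISTS == 8 the handle is
 * shifted right four bits (dropping alignment zeroes) and masked, so
 * NOBJLIST((void *)0x1230) selects list (0x123 & 7) == 3.  Handles that
 * differ only in their low nibble still spread across the eight lists.
 */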

static struct mtx sw_alloc_mtx;	/* protect list manipulation */
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
static uma_zone_t	swap_zone;
static struct vm_object	swap_zone_obj;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
		swap_pager_alloc(void *handle, vm_ooffset_t size,
				      vm_prot_t prot, vm_ooffset_t offset);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t
		swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp);

struct pagerops swappagerops = {
	.pgo_init =	swap_pager_init,	/* early system initialization of pager	*/
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	.pgo_getpages =	swap_pager_getpages,	/* pagein				*/
	.pgo_putpages =	swap_pager_putpages,	/* pageout				*/
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page	*/
	.pgo_pageunswapped = swap_pager_unswapped,	/* remove swap related to page		*/
};

/*
 * dmmax is in page-sized chunks with the new swap system.  It was
 * DEV_BSIZE-sized chunks in the old swap system.  dmmax is always a
 * power of 2.
 *
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int dmmax;
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax,
	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");

static void	swp_sizecheck(void);
static void	swp_pager_async_iodone(struct buf *bp);
static int	swapongeom(struct thread *, struct vnode *);
static int	swaponvp(struct thread *, struct vnode *, u_long);
static int	swapoff_one(struct swdevt *sp, struct ucred *cred);

/*
 * Swap bitmap functions
 */
static void	swp_pager_freeswapspace(daddr_t blk, int npages);
static daddr_t	swp_pager_getswapspace(int npages);

/*
 * Metadata functions
 */
static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index);
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 *	This routine must be called at splvm()
 */
static void
swp_sizecheck(void)
{

	if (swap_pager_avail < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (swap_pager_avail > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
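
/*
 * Worked example of the hysteresis above, using the default watermarks
 * (nswap_lowat == 128, nswap_hiwat == 512, both in pages):
 *
 *	avail falls below 128 -> warn once, swap_pager_almost_full = 1
 *	avail recovers to 300 -> no change; we are between the watermarks
 *	avail rises above 512 -> swap_pager_almost_full = 0
 *
 * The gap between lowat and hiwat keeps the console warning from
 * flapping when availability hovers near a single threshold.
 */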

/*
 * SWP_PAGER_HASH() -	hash swap meta data
 *
 *	This is a helper function which hashes the swapblk given
 *	the object and page index.  It returns a pointer to a pointer
 *	to the swblock, or a pointer to a NULL pointer if it could not
 *	find a swapblk.
 *
 *	This routine must be called at splvm().
 */
static struct swblock **
swp_pager_hash(vm_object_t object, vm_pindex_t index)
{
	struct swblock **pswap;
	struct swblock *swap;

	index &= ~(vm_pindex_t)SWAP_META_MASK;
	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
	while ((swap = *pswap) != NULL) {
		if (swap->swb_object == object &&
		    swap->swb_index == index
		) {
			break;
		}
		pswap = &swap->swb_hnext;
	}
	return (pswap);
}
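
/*
 * Minimal caller-side sketch (hypothetical fragment; the real callers
 * are the swp_pager_meta_*() routines further below):
 *
 *	struct swblock **pswap, *swap;
 *
 *	pswap = swp_pager_hash(object, pindex);
 *	if ((swap = *pswap) != NULL) {
 *		// hit: read the slot for this index
 *		blk = swap->swb_pages[pindex & SWAP_META_MASK];
 *	} else {
 *		// miss: *pswap is the NULL tail of the chain, so a new
 *		// swblock can be linked in simply by assigning *pswap.
 *	}
 */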

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);

	/*
	 * Device Stripe, in PAGE_SIZE'd blocks
	 */
	dmmax = SWB_NPAGES * 2;
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	int n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.   We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	mtx_lock(&pbuf_mtx);
	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_unlock(&pbuf_mtx);

	/*
	 * Initialize our zone.  Right now I'm just guessing on the number
	 * we need based on the number of pages in the system.  Each swblock
	 * can hold SWAP_META_PAGES pages, so this is probably overkill.
	 * This reservation is typically limited to around 32MB by default.
	 */
	n = cnt.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblock))
		n = maxswzone / sizeof(struct swblock);
	n2 = n;
	swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
	if (swap_zone == NULL)
		panic("failed to create swap_zone.");
	do {
		if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);
	if (n2 != n)
		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
	n2 = n;

	/*
	 * Initialize our meta-data hash table.  The swapper does not need to
	 * be quite as efficient as the VM system, so we do not use an
	 * oversized hash table.
	 *
	 * 	n: 		size of hash table, must be power of 2
	 *	swhash_mask:	hash table index mask
	 */
	for (n = 1; n < n2 / 8; n *= 2)
		;
	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
	swhash_mask = n - 1;
	mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF);
}
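
/*
 * Worked example of the hash sizing loop above: with n2 == 10000
 * usable swblocks, n doubles 1, 2, 4, ... 1024, 2048 and stops at the
 * first power of 2 >= n2 / 8 == 1250, giving 2048 buckets and
 * swhash_mask == 0x7ff.  The power-of-2 size is what makes the
 * "& swhash_mask" in swp_pager_hash() a valid modulus.
 */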

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
 *	and then converting it with swp_pager_meta_build().
 *
 *	This routine may block in vm_object_allocate() and create a named
 *	object lookup race, so we must interlock.   We must also run at
 *	splvm() for the object lookup to handle races with interrupts, but
 *	we do not have to maintain splvm() in between the lookup and the
 *	add because (I believe) it is not possible to attempt to create
 *	a new swap object w/handle when a default object with that handle
 *	already exists.
 *
 * MPSAFE
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		 vm_ooffset_t offset)
{
	vm_object_t object;
	vm_pindex_t pindex;

	pindex = OFF_TO_IDX(offset + PAGE_MASK + size);

	if (handle) {
		mtx_lock(&Giant);
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);

		if (object == NULL) {
			object = vm_object_allocate(OBJT_DEFAULT, pindex);
			object->handle = handle;

			VM_OBJECT_LOCK(object);
			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
			VM_OBJECT_UNLOCK(object);
		}
		sx_xunlock(&sw_alloc_sx);
		mtx_unlock(&Giant);
	} else {
		object = vm_object_allocate(OBJT_DEFAULT, pindex);

		VM_OBJECT_LOCK(object);
		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
		VM_OBJECT_UNLOCK(object);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	This routine used to block, but no longer does.
 *
 *	The object must be locked or unreferenceable.
 */
static void
swap_pager_dealloc(vm_object_t object)
{

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle != NULL) {
		mtx_lock(&sw_alloc_mtx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
		mtx_unlock(&sw_alloc_mtx);
	}

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	Must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 *
 *	We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;
	struct swdevt *sp;
	int i;

	blk = SWAPBLK_NONE;
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	for (i = 0; i < nswapdev; i++) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if (!(sp->sw_flags & SW_CLOSING)) {
			blk = blist_alloc(sp->sw_blist, npages);
			if (blk != SWAPBLK_NONE) {
				blk += sp->sw_first;
				sp->sw_used += npages;
				swap_pager_avail -= npages;
				swp_sizecheck();
				swdevhd = TAILQ_NEXT(sp, sw_list);
				goto done;
			}
		}
		sp = TAILQ_NEXT(sp, sw_list);
	}
	if (swap_pager_full != 2) {
		printf("swap_pager_getswapspace(%d): failed\n", npages);
		swap_pager_full = 2;
		swap_pager_almost_full = 1;
	}
	swdevhd = NULL;
done:
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}
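
/*
 * Worked example of the round-robin above, with two hypothetical
 * devices da0b and da1b: the first request starts at swdevhd == da0b;
 * on success swdevhd advances to da1b, so the next request starts
 * there and consecutive allocations stripe across both devices.  A
 * device that is full or marked SW_CLOSING is simply skipped, and only
 * after all nswapdev devices fail do we report exhaustion and set
 * swap_pager_full.
 */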

static int
swp_pager_isondev(daddr_t blk, struct swdevt *sp)
{

	return (blk >= sp->sw_first && blk < sp->sw_end);
}

static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
			mtx_unlock(&sw_dev_mtx);
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}
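
/*
 * Each swdevt owns the half-open block range [sw_first, sw_end), so a
 * b_blkno maps to exactly one device.  Hypothetical example: with da0b
 * covering [0, 1000) and da1b covering [1000, 3000), a buf with
 * b_blkno == 1500 is handed to da1b's sw_strategy.
 */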

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	Note:  This routine may not block (it could in the old swap code),
 *	and through the use of the new blist routines it does not block.
 *
 *	We must be called at splvm() to avoid races with bitmap frees from
 *	vm_page_remove() aka swap_pager_page_removed().
 *
 *	This routine may not block
 *	This routine must be called at splvm().
 */
static void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (blk >= sp->sw_first && blk < sp->sw_end) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	This routine may be called at any spl.  We up our spl to splvm temporarily
 *	in order to perform the metadata removal.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	swp_pager_meta_free(object, start, size);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	VM_OBJECT_LOCK(object);
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					VM_OBJECT_UNLOCK(object);
					return (-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	VM_OBJECT_UNLOCK(object);
	return (0);
}
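
/*
 * Minimal caller sketch (hypothetical fragment; the object lock must
 * not be held on entry since this routine acquires it itself):
 *
 *	if (swap_pager_reserve(object, 0, size) < 0)
 *		return (ENOMEM);	// swap exhausted, nothing kept
 *
 * On success every index in [0, size) has a swap block assigned; the
 * blocks are not zeroed, so callers cannot expect zero-fill on pagein
 * from them.
 */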

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to block.  It may block allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	This routine can be called at any spl
 *
 *	XXX vm_page_collapse() kinda expects us not to block because we
 *	supposedly do not need to allocate memory, but for the moment we
 *	*may* have to get a little memory from the zone allocator, but
 *	it is taken from the interrupt memory.  We should be ok.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked or
 *	inaccessible (XXX are they ?)
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource) {
		if (srcobject->handle != NULL) {
			mtx_lock(&sw_alloc_mtx);
			TAILQ_REMOVE(
			    NOBJLIST(srcobject->handle),
			    srcobject,
			    pager_object_list
			);
			mtx_unlock(&sw_alloc_mtx);
		}
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(
			    srcobject,
			    i + offset,
			    SWM_POP
			);

			if (srcaddr != SWAPBLK_NONE) {
				/*
				 * swp_pager_meta_build() can sleep.
				 */
				vm_object_pip_add(srcobject, 1);
				VM_OBJECT_UNLOCK(srcobject);
				vm_object_pip_add(dstobject, 1);
				swp_pager_meta_build(dstobject, i, srcaddr);
				vm_object_pip_wakeup(dstobject);
				VM_OBJECT_LOCK(srcobject);
				vm_object_pip_wakeup(srcobject);
			}
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the sourceblock.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary, the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page within a reasonable
 *	distance.  We do not try to restrict it to the swap device stripe
 *	(that is handled in getpages/putpages).  It probably isn't worth
 *	doing here.
 */
static boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after)
{
	daddr_t blk0;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	/*
	 * do we have good backing store at the requested index ?
	 */
	blk0 = swp_pager_meta_ctl(object, pindex, 0);

	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = (i - 1);
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		int i;

		for (i = 1; i < (SWB_NPAGES/2); ++i) {
			daddr_t blk;

			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = (i - 1);
	}
	return (TRUE);
}
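
/*
 * Worked example of the contiguity scan above: if pindex maps to swap
 * block 100, pindex - 1 and pindex - 2 map to 99 and 98, and
 * pindex + 1 maps to 200, the routine returns TRUE with *before == 2
 * and *after == 0, since only the backwards neighbors are contiguous
 * on the device.
 */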

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not block
 *	This routine must be called at splvm()
 */
static void
swap_pager_unswapped(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
}

/*
 * SWAP_PAGER_GETPAGES() - bring pages in from swap
 *
 *	Attempt to retrieve (m, count) pages from backing store, but make
 *	sure we retrieve at least m[reqpage].  We try to load in as large
 *	a chunk surrounding m[reqpage] as is contiguous in swap and which
 *	belongs to the same object.
 *
 *	The code is designed for asynchronous operation and
 *	immediate-notification of 'reqpage' but tends not to be
 *	used that way.  Please do not optimize-out this algorithmic
 *	feature, I intend to improve on it in the future.
 *
 *	The parent has a single vm_object_pip_add() reference prior to
 *	calling us and we should return with the same.
 *
 *	The parent has BUSY'd the pages.  We should return with 'm'
 *	left busy, but the others adjusted.
 */
static int
swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	struct buf *bp;
	vm_page_t mreq;
	int i;
	int j;
	daddr_t blk;

	mreq = m[reqpage];

	KASSERT(mreq->object == object,
	    ("swap_pager_getpages: object mismatch %p/%p",
	    object, mreq->object));

	/*
	 * Calculate range to retrieve.  The pages have already been assigned
	 * their swapblks.  We require a *contiguous* range but we know it
	 * not to span devices.  If we do not supply a contiguous range, bad
	 * things happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE,
	 * but the loops are set up such that the case(s) are handled
	 * implicitly.
	 *
	 * The swp_*() calls must be made at splvm().  vm_page_free() does
	 * not need to be, but it will go a little faster if it is.
	 */
9621c7c3c6aSMatthew Dillon 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
9631c7c3c6aSMatthew Dillon 
9641c7c3c6aSMatthew Dillon 	for (i = reqpage - 1; i >= 0; --i) {
9651c7c3c6aSMatthew Dillon 		daddr_t iblk;
9661c7c3c6aSMatthew Dillon 
9671c7c3c6aSMatthew Dillon 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
9681c7c3c6aSMatthew Dillon 		if (blk != iblk + (reqpage - i))
96926f9a767SRodney W. Grimes 			break;
97026f9a767SRodney W. Grimes 	}
9711c7c3c6aSMatthew Dillon 	++i;
9721c7c3c6aSMatthew Dillon 
9731c7c3c6aSMatthew Dillon 	for (j = reqpage + 1; j < count; ++j) {
9741c7c3c6aSMatthew Dillon 		daddr_t jblk;
9751c7c3c6aSMatthew Dillon 
9761c7c3c6aSMatthew Dillon 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
9771c7c3c6aSMatthew Dillon 		if (blk != jblk - (j - reqpage))
9781c7c3c6aSMatthew Dillon 			break;
9791c7c3c6aSMatthew Dillon 	}
9801c7c3c6aSMatthew Dillon 
9811c7c3c6aSMatthew Dillon 	/*
9821c7c3c6aSMatthew Dillon 	 * free pages outside our collection range.   Note: we never free
9831c7c3c6aSMatthew Dillon 	 * mreq, it must remain busy throughout.
9841c7c3c6aSMatthew Dillon 	 */
985071a1710SAlan Cox 	if (0 < i || j < count) {
9861c7c3c6aSMatthew Dillon 		int k;
9871c7c3c6aSMatthew Dillon 
988071a1710SAlan Cox 		vm_page_lock_queues();
9894dcc5c2dSMatthew Dillon 		for (k = 0; k < i; ++k)
9904dcc5c2dSMatthew Dillon 			vm_page_free(m[k]);
9914dcc5c2dSMatthew Dillon 		for (k = j; k < count; ++k)
9921c7c3c6aSMatthew Dillon 			vm_page_free(m[k]);
993ab9abe5dSAlan Cox 		vm_page_unlock_queues();
994071a1710SAlan Cox 	}
9951c7c3c6aSMatthew Dillon 
9961c7c3c6aSMatthew Dillon 	/*
9974dcc5c2dSMatthew Dillon 	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
9984dcc5c2dSMatthew Dillon 	 * still busy, but the others unbusied.
9991c7c3c6aSMatthew Dillon 	 */
10004dcc5c2dSMatthew Dillon 	if (blk == SWAPBLK_NONE)
100126f9a767SRodney W. Grimes 		return (VM_PAGER_FAIL);
1002df8bae1dSRodney W. Grimes 
100316f62314SDavid Greenman 	/*
10048630c117SAlan Cox 	 * Getpbuf() can sleep.
10058630c117SAlan Cox 	 */
10068630c117SAlan Cox 	VM_OBJECT_UNLOCK(object);
10078630c117SAlan Cox 	/*
100816f62314SDavid Greenman 	 * Get a swap buffer header to perform the IO
100916f62314SDavid Greenman 	 */
10101c7c3c6aSMatthew Dillon 	bp = getpbuf(&nsw_rcount);
10115e04322aSPoul-Henning Kamp 	bp->b_flags |= B_PAGING;
101226f9a767SRodney W. Grimes 
101316f62314SDavid Greenman 	/*
101416f62314SDavid Greenman 	 * map our page(s) into kva for input
101516f62314SDavid Greenman 	 */
1016d68d828bSAlan Cox 	pmap_qenter((vm_offset_t)bp->b_data, m + i, j - i);
10171c7c3c6aSMatthew Dillon 
101821144e3bSPoul-Henning Kamp 	bp->b_iocmd = BIO_READ;
10191c7c3c6aSMatthew Dillon 	bp->b_iodone = swp_pager_async_iodone;
1020fdcc1cc0SJohn Baldwin 	bp->b_rcred = crhold(thread0.td_ucred);
1021fdcc1cc0SJohn Baldwin 	bp->b_wcred = crhold(thread0.td_ucred);
10221c7c3c6aSMatthew Dillon 	bp->b_blkno = blk - (reqpage - i);
10231c7c3c6aSMatthew Dillon 	bp->b_bcount = PAGE_SIZE * (j - i);
10241c7c3c6aSMatthew Dillon 	bp->b_bufsize = PAGE_SIZE * (j - i);
10251c7c3c6aSMatthew Dillon 	bp->b_pager.pg_reqpage = reqpage - i;
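	/* pg_reqpage is rebased so it indexes into bp->b_pages[]. */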
10261c7c3c6aSMatthew Dillon 
10278630c117SAlan Cox 	VM_OBJECT_LOCK(object);
10281c7c3c6aSMatthew Dillon 	{
10291c7c3c6aSMatthew Dillon 		int k;
10301c7c3c6aSMatthew Dillon 
10311c7c3c6aSMatthew Dillon 		for (k = i; k < j; ++k) {
10321c7c3c6aSMatthew Dillon 			bp->b_pages[k - i] = m[k];
10335786be7cSAlan Cox 			m[k]->oflags |= VPO_SWAPINPROG;
10341c7c3c6aSMatthew Dillon 		}
10351c7c3c6aSMatthew Dillon 	}
10361c7c3c6aSMatthew Dillon 	bp->b_npages = j - i;
103726f9a767SRodney W. Grimes 
1038b4b70819SAttilio Rao 	PCPU_INC(cnt.v_swapin);
1039b4b70819SAttilio Rao 	PCPU_ADD(cnt.v_swappgsin, bp->b_npages);
10401c7c3c6aSMatthew Dillon 
1041df8bae1dSRodney W. Grimes 	/*
10421c7c3c6aSMatthew Dillon 	 * We still hold the lock on mreq, and our automatic completion routine
10431c7c3c6aSMatthew Dillon 	 * does not remove it.
1044df8bae1dSRodney W. Grimes 	 */
1045071a1710SAlan Cox 	vm_object_pip_add(object, bp->b_npages);
1046071a1710SAlan Cox 	VM_OBJECT_UNLOCK(object);
10471c7c3c6aSMatthew Dillon 
10481c7c3c6aSMatthew Dillon 	/*
10491c7c3c6aSMatthew Dillon 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
10501c7c3c6aSMatthew Dillon 	 * this point because we automatically release it on completion.
10511c7c3c6aSMatthew Dillon 	 * Instead, we look at the one page we are interested in which we
10521c7c3c6aSMatthew Dillon 	 * still hold a lock on even through the I/O completion.
10531c7c3c6aSMatthew Dillon 	 *
10541c7c3c6aSMatthew Dillon 	 * The other pages in our m[] array are also released on completion,
10551c7c3c6aSMatthew Dillon 	 * so we cannot assume they are valid anymore either.
10561c7c3c6aSMatthew Dillon 	 *
1057c37a77eeSPoul-Henning Kamp 	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
10581c7c3c6aSMatthew Dillon 	 */
1059b890cb2cSPeter Wemm 	BUF_KERNPROC(bp);
10604b03903aSPoul-Henning Kamp 	swp_pager_strategy(bp);
106126f9a767SRodney W. Grimes 
106226f9a767SRodney W. Grimes 	/*
10635786be7cSAlan Cox 	 * wait for the page we want to complete.  VPO_SWAPINPROG is always
10641c7c3c6aSMatthew Dillon 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
10651c7c3c6aSMatthew Dillon 	 * is set in the meta-data.
106626f9a767SRodney W. Grimes 	 */
106791449ce9SAlan Cox 	VM_OBJECT_LOCK(object);
10685786be7cSAlan Cox 	while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
10695786be7cSAlan Cox 		mreq->oflags |= VPO_WANTED;
107091449ce9SAlan Cox 		vm_page_lock_queues();
10715786be7cSAlan Cox 		vm_page_flag_set(mreq, PG_REFERENCED);
107291449ce9SAlan Cox 		vm_page_unlock_queues();
1073b4b70819SAttilio Rao 		PCPU_INC(cnt.v_intrans);
107491449ce9SAlan Cox 		if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
10759bd86a98SBruce M Simpson 			printf(
1076c5690651SPoul-Henning Kamp "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
1077c5690651SPoul-Henning Kamp 			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
10781c7c3c6aSMatthew Dillon 		}
10791b119d9dSDavid Greenman 	}
108026f9a767SRodney W. Grimes 
108126f9a767SRodney W. Grimes 	/*
1082a1287949SEivind Eklund 	 * mreq is left busied after completion, but all the other pages
10831c7c3c6aSMatthew Dillon 	 * are freed.  If we had an unrecoverable read error the page will
10841c7c3c6aSMatthew Dillon 	 * not be valid.
108526f9a767SRodney W. Grimes 	 */
10861c7c3c6aSMatthew Dillon 	if (mreq->valid != VM_PAGE_BITS_ALL) {
10871c7c3c6aSMatthew Dillon 		return (VM_PAGER_ERROR);
108826f9a767SRodney W. Grimes 	} else {
10891c7c3c6aSMatthew Dillon 		return (VM_PAGER_OK);
109026f9a767SRodney W. Grimes 	}
10911c7c3c6aSMatthew Dillon 
10921c7c3c6aSMatthew Dillon 	/*
10931c7c3c6aSMatthew Dillon 	 * A final note: in a low swap situation, we cannot deallocate swap
10941c7c3c6aSMatthew Dillon 	 * and mark a page dirty here because the caller is likely to mark
10951c7c3c6aSMatthew Dillon 	 * the page clean when we return, causing the page to possibly revert
10961c7c3c6aSMatthew Dillon 	 * to all-zeros later.
10971c7c3c6aSMatthew Dillon 	 */
1098df8bae1dSRodney W. Grimes }
1099df8bae1dSRodney W. Grimes 
11001c7c3c6aSMatthew Dillon /*
11011c7c3c6aSMatthew Dillon  *	swap_pager_putpages:
11021c7c3c6aSMatthew Dillon  *
11031c7c3c6aSMatthew Dillon  *	Assign swap (if necessary) and initiate I/O on the specified pages.
11041c7c3c6aSMatthew Dillon  *
11051c7c3c6aSMatthew Dillon  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
11061c7c3c6aSMatthew Dillon  *	are automatically converted to SWAP objects.
11071c7c3c6aSMatthew Dillon  *
1108ea3aecf5SPeter Wemm  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
11091c7c3c6aSMatthew Dillon  *	vm_page reservation system coupled with properly written VFS devices
11101c7c3c6aSMatthew Dillon  *	should ensure that no low-memory deadlock occurs.  This is an area
11111c7c3c6aSMatthew Dillon  *	which needs work.
11121c7c3c6aSMatthew Dillon  *
11131c7c3c6aSMatthew Dillon  *	The parent has N vm_object_pip_add() references prior to
11141c7c3c6aSMatthew Dillon  *	calling us and will remove references for rtvals[] that are
11151c7c3c6aSMatthew Dillon  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
11161c7c3c6aSMatthew Dillon  *	completion.
11171c7c3c6aSMatthew Dillon  *
11181c7c3c6aSMatthew Dillon  *	The parent has soft-busy'd the pages it passes us and will unbusy
11191c7c3c6aSMatthew Dillon  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
11201c7c3c6aSMatthew Dillon  *	We need to unbusy the rest on I/O completion.
11211c7c3c6aSMatthew Dillon  */
1122e4542174SMatthew Dillon void
11232f249180SPoul-Henning Kamp swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
11242f249180SPoul-Henning Kamp     boolean_t sync, int *rtvals)
1125df8bae1dSRodney W. Grimes {
11261c7c3c6aSMatthew Dillon 	int i;
11271c7c3c6aSMatthew Dillon 	int n = 0;
1128df8bae1dSRodney W. Grimes 
11291c7c3c6aSMatthew Dillon 	if (count && m[0]->object != object) {
11307036145bSMaxim Konovalov 		panic("swap_pager_putpages: object mismatch %p/%p",
11311c7c3c6aSMatthew Dillon 		    object,
11321c7c3c6aSMatthew Dillon 		    m[0]->object
11331c7c3c6aSMatthew Dillon 		);
11341c7c3c6aSMatthew Dillon 	}
1135ee3dc7d7SAlan Cox 
11361c7c3c6aSMatthew Dillon 	/*
11371c7c3c6aSMatthew Dillon 	 * Step 1
11381c7c3c6aSMatthew Dillon 	 *
11391c7c3c6aSMatthew Dillon 	 * Turn object into OBJT_SWAP
11401c7c3c6aSMatthew Dillon 	 * check for bogus sysops
11411c7c3c6aSMatthew Dillon 	 * force sync if not pageout process
11421c7c3c6aSMatthew Dillon 	 */
11434dcc5c2dSMatthew Dillon 	if (object->type != OBJT_SWAP)
11444dcc5c2dSMatthew Dillon 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1145ee3dc7d7SAlan Cox 	VM_OBJECT_UNLOCK(object);
1146e47ed70bSJohn Dyson 
1147e47ed70bSJohn Dyson 	if (curproc != pageproc)
1148e47ed70bSJohn Dyson 		sync = TRUE;
114926f9a767SRodney W. Grimes 
11501c7c3c6aSMatthew Dillon 	/*
11511c7c3c6aSMatthew Dillon 	 * Step 2
11521c7c3c6aSMatthew Dillon 	 *
1153ad3cce20SMatthew Dillon 	 * Update nsw parameters from swap_async_max sysctl values.
1154ad3cce20SMatthew Dillon 	 * Do not let the sysop crash the machine with bogus numbers.
1155327f4e83SMatthew Dillon 	 */
11566d541bf1SJohn Baldwin 	mtx_lock(&pbuf_mtx);
1157327f4e83SMatthew Dillon 	if (swap_async_max != nsw_wcount_async_max) {
1158327f4e83SMatthew Dillon 		int n;
1159327f4e83SMatthew Dillon 
1160327f4e83SMatthew Dillon 		/*
1161327f4e83SMatthew Dillon 		 * limit range
1162327f4e83SMatthew Dillon 		 */
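		/*
		 * For example, assuming the default nswbuf of 256, a
		 * sysctl value above 128 is clamped to 128 and a value
		 * below 1 is raised to 1.
		 */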
1163327f4e83SMatthew Dillon 		if ((n = swap_async_max) > nswbuf / 2)
1164327f4e83SMatthew Dillon 			n = nswbuf / 2;
1165327f4e83SMatthew Dillon 		if (n < 1)
1166327f4e83SMatthew Dillon 			n = 1;
1167327f4e83SMatthew Dillon 		swap_async_max = n;
1168327f4e83SMatthew Dillon 
1169327f4e83SMatthew Dillon 		/*
1170327f4e83SMatthew Dillon 		 * Adjust difference ( if possible ).  If the current async
1171327f4e83SMatthew Dillon 		 * count is too low, we may not be able to make the adjustment
1172327f4e83SMatthew Dillon 		 * at this time.
1173327f4e83SMatthew Dillon 		 */
1174327f4e83SMatthew Dillon 		n -= nsw_wcount_async_max;
1175327f4e83SMatthew Dillon 		if (nsw_wcount_async + n >= 0) {
1176327f4e83SMatthew Dillon 			nsw_wcount_async += n;
1177327f4e83SMatthew Dillon 			nsw_wcount_async_max += n;
1178327f4e83SMatthew Dillon 			wakeup(&nsw_wcount_async);
1179327f4e83SMatthew Dillon 		}
1180327f4e83SMatthew Dillon 	}
11816d541bf1SJohn Baldwin 	mtx_unlock(&pbuf_mtx);
1182327f4e83SMatthew Dillon 
1183327f4e83SMatthew Dillon 	/*
1184327f4e83SMatthew Dillon 	 * Step 3
1185327f4e83SMatthew Dillon 	 *
11861c7c3c6aSMatthew Dillon 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
11871c7c3c6aSMatthew Dillon 	 * The page is left dirty until the pageout operation completes
11881c7c3c6aSMatthew Dillon 	 * successfully.
11891c7c3c6aSMatthew Dillon 	 */
11901c7c3c6aSMatthew Dillon 	for (i = 0; i < count; i += n) {
11911c7c3c6aSMatthew Dillon 		int j;
11921c7c3c6aSMatthew Dillon 		struct buf *bp;
1193a316d390SJohn Dyson 		daddr_t blk;
119426f9a767SRodney W. Grimes 
1195df8bae1dSRodney W. Grimes 		/*
11961c7c3c6aSMatthew Dillon 		 * Maximum I/O size is limited by the smallest of BLIST_MAX_ALLOC,
11961c7c3c6aSMatthew Dillon 		 * the pages remaining in this request, and nsw_cluster_max.
1197df8bae1dSRodney W. Grimes 		 */
11981c7c3c6aSMatthew Dillon 		n = min(BLIST_MAX_ALLOC, count - i);
1199327f4e83SMatthew Dillon 		n = min(n, nsw_cluster_max);
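		/*
		 * e.g. a 64-page request with nsw_cluster_max == 16
		 * (a typical value) is issued as four 16-page I/Os.
		 */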
12001c7c3c6aSMatthew Dillon 
120126f9a767SRodney W. Grimes 		/*
12021c7c3c6aSMatthew Dillon 		 * Get biggest block of swap we can.  If we fail, fall
12031c7c3c6aSMatthew Dillon 		 * back and try to allocate a smaller block.  Don't go
12041c7c3c6aSMatthew Dillon 		 * overboard trying to allocate space if it would overly
12051c7c3c6aSMatthew Dillon 		 * fragment swap.
120626f9a767SRodney W. Grimes 		 */
12071c7c3c6aSMatthew Dillon 		while (
12081c7c3c6aSMatthew Dillon 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
12091c7c3c6aSMatthew Dillon 		    n > 4
12101c7c3c6aSMatthew Dillon 		) {
12111c7c3c6aSMatthew Dillon 			n >>= 1;
121226f9a767SRodney W. Grimes 		}
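		/*
		 * e.g. a failed 16-block allocation is retried as 8,
		 * then 4 blocks before we give up for this chunk.
		 */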
12131c7c3c6aSMatthew Dillon 		if (blk == SWAPBLK_NONE) {
12144dcc5c2dSMatthew Dillon 			for (j = 0; j < n; ++j)
12151c7c3c6aSMatthew Dillon 				rtvals[i+j] = VM_PAGER_FAIL;
12161c7c3c6aSMatthew Dillon 			continue;
121726f9a767SRodney W. Grimes 		}
121826f9a767SRodney W. Grimes 
121926f9a767SRodney W. Grimes 		/*
12201c7c3c6aSMatthew Dillon 		 * All I/O parameters have been satisfied, build the I/O
12211c7c3c6aSMatthew Dillon 		 * request and assign the swap space.
122226f9a767SRodney W. Grimes 		 */
1223327f4e83SMatthew Dillon 		if (sync == TRUE) {
1224327f4e83SMatthew Dillon 			bp = getpbuf(&nsw_wcount_sync);
1225327f4e83SMatthew Dillon 		} else {
1226327f4e83SMatthew Dillon 			bp = getpbuf(&nsw_wcount_async);
122721144e3bSPoul-Henning Kamp 			bp->b_flags = B_ASYNC;
1228327f4e83SMatthew Dillon 		}
12295e04322aSPoul-Henning Kamp 		bp->b_flags |= B_PAGING;
1230912e4ae9SPoul-Henning Kamp 		bp->b_iocmd = BIO_WRITE;
123126f9a767SRodney W. Grimes 
12321c7c3c6aSMatthew Dillon 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
12331c7c3c6aSMatthew Dillon 
1234fdcc1cc0SJohn Baldwin 		bp->b_rcred = crhold(thread0.td_ucred);
1235fdcc1cc0SJohn Baldwin 		bp->b_wcred = crhold(thread0.td_ucred);
12361c7c3c6aSMatthew Dillon 		bp->b_bcount = PAGE_SIZE * n;
12371c7c3c6aSMatthew Dillon 		bp->b_bufsize = PAGE_SIZE * n;
12381c7c3c6aSMatthew Dillon 		bp->b_blkno = blk;
1239e47ed70bSJohn Dyson 
1240ee3dc7d7SAlan Cox 		VM_OBJECT_LOCK(object);
12411c7c3c6aSMatthew Dillon 		for (j = 0; j < n; ++j) {
12421c7c3c6aSMatthew Dillon 			vm_page_t mreq = m[i+j];
12431c7c3c6aSMatthew Dillon 
12441c7c3c6aSMatthew Dillon 			swp_pager_meta_build(
12451c7c3c6aSMatthew Dillon 			    mreq->object,
12461c7c3c6aSMatthew Dillon 			    mreq->pindex,
12474dcc5c2dSMatthew Dillon 			    blk + j
12481c7c3c6aSMatthew Dillon 			);
12497dbf82dcSMatthew Dillon 			vm_page_dirty(mreq);
12501c7c3c6aSMatthew Dillon 			rtvals[i+j] = VM_PAGER_OK;
12511c7c3c6aSMatthew Dillon 
12525786be7cSAlan Cox 			mreq->oflags |= VPO_SWAPINPROG;
12531c7c3c6aSMatthew Dillon 			bp->b_pages[j] = mreq;
12541c7c3c6aSMatthew Dillon 		}
1255ee3dc7d7SAlan Cox 		VM_OBJECT_UNLOCK(object);
12561c7c3c6aSMatthew Dillon 		bp->b_npages = n;
1257a5296b05SJulian Elischer 		/*
1258a5296b05SJulian Elischer 		 * Must set dirty range for NFS to work.
1259a5296b05SJulian Elischer 		 */
1260a5296b05SJulian Elischer 		bp->b_dirtyoff = 0;
1261a5296b05SJulian Elischer 		bp->b_dirtyend = bp->b_bcount;
12621c7c3c6aSMatthew Dillon 
1263b4b70819SAttilio Rao 		PCPU_INC(cnt.v_swapout);
1264b4b70819SAttilio Rao 		PCPU_ADD(cnt.v_swappgsout, bp->b_npages);
126526f9a767SRodney W. Grimes 
126626f9a767SRodney W. Grimes 		/*
12671c7c3c6aSMatthew Dillon 		 * asynchronous
12681c7c3c6aSMatthew Dillon 		 *
1269c37a77eeSPoul-Henning Kamp 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
127026f9a767SRodney W. Grimes 		 */
12711c7c3c6aSMatthew Dillon 		if (sync == FALSE) {
12721c7c3c6aSMatthew Dillon 			bp->b_iodone = swp_pager_async_iodone;
127367812eacSKirk McKusick 			BUF_KERNPROC(bp);
12744b03903aSPoul-Henning Kamp 			swp_pager_strategy(bp);
12751c7c3c6aSMatthew Dillon 
12761c7c3c6aSMatthew Dillon 			for (j = 0; j < n; ++j)
12771c7c3c6aSMatthew Dillon 				rtvals[i+j] = VM_PAGER_PEND;
127823955314SAlfred Perlstein 			/* restart outer loop */
12791c7c3c6aSMatthew Dillon 			continue;
128026f9a767SRodney W. Grimes 		}
1281e47ed70bSJohn Dyson 
128226f9a767SRodney W. Grimes 		/*
12831c7c3c6aSMatthew Dillon 		 * synchronous
12841c7c3c6aSMatthew Dillon 		 *
1285c37a77eeSPoul-Henning Kamp 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
12861c7c3c6aSMatthew Dillon 		 */
12872c840b1fSAlan Cox 		bp->b_iodone = bdone;
12884b03903aSPoul-Henning Kamp 		swp_pager_strategy(bp);
12891c7c3c6aSMatthew Dillon 
12901c7c3c6aSMatthew Dillon 		/*
12911c7c3c6aSMatthew Dillon 		 * Wait for the sync I/O to complete, then update rtvals.
12921c7c3c6aSMatthew Dillon 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
12931c7c3c6aSMatthew Dillon 		 * our async completion routine at the end, thus avoiding a
12941c7c3c6aSMatthew Dillon 		 * double-free.
129526f9a767SRodney W. Grimes 		 */
12962c840b1fSAlan Cox 		bwait(bp, PVM, "swwrt");
12971c7c3c6aSMatthew Dillon 		for (j = 0; j < n; ++j)
12981c7c3c6aSMatthew Dillon 			rtvals[i+j] = VM_PAGER_PEND;
12991c7c3c6aSMatthew Dillon 		/*
13001c7c3c6aSMatthew Dillon 		 * Now that we are through with the bp, we can call the
13011c7c3c6aSMatthew Dillon 		 * normal async completion, which frees everything up.
13021c7c3c6aSMatthew Dillon 		 */
13031c7c3c6aSMatthew Dillon 		swp_pager_async_iodone(bp);
13041c7c3c6aSMatthew Dillon 	}
13052e3b314dSAlan Cox 	VM_OBJECT_LOCK(object);
13061c7c3c6aSMatthew Dillon }
13071c7c3c6aSMatthew Dillon 
13081c7c3c6aSMatthew Dillon /*
13091c7c3c6aSMatthew Dillon  *	swp_pager_async_iodone:
13101c7c3c6aSMatthew Dillon  *
13111c7c3c6aSMatthew Dillon  *	Completion routine for asynchronous reads and writes from/to swap.
13121c7c3c6aSMatthew Dillon  *	Also called manually by synchronous code to finish up a bp.
13131c7c3c6aSMatthew Dillon  *
13141c7c3c6aSMatthew Dillon  *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
13151c7c3c6aSMatthew Dillon  *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
13161c7c3c6aSMatthew Dillon  *	unbusy all pages except the 'main' request page.  For WRITE
13171c7c3c6aSMatthew Dillon  *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
13181c7c3c6aSMatthew Dillon  *	because we marked them all VM_PAGER_PEND on return from putpages ).
13191c7c3c6aSMatthew Dillon  *
13201c7c3c6aSMatthew Dillon  *	This routine may not block.
13214dcc5c2dSMatthew Dillon  *	This routine is called at splbio() or better
13224dcc5c2dSMatthew Dillon  *
13234dcc5c2dSMatthew Dillon  *	We up ourselves to splvm() as required for various vm_page related
13244dcc5c2dSMatthew Dillon  *	calls.
13251c7c3c6aSMatthew Dillon  */
13261c7c3c6aSMatthew Dillon static void
13272f249180SPoul-Henning Kamp swp_pager_async_iodone(struct buf *bp)
13281c7c3c6aSMatthew Dillon {
13291c7c3c6aSMatthew Dillon 	int i;
13301c7c3c6aSMatthew Dillon 	vm_object_t object = NULL;
13311c7c3c6aSMatthew Dillon 
13321c7c3c6aSMatthew Dillon 	/*
13331c7c3c6aSMatthew Dillon 	 * report error
13341c7c3c6aSMatthew Dillon 	 */
1335c244d2deSPoul-Henning Kamp 	if (bp->b_ioflags & BIO_ERROR) {
13361c7c3c6aSMatthew Dillon 		printf(
13371c7c3c6aSMatthew Dillon 		    "swap_pager: I/O error - %s failed; blkno %ld,"
13381c7c3c6aSMatthew Dillon 			" size %ld, error %d\n",
133921144e3bSPoul-Henning Kamp 		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
13401c7c3c6aSMatthew Dillon 		    (long)bp->b_blkno,
13411c7c3c6aSMatthew Dillon 		    (long)bp->b_bcount,
13421c7c3c6aSMatthew Dillon 		    bp->b_error
13431c7c3c6aSMatthew Dillon 		);
13441c7c3c6aSMatthew Dillon 	}
13451c7c3c6aSMatthew Dillon 
13461c7c3c6aSMatthew Dillon 	/*
134726f9a767SRodney W. Grimes 	 * remove the mapping for kernel virtual
134826f9a767SRodney W. Grimes 	 */
13491c7c3c6aSMatthew Dillon 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
135026f9a767SRodney W. Grimes 
135133a609ecSAlan Cox 	if (bp->b_npages) {
135233a609ecSAlan Cox 		object = bp->b_pages[0]->object;
135333a609ecSAlan Cox 		VM_OBJECT_LOCK(object);
135433a609ecSAlan Cox 	}
135540eab1e9SAlan Cox 	vm_page_lock_queues();
135626f9a767SRodney W. Grimes 	/*
13571c7c3c6aSMatthew Dillon 	 * cleanup pages.  If an error occurs writing to swap, we are in
13581c7c3c6aSMatthew Dillon 	 * very serious trouble.  If it happens to be a disk error, though,
13591c7c3c6aSMatthew Dillon 	 * we may be able to recover by reassigning the swap later on.  So
13601c7c3c6aSMatthew Dillon 	 * in this case we remove the m->swapblk assignment for the page
13611c7c3c6aSMatthew Dillon 	 * but do not free it in the rlist.  The erroneous block(s) are thus
13621c7c3c6aSMatthew Dillon 	 * never reallocated as swap.  Redirty the page and continue.
136326f9a767SRodney W. Grimes 	 */
13641c7c3c6aSMatthew Dillon 	for (i = 0; i < bp->b_npages; ++i) {
13651c7c3c6aSMatthew Dillon 		vm_page_t m = bp->b_pages[i];
1366e47ed70bSJohn Dyson 
13675786be7cSAlan Cox 		m->oflags &= ~VPO_SWAPINPROG;
1368e47ed70bSJohn Dyson 
1369c244d2deSPoul-Henning Kamp 		if (bp->b_ioflags & BIO_ERROR) {
1370ffc82b0aSJohn Dyson 			/*
13711c7c3c6aSMatthew Dillon 			 * If an error occurs I'd love to throw the swapblk
13721c7c3c6aSMatthew Dillon 			 * away without freeing it back to swapspace, so it
13731c7c3c6aSMatthew Dillon 			 * can never be used again.  But I can't from an
13741c7c3c6aSMatthew Dillon 			 * interrupt.
1375ffc82b0aSJohn Dyson 			 */
137621144e3bSPoul-Henning Kamp 			if (bp->b_iocmd == BIO_READ) {
13771c7c3c6aSMatthew Dillon 				/*
13781c7c3c6aSMatthew Dillon 				 * When reading, reqpage needs to stay
13791c7c3c6aSMatthew Dillon 				 * locked for the parent, but all other
13801c7c3c6aSMatthew Dillon 				 * pages can be freed.  We still want to
13811c7c3c6aSMatthew Dillon 				 * wakeup the parent waiting on the page,
13821c7c3c6aSMatthew Dillon 				 * though.  ( also: pg_reqpage can be -1 and
13831c7c3c6aSMatthew Dillon 				 * not match anything ).
13841c7c3c6aSMatthew Dillon 				 *
13851c7c3c6aSMatthew Dillon 				 * We have to wake specifically requested pages
13865786be7cSAlan Cox 				 * up too because we cleared VPO_SWAPINPROG and
13871c7c3c6aSMatthew Dillon 				 * someone may be waiting for that.
13881c7c3c6aSMatthew Dillon 				 *
13891c7c3c6aSMatthew Dillon 				 * NOTE: for reads, m->dirty will probably
1390956f3135SPhilippe Charnier 				 * be overridden by the original caller of
13911c7c3c6aSMatthew Dillon 				 * getpages so don't play cute tricks here.
13921c7c3c6aSMatthew Dillon 				 */
13931c7c3c6aSMatthew Dillon 				m->valid = 0;
13941c7c3c6aSMatthew Dillon 				if (i != bp->b_pager.pg_reqpage)
13951c7c3c6aSMatthew Dillon 					vm_page_free(m);
13961c7c3c6aSMatthew Dillon 				else
13971c7c3c6aSMatthew Dillon 					vm_page_flash(m);
13981c7c3c6aSMatthew Dillon 				/*
13991c7c3c6aSMatthew Dillon 				 * If i == bp->b_pager.pg_reqpage, do not wake
14001c7c3c6aSMatthew Dillon 				 * the page up.  The caller needs to.
14011c7c3c6aSMatthew Dillon 				 */
14021c7c3c6aSMatthew Dillon 			} else {
14031c7c3c6aSMatthew Dillon 				/*
14041c7c3c6aSMatthew Dillon 				 * If a write error occurs, reactivate page
14051c7c3c6aSMatthew Dillon 				 * so it doesn't clog the inactive list,
14061c7c3c6aSMatthew Dillon 				 * then finish the I/O.
14071c7c3c6aSMatthew Dillon 				 */
14087dbf82dcSMatthew Dillon 				vm_page_dirty(m);
14091c7c3c6aSMatthew Dillon 				vm_page_activate(m);
14101c7c3c6aSMatthew Dillon 				vm_page_io_finish(m);
14111c7c3c6aSMatthew Dillon 			}
141221144e3bSPoul-Henning Kamp 		} else if (bp->b_iocmd == BIO_READ) {
14131c7c3c6aSMatthew Dillon 			/*
14141c7c3c6aSMatthew Dillon 			 * For read success, clear dirty bits.  Nobody should
14151c7c3c6aSMatthew Dillon 			 * have this page mapped but don't take any chances,
14161c7c3c6aSMatthew Dillon 			 * make sure the pmap modify bits are also cleared.
14171c7c3c6aSMatthew Dillon 			 *
14181c7c3c6aSMatthew Dillon 			 * NOTE: for reads, m->dirty will probably be
1419956f3135SPhilippe Charnier 			 * overridden by the original caller of getpages so
14201c7c3c6aSMatthew Dillon 			 * we cannot set them in order to free the underlying
14211c7c3c6aSMatthew Dillon 			 * swap in a low-swap situation.  I don't think we'd
14221c7c3c6aSMatthew Dillon 			 * want to do that anyway, but it was an optimization
14231c7c3c6aSMatthew Dillon 			 * that existed in the old swapper for a time before
14241c7c3c6aSMatthew Dillon 			 * it got ripped out due to precisely this problem.
14251c7c3c6aSMatthew Dillon 			 *
14261c7c3c6aSMatthew Dillon 			 * If not the requested page then deactivate it.
14271c7c3c6aSMatthew Dillon 			 *
14281c7c3c6aSMatthew Dillon 			 * Note that the requested page, reqpage, is left
14291c7c3c6aSMatthew Dillon 			 * busied, but we still have to wake it up.  The
14301c7c3c6aSMatthew Dillon 			 * other pages are released (unbusied) by
14311c7c3c6aSMatthew Dillon 			 * vm_page_wakeup().  We do not set reqpage's
14321c7c3c6aSMatthew Dillon 			 * valid bits here, it is up to the caller.
14331c7c3c6aSMatthew Dillon 			 */
14340385347cSPeter Wemm 			pmap_clear_modify(m);
14351c7c3c6aSMatthew Dillon 			m->valid = VM_PAGE_BITS_ALL;
14362c28a105SAlan Cox 			vm_page_undirty(m);
14371c7c3c6aSMatthew Dillon 
14381c7c3c6aSMatthew Dillon 			/*
14391c7c3c6aSMatthew Dillon 			 * We have to wake specifically requested pages
14405786be7cSAlan Cox 			 * up too because we cleared VPO_SWAPINPROG and
14411c7c3c6aSMatthew Dillon 			 * could be waiting for it in getpages.  However,
14421c7c3c6aSMatthew Dillon 			 * be sure to not unbusy getpages specifically
14431c7c3c6aSMatthew Dillon 			 * requested page - getpages expects it to be
14441c7c3c6aSMatthew Dillon 			 * left busy.
14451c7c3c6aSMatthew Dillon 			 */
14461c7c3c6aSMatthew Dillon 			if (i != bp->b_pager.pg_reqpage) {
14471c7c3c6aSMatthew Dillon 				vm_page_deactivate(m);
14481c7c3c6aSMatthew Dillon 				vm_page_wakeup(m);
14491c7c3c6aSMatthew Dillon 			} else {
14501c7c3c6aSMatthew Dillon 				vm_page_flash(m);
14511c7c3c6aSMatthew Dillon 			}
14521c7c3c6aSMatthew Dillon 		} else {
14531c7c3c6aSMatthew Dillon 			/*
14541c7c3c6aSMatthew Dillon 			 * For write success, clear the modify and dirty
14551c7c3c6aSMatthew Dillon 			 * status, then finish the I/O ( which decrements the
14561c7c3c6aSMatthew Dillon 			 * busy count and possibly wakes waiters up ).
14571c7c3c6aSMatthew Dillon 			 */
14580385347cSPeter Wemm 			pmap_clear_modify(m);
1459c52e7044SAlan Cox 			vm_page_undirty(m);
14601c7c3c6aSMatthew Dillon 			vm_page_io_finish(m);
14612c840b1fSAlan Cox 			if (vm_page_count_severe())
14622c840b1fSAlan Cox 				vm_page_try_to_cache(m);
1463ffc82b0aSJohn Dyson 		}
1464df8bae1dSRodney W. Grimes 	}
146540eab1e9SAlan Cox 	vm_page_unlock_queues();
146626f9a767SRodney W. Grimes 
14671c7c3c6aSMatthew Dillon 	/*
14681c7c3c6aSMatthew Dillon 	 * adjust pip.  NOTE: the original parent may still have its own
14691c7c3c6aSMatthew Dillon 	 * pip refs on the object.
14701c7c3c6aSMatthew Dillon 	 */
14710d420ad3SAlan Cox 	if (object != NULL) {
14721c7c3c6aSMatthew Dillon 		vm_object_pip_wakeupn(object, bp->b_npages);
14730d420ad3SAlan Cox 		VM_OBJECT_UNLOCK(object);
14740d420ad3SAlan Cox 	}
147526f9a767SRodney W. Grimes 
14761c7c3c6aSMatthew Dillon 	/*
1477100650deSOlivier Houchard 	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1478100650deSOlivier Houchard 	 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1479100650deSOlivier Houchard 	 * trigger a KASSERT in relpbuf().
1480100650deSOlivier Houchard 	 */
1481100650deSOlivier Houchard 	if (bp->b_vp) {
1482100650deSOlivier Houchard 		bp->b_vp = NULL;
1483100650deSOlivier Houchard 		bp->b_bufobj = NULL;
1484100650deSOlivier Houchard 	}
1485100650deSOlivier Houchard 	/*
14861c7c3c6aSMatthew Dillon 	 * release the physical I/O buffer
14871c7c3c6aSMatthew Dillon 	 */
1488327f4e83SMatthew Dillon 	relpbuf(
1489327f4e83SMatthew Dillon 	    bp,
149021144e3bSPoul-Henning Kamp 	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1491327f4e83SMatthew Dillon 		((bp->b_flags & B_ASYNC) ?
1492327f4e83SMatthew Dillon 		    &nsw_wcount_async :
1493327f4e83SMatthew Dillon 		    &nsw_wcount_sync
1494327f4e83SMatthew Dillon 		)
1495327f4e83SMatthew Dillon 	    )
1496327f4e83SMatthew Dillon 	);
149726f9a767SRodney W. Grimes }
14981c7c3c6aSMatthew Dillon 
149992da00bbSMatthew Dillon /*
150092da00bbSMatthew Dillon  *	swap_pager_isswapped:
150192da00bbSMatthew Dillon  *
150292da00bbSMatthew Dillon  *	Return 1 if at least one page in the given object is paged
150392da00bbSMatthew Dillon  *	out to the given swap device.
150492da00bbSMatthew Dillon  *
150592da00bbSMatthew Dillon  *	This routine may not block.
150692da00bbSMatthew Dillon  */
15078f60c087SPoul-Henning Kamp int
15088f60c087SPoul-Henning Kamp swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
15098f60c087SPoul-Henning Kamp {
151092da00bbSMatthew Dillon 	daddr_t index = 0;
151192da00bbSMatthew Dillon 	int bcount;
151292da00bbSMatthew Dillon 	int i;
151392da00bbSMatthew Dillon 
151417cd3642SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1515f6bcadc4SDavid Schultz 	if (object->type != OBJT_SWAP)
1516f6bcadc4SDavid Schultz 		return (0);
1517f6bcadc4SDavid Schultz 
1518b3fed13eSDavid Schultz 	mtx_lock(&swhash_mtx);
151992da00bbSMatthew Dillon 	for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) {
152092da00bbSMatthew Dillon 		struct swblock *swap;
152192da00bbSMatthew Dillon 
152292da00bbSMatthew Dillon 		if ((swap = *swp_pager_hash(object, index)) != NULL) {
152392da00bbSMatthew Dillon 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1524b3fed13eSDavid Schultz 				if (swp_pager_isondev(swap->swb_pages[i], sp)) {
15257827d9b0SAlan Cox 					mtx_unlock(&swhash_mtx);
1526b3fed13eSDavid Schultz 					return (1);
152792da00bbSMatthew Dillon 				}
152892da00bbSMatthew Dillon 			}
15297827d9b0SAlan Cox 		}
153092da00bbSMatthew Dillon 		index += SWAP_META_PAGES;
153192da00bbSMatthew Dillon 		if (index > 0x20000000)
153292da00bbSMatthew Dillon 			panic("swap_pager_isswapped: failed to locate all swap meta blocks");
153392da00bbSMatthew Dillon 	}
1534b3fed13eSDavid Schultz 	mtx_unlock(&swhash_mtx);
1535b3fed13eSDavid Schultz 	return (0);
153692da00bbSMatthew Dillon }
153792da00bbSMatthew Dillon 
153892da00bbSMatthew Dillon /*
153992da00bbSMatthew Dillon  * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
154092da00bbSMatthew Dillon  *
154192da00bbSMatthew Dillon  *	This routine dissociates the page at the given index within a
154292da00bbSMatthew Dillon  *	swap block from its backing store, paging it in if necessary.
154392da00bbSMatthew Dillon  *	If the page is paged in, it is placed in the inactive queue,
154492da00bbSMatthew Dillon  *	since it had its backing store ripped out from under it.
154592da00bbSMatthew Dillon  *	We also attempt to swap in all other pages in the swap block,
154692da00bbSMatthew Dillon  *	We also attempt to swap in all other pages in the swap block, but
154792da00bbSMatthew Dillon  *	paged in.
154892da00bbSMatthew Dillon  *
154992da00bbSMatthew Dillon  *	XXX - The code to page the whole block in doesn't work, so we
155092da00bbSMatthew Dillon  *	      revert to the one-by-one behavior for now.  Sigh.
155192da00bbSMatthew Dillon  */
155262a59e8fSWarner Losh static inline void
1553b3fed13eSDavid Schultz swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
155492da00bbSMatthew Dillon {
155592da00bbSMatthew Dillon 	vm_page_t m;
155692da00bbSMatthew Dillon 
155792da00bbSMatthew Dillon 	vm_object_pip_add(object, 1);
1558b3fed13eSDavid Schultz 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
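	/*
	 * vm_page_grab() with VM_ALLOC_RETRY sleeps rather than failing,
	 * so m is always returned busied here.
	 */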
155992da00bbSMatthew Dillon 	if (m->valid == VM_PAGE_BITS_ALL) {
156092da00bbSMatthew Dillon 		vm_object_pip_subtract(object, 1);
156192da00bbSMatthew Dillon 		vm_page_lock_queues();
156292da00bbSMatthew Dillon 		vm_page_activate(m);
156392da00bbSMatthew Dillon 		vm_page_dirty(m);
156492da00bbSMatthew Dillon 		vm_page_unlock_queues();
156566bdd5d6SAlan Cox 		vm_page_wakeup(m);
156692da00bbSMatthew Dillon 		vm_pager_page_unswapped(m);
156792da00bbSMatthew Dillon 		return;
156892da00bbSMatthew Dillon 	}
156992da00bbSMatthew Dillon 
1570b3fed13eSDavid Schultz 	if (swap_pager_getpages(object, &m, 1, 0) != VM_PAGER_OK)
157192da00bbSMatthew Dillon 		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
157292da00bbSMatthew Dillon 	vm_object_pip_subtract(object, 1);
157392da00bbSMatthew Dillon 	vm_page_lock_queues();
157492da00bbSMatthew Dillon 	vm_page_dirty(m);
157592da00bbSMatthew Dillon 	vm_page_dontneed(m);
157692da00bbSMatthew Dillon 	vm_page_unlock_queues();
157766bdd5d6SAlan Cox 	vm_page_wakeup(m);
157892da00bbSMatthew Dillon 	vm_pager_page_unswapped(m);
157992da00bbSMatthew Dillon }
158092da00bbSMatthew Dillon 
158192da00bbSMatthew Dillon /*
158292da00bbSMatthew Dillon  *	swap_pager_swapoff:
158392da00bbSMatthew Dillon  *
158492da00bbSMatthew Dillon  *	Page in all of the pages that have been paged out to the
158592da00bbSMatthew Dillon  *	given device.  The corresponding blocks in the bitmap must be
158692da00bbSMatthew Dillon  *	marked as allocated and the device must be flagged SW_CLOSING.
158792da00bbSMatthew Dillon  *	There may be no processes swapped out to the device.
158892da00bbSMatthew Dillon  *
158992da00bbSMatthew Dillon  *	This routine may block.
159092da00bbSMatthew Dillon  */
1591e9c0cc15SPoul-Henning Kamp static void
1592b3fed13eSDavid Schultz swap_pager_swapoff(struct swdevt *sp)
159392da00bbSMatthew Dillon {
159492da00bbSMatthew Dillon 	struct swblock *swap;
15958bc61209SDavid Schultz 	int i, j, retries;
159692da00bbSMatthew Dillon 
159792da00bbSMatthew Dillon 	GIANT_REQUIRED;
159892da00bbSMatthew Dillon 
15998bc61209SDavid Schultz 	retries = 0;
160092da00bbSMatthew Dillon full_rescan:
1601b3fed13eSDavid Schultz 	mtx_lock(&swhash_mtx);
160292da00bbSMatthew Dillon 	for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
160392da00bbSMatthew Dillon restart:
1604b3fed13eSDavid Schultz 		for (swap = swhash[i]; swap != NULL; swap = swap->swb_hnext) {
1605b3fed13eSDavid Schultz 			vm_object_t object = swap->swb_object;
1606b3fed13eSDavid Schultz 			vm_pindex_t pindex = swap->swb_index;
160792da00bbSMatthew Dillon 			for (j = 0; j < SWAP_META_PAGES; ++j) {
1608b3fed13eSDavid Schultz 				if (swp_pager_isondev(swap->swb_pages[j], sp)) {
1609b3fed13eSDavid Schultz 					/* avoid deadlock */
1610b3fed13eSDavid Schultz 					if (!VM_OBJECT_TRYLOCK(object)) {
161192da00bbSMatthew Dillon 						break;
1612b3fed13eSDavid Schultz 					} else {
1613b3fed13eSDavid Schultz 						mtx_unlock(&swhash_mtx);
1614b3fed13eSDavid Schultz 						swp_pager_force_pagein(object,
1615b3fed13eSDavid Schultz 						    pindex + j);
1616b3fed13eSDavid Schultz 						VM_OBJECT_UNLOCK(object);
1617b3fed13eSDavid Schultz 						mtx_lock(&swhash_mtx);
161892da00bbSMatthew Dillon 						goto restart;
161992da00bbSMatthew Dillon 					}
1620b3fed13eSDavid Schultz 				}
1621b3fed13eSDavid Schultz 			}
1622b3fed13eSDavid Schultz 		}
162392da00bbSMatthew Dillon 	}
1624d536c58fSAlan Cox 	mtx_unlock(&swhash_mtx);
16258bc61209SDavid Schultz 	if (sp->sw_used) {
162692da00bbSMatthew Dillon 		/*
16278bc61209SDavid Schultz 		 * Objects may be locked or paging to the device being
16288bc61209SDavid Schultz 		 * removed, so we will miss their pages and need to
16298bc61209SDavid Schultz 		 * make another pass.  We have marked this device as
16308bc61209SDavid Schultz 		 * SW_CLOSING, so the activity should finish soon.
163192da00bbSMatthew Dillon 		 */
16328bc61209SDavid Schultz 		retries++;
16338bc61209SDavid Schultz 		if (retries > 100) {
16348bc61209SDavid Schultz 			panic("swapoff: failed to locate %d swap blocks",
16358bc61209SDavid Schultz 			    sp->sw_used);
16368bc61209SDavid Schultz 		}
16374d70511aSJohn Baldwin 		pause("swpoff", hz / 20);
163892da00bbSMatthew Dillon 		goto full_rescan;
163992da00bbSMatthew Dillon 	}
164092da00bbSMatthew Dillon }
164192da00bbSMatthew Dillon 
16421c7c3c6aSMatthew Dillon /************************************************************************
16431c7c3c6aSMatthew Dillon  *				SWAP META DATA 				*
16441c7c3c6aSMatthew Dillon  ************************************************************************
16451c7c3c6aSMatthew Dillon  *
16461c7c3c6aSMatthew Dillon  *	These routines manipulate the swap metadata stored in the
16474dcc5c2dSMatthew Dillon  *	OBJT_SWAP object.  All swp_*() routines must be called at
16484dcc5c2dSMatthew Dillon  *	splvm() because swap can be freed up by the low level vm_page
16494dcc5c2dSMatthew Dillon  *	code which might be called from interrupts beyond what splbio() covers.
16501c7c3c6aSMatthew Dillon  *
16514dcc5c2dSMatthew Dillon  *	Swap metadata is implemented with a global hash and not directly
16524dcc5c2dSMatthew Dillon  *	linked into the object.  Instead the object simply contains
16534dcc5c2dSMatthew Dillon  *	appropriate tracking counters.
16541c7c3c6aSMatthew Dillon  */
16551c7c3c6aSMatthew Dillon 
16561c7c3c6aSMatthew Dillon /*
16571c7c3c6aSMatthew Dillon  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
16581c7c3c6aSMatthew Dillon  *
16591c7c3c6aSMatthew Dillon  *	We first convert the object to a swap object if it is a default
16601c7c3c6aSMatthew Dillon  *	object.
16611c7c3c6aSMatthew Dillon  *
16621c7c3c6aSMatthew Dillon  *	The specified swapblk is added to the object's swap metadata.  If
16631c7c3c6aSMatthew Dillon  *	the swapblk is not valid, it is freed instead.  Any previously
16641c7c3c6aSMatthew Dillon  *	assigned swapblk is freed.
16654dcc5c2dSMatthew Dillon  *
16664dcc5c2dSMatthew Dillon  *	This routine must be called at splvm(), except when used to convert
16674dcc5c2dSMatthew Dillon  *	an OBJT_DEFAULT object into an OBJT_SWAP object.
16681c7c3c6aSMatthew Dillon  */
16691c7c3c6aSMatthew Dillon static void
16702f249180SPoul-Henning Kamp swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
16712f249180SPoul-Henning Kamp {
16721c7c3c6aSMatthew Dillon 	struct swblock *swap;
16731c7c3c6aSMatthew Dillon 	struct swblock **pswap;
167423f09d50SIan Dowse 	int idx;
16751c7c3c6aSMatthew Dillon 
1676ee3dc7d7SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
16771c7c3c6aSMatthew Dillon 	/*
16781c7c3c6aSMatthew Dillon 	 * Convert default object to swap object if necessary
16791c7c3c6aSMatthew Dillon 	 */
16801c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP) {
16811c7c3c6aSMatthew Dillon 		object->type = OBJT_SWAP;
16821c7c3c6aSMatthew Dillon 		object->un_pager.swp.swp_bcount = 0;
16831c7c3c6aSMatthew Dillon 
16841c7c3c6aSMatthew Dillon 		if (object->handle != NULL) {
1685bd228075SAlan Cox 			mtx_lock(&sw_alloc_mtx);
16861c7c3c6aSMatthew Dillon 			TAILQ_INSERT_TAIL(
16871c7c3c6aSMatthew Dillon 			    NOBJLIST(object->handle),
16881c7c3c6aSMatthew Dillon 			    object,
16891c7c3c6aSMatthew Dillon 			    pager_object_list
16901c7c3c6aSMatthew Dillon 			);
1691a9fa2c05SAlfred Perlstein 			mtx_unlock(&sw_alloc_mtx);
16921c7c3c6aSMatthew Dillon 		}
1693bd228075SAlan Cox 	}
16941c7c3c6aSMatthew Dillon 
16951c7c3c6aSMatthew Dillon 	/*
16961c7c3c6aSMatthew Dillon 	 * Locate hash entry.  If not found create, but if we aren't adding
16974dcc5c2dSMatthew Dillon 	 * anything just return.  If we run out of space in the map we wait
16984dcc5c2dSMatthew Dillon 	 * and, since the hash table may have changed, retry.
16991c7c3c6aSMatthew Dillon 	 */
17004dcc5c2dSMatthew Dillon retry:
17017827d9b0SAlan Cox 	mtx_lock(&swhash_mtx);
170223f09d50SIan Dowse 	pswap = swp_pager_hash(object, pindex);
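	/*
	 * swp_pager_hash() returns a pointer to the hash-chain link so
	 * that a new swblock can be spliced in below, or an existing
	 * one unlinked, without rescanning the chain.
	 */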
17031c7c3c6aSMatthew Dillon 
17041c7c3c6aSMatthew Dillon 	if ((swap = *pswap) == NULL) {
17051c7c3c6aSMatthew Dillon 		int i;
17061c7c3c6aSMatthew Dillon 
17071c7c3c6aSMatthew Dillon 		if (swapblk == SWAPBLK_NONE)
17087827d9b0SAlan Cox 			goto done;
17091c7c3c6aSMatthew Dillon 
1710670d17b5SJeff Roberson 		swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
17114dcc5c2dSMatthew Dillon 		if (swap == NULL) {
17127827d9b0SAlan Cox 			mtx_unlock(&swhash_mtx);
1713ee3dc7d7SAlan Cox 			VM_OBJECT_UNLOCK(object);
1714663b416fSJohn Baldwin 			if (uma_zone_exhausted(swap_zone))
1715663b416fSJohn Baldwin 				printf("swap zone exhausted, increase kern.maxswzone\n");
17164dcc5c2dSMatthew Dillon 			VM_WAIT;
1717ee3dc7d7SAlan Cox 			VM_OBJECT_LOCK(object);
17184dcc5c2dSMatthew Dillon 			goto retry;
17194dcc5c2dSMatthew Dillon 		}
1720670d17b5SJeff Roberson 
17211c7c3c6aSMatthew Dillon 		swap->swb_hnext = NULL;
17221c7c3c6aSMatthew Dillon 		swap->swb_object = object;
172323f09d50SIan Dowse 		swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
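		/* swb_index is the base pindex of a SWAP_META_PAGES run. */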
17241c7c3c6aSMatthew Dillon 		swap->swb_count = 0;
17251c7c3c6aSMatthew Dillon 
17261c7c3c6aSMatthew Dillon 		++object->un_pager.swp.swp_bcount;
17271c7c3c6aSMatthew Dillon 
17281c7c3c6aSMatthew Dillon 		for (i = 0; i < SWAP_META_PAGES; ++i)
17291c7c3c6aSMatthew Dillon 			swap->swb_pages[i] = SWAPBLK_NONE;
17301c7c3c6aSMatthew Dillon 	}
17311c7c3c6aSMatthew Dillon 
17321c7c3c6aSMatthew Dillon 	/*
17331c7c3c6aSMatthew Dillon 	 * Delete prior contents of metadata
17341c7c3c6aSMatthew Dillon 	 */
173523f09d50SIan Dowse 	idx = pindex & SWAP_META_MASK;
17361c7c3c6aSMatthew Dillon 
173723f09d50SIan Dowse 	if (swap->swb_pages[idx] != SWAPBLK_NONE) {
173823f09d50SIan Dowse 		swp_pager_freeswapspace(swap->swb_pages[idx], 1);
17391c7c3c6aSMatthew Dillon 		--swap->swb_count;
17401c7c3c6aSMatthew Dillon 	}
17411c7c3c6aSMatthew Dillon 
17421c7c3c6aSMatthew Dillon 	/*
17431c7c3c6aSMatthew Dillon 	 * Enter block into metadata
17441c7c3c6aSMatthew Dillon 	 */
174523f09d50SIan Dowse 	swap->swb_pages[idx] = swapblk;
17464dcc5c2dSMatthew Dillon 	if (swapblk != SWAPBLK_NONE)
17471c7c3c6aSMatthew Dillon 		++swap->swb_count;
17487827d9b0SAlan Cox done:
17497827d9b0SAlan Cox 	mtx_unlock(&swhash_mtx);
17501c7c3c6aSMatthew Dillon }
17511c7c3c6aSMatthew Dillon 
17521c7c3c6aSMatthew Dillon /*
17531c7c3c6aSMatthew Dillon  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
17541c7c3c6aSMatthew Dillon  *
17551c7c3c6aSMatthew Dillon  *	The requested range of blocks is freed, with any associated swap
17561c7c3c6aSMatthew Dillon  *	returned to the swap bitmap.
17571c7c3c6aSMatthew Dillon  *
17581c7c3c6aSMatthew Dillon  *	This routine will free swap metadata structures as they are cleaned
17591c7c3c6aSMatthew Dillon  *	out.  This routine does *NOT* operate on swap metadata associated
17601c7c3c6aSMatthew Dillon  *	with resident pages.
17611c7c3c6aSMatthew Dillon  *
17621c7c3c6aSMatthew Dillon  *	This routine must be called at splvm()
17631c7c3c6aSMatthew Dillon  */
17641c7c3c6aSMatthew Dillon static void
17654dcc5c2dSMatthew Dillon swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
17661c7c3c6aSMatthew Dillon {
17672928cef7SAlan Cox 
1768ee3dc7d7SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
17691c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP)
17701c7c3c6aSMatthew Dillon 		return;
17711c7c3c6aSMatthew Dillon 
17721c7c3c6aSMatthew Dillon 	while (count > 0) {
17731c7c3c6aSMatthew Dillon 		struct swblock **pswap;
17741c7c3c6aSMatthew Dillon 		struct swblock *swap;
17751c7c3c6aSMatthew Dillon 
17767827d9b0SAlan Cox 		mtx_lock(&swhash_mtx);
17771c7c3c6aSMatthew Dillon 		pswap = swp_pager_hash(object, index);
17781c7c3c6aSMatthew Dillon 
17791c7c3c6aSMatthew Dillon 		if ((swap = *pswap) != NULL) {
17801c7c3c6aSMatthew Dillon 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
17811c7c3c6aSMatthew Dillon 
17821c7c3c6aSMatthew Dillon 			if (v != SWAPBLK_NONE) {
17831c7c3c6aSMatthew Dillon 				swp_pager_freeswapspace(v, 1);
17841c7c3c6aSMatthew Dillon 				swap->swb_pages[index & SWAP_META_MASK] =
17851c7c3c6aSMatthew Dillon 					SWAPBLK_NONE;
17861c7c3c6aSMatthew Dillon 				if (--swap->swb_count == 0) {
17871c7c3c6aSMatthew Dillon 					*pswap = swap->swb_hnext;
1788670d17b5SJeff Roberson 					uma_zfree(swap_zone, swap);
17891c7c3c6aSMatthew Dillon 					--object->un_pager.swp.swp_bcount;
17901c7c3c6aSMatthew Dillon 				}
17911c7c3c6aSMatthew Dillon 			}
17921c7c3c6aSMatthew Dillon 			--count;
17931c7c3c6aSMatthew Dillon 			++index;
17941c7c3c6aSMatthew Dillon 		} else {
17954dcc5c2dSMatthew Dillon 			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
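			/* No swblock here; skip to the next block boundary. */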
17961c7c3c6aSMatthew Dillon 			count -= n;
17971c7c3c6aSMatthew Dillon 			index += n;
17981c7c3c6aSMatthew Dillon 		}
17997827d9b0SAlan Cox 		mtx_unlock(&swhash_mtx);
18001c7c3c6aSMatthew Dillon 	}
18011c7c3c6aSMatthew Dillon }
18021c7c3c6aSMatthew Dillon 
18031c7c3c6aSMatthew Dillon /*
18041c7c3c6aSMatthew Dillon  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
18051c7c3c6aSMatthew Dillon  *
18061c7c3c6aSMatthew Dillon  *	This routine locates and destroys all swap metadata associated with
18071c7c3c6aSMatthew Dillon  *	an object.
18084dcc5c2dSMatthew Dillon  *
18094dcc5c2dSMatthew Dillon  *	This routine must be called at splvm()
18101c7c3c6aSMatthew Dillon  */
18111c7c3c6aSMatthew Dillon static void
18121c7c3c6aSMatthew Dillon swp_pager_meta_free_all(vm_object_t object)
18131c7c3c6aSMatthew Dillon {
18141c7c3c6aSMatthew Dillon 	daddr_t index = 0;
18151c7c3c6aSMatthew Dillon 
1816ee3dc7d7SAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
18171c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP)
18181c7c3c6aSMatthew Dillon 		return;
18191c7c3c6aSMatthew Dillon 
18201c7c3c6aSMatthew Dillon 	while (object->un_pager.swp.swp_bcount) {
18211c7c3c6aSMatthew Dillon 		struct swblock **pswap;
18221c7c3c6aSMatthew Dillon 		struct swblock *swap;
18231c7c3c6aSMatthew Dillon 
18247827d9b0SAlan Cox 		mtx_lock(&swhash_mtx);
18251c7c3c6aSMatthew Dillon 		pswap = swp_pager_hash(object, index);
18261c7c3c6aSMatthew Dillon 		if ((swap = *pswap) != NULL) {
18271c7c3c6aSMatthew Dillon 			int i;
18281c7c3c6aSMatthew Dillon 
18291c7c3c6aSMatthew Dillon 			for (i = 0; i < SWAP_META_PAGES; ++i) {
18301c7c3c6aSMatthew Dillon 				daddr_t v = swap->swb_pages[i];
18311c7c3c6aSMatthew Dillon 				if (v != SWAPBLK_NONE) {
18321c7c3c6aSMatthew Dillon 					--swap->swb_count;
18334dcc5c2dSMatthew Dillon 					swp_pager_freeswapspace(v, 1);
18341c7c3c6aSMatthew Dillon 				}
18351c7c3c6aSMatthew Dillon 			}
18361c7c3c6aSMatthew Dillon 			if (swap->swb_count != 0)
18371c7c3c6aSMatthew Dillon 				panic("swap_pager_meta_free_all: swb_count != 0");
18381c7c3c6aSMatthew Dillon 			*pswap = swap->swb_hnext;
1839670d17b5SJeff Roberson 			uma_zfree(swap_zone, swap);
18401c7c3c6aSMatthew Dillon 			--object->un_pager.swp.swp_bcount;
18411c7c3c6aSMatthew Dillon 		}
18427827d9b0SAlan Cox 		mtx_unlock(&swhash_mtx);
18431c7c3c6aSMatthew Dillon 		index += SWAP_META_PAGES;
18441c7c3c6aSMatthew Dillon 		if (index > 0x20000000)
18451c7c3c6aSMatthew Dillon 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
18461c7c3c6aSMatthew Dillon 	}
18471c7c3c6aSMatthew Dillon }
18481c7c3c6aSMatthew Dillon 
18491c7c3c6aSMatthew Dillon /*
18501c7c3c6aSMatthew Dillon  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
18511c7c3c6aSMatthew Dillon  *
18521c7c3c6aSMatthew Dillon  *	This routine is capable of looking up, popping, or freeing
18531c7c3c6aSMatthew Dillon  *	swapblk assignments in the swap meta data or in the vm_page_t.
18541c7c3c6aSMatthew Dillon  *	The routine typically returns the swapblk being looked-up, or popped,
18551c7c3c6aSMatthew Dillon  *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
18561c7c3c6aSMatthew Dillon  *	was invalid.  This routine will automatically free any invalid
18571c7c3c6aSMatthew Dillon  *	meta-data swapblks.
18581c7c3c6aSMatthew Dillon  *
18591c7c3c6aSMatthew Dillon  *	It is not possible to store invalid swapblks in the swap meta data
18601c7c3c6aSMatthew Dillon  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
18611c7c3c6aSMatthew Dillon  *
18621c7c3c6aSMatthew Dillon  *	When acting on a busy resident page and paging is in progress, we
18631c7c3c6aSMatthew Dillon  *	have to wait until paging is complete but otherwise can act on the
18641c7c3c6aSMatthew Dillon  *	busy page.
18651c7c3c6aSMatthew Dillon  *
18664dcc5c2dSMatthew Dillon  *	This routine must be called at splvm().
18671c7c3c6aSMatthew Dillon  *
18684dcc5c2dSMatthew Dillon  *	SWM_FREE	remove and free swap block from metadata
18691c7c3c6aSMatthew Dillon  *	SWM_POP		remove from meta data but do not free.. pop it out
18701c7c3c6aSMatthew Dillon  */
18711c7c3c6aSMatthew Dillon static daddr_t
18722f249180SPoul-Henning Kamp swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
18732f249180SPoul-Henning Kamp {
18744dcc5c2dSMatthew Dillon 	struct swblock **pswap;
18754dcc5c2dSMatthew Dillon 	struct swblock *swap;
18764dcc5c2dSMatthew Dillon 	daddr_t r1;
187723f09d50SIan Dowse 	int idx;
18784dcc5c2dSMatthew Dillon 
1879c7c8dd7eSAlan Cox 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
18801c7c3c6aSMatthew Dillon 	/*
18811c7c3c6aSMatthew Dillon 	 * The meta data only exists if the object is OBJT_SWAP
18821c7c3c6aSMatthew Dillon 	 * and even then might not be allocated yet.
18831c7c3c6aSMatthew Dillon 	 */
18844dcc5c2dSMatthew Dillon 	if (object->type != OBJT_SWAP)
18851c7c3c6aSMatthew Dillon 		return (SWAPBLK_NONE);
18861c7c3c6aSMatthew Dillon 
18874dcc5c2dSMatthew Dillon 	r1 = SWAPBLK_NONE;
18887827d9b0SAlan Cox 	mtx_lock(&swhash_mtx);
188923f09d50SIan Dowse 	pswap = swp_pager_hash(object, pindex);
18901c7c3c6aSMatthew Dillon 
18911c7c3c6aSMatthew Dillon 	if ((swap = *pswap) != NULL) {
189223f09d50SIan Dowse 		idx = pindex & SWAP_META_MASK;
189323f09d50SIan Dowse 		r1 = swap->swb_pages[idx];
18941c7c3c6aSMatthew Dillon 
18951c7c3c6aSMatthew Dillon 		if (r1 != SWAPBLK_NONE) {
18961c7c3c6aSMatthew Dillon 			if (flags & SWM_FREE) {
18974dcc5c2dSMatthew Dillon 				swp_pager_freeswapspace(r1, 1);
18981c7c3c6aSMatthew Dillon 				r1 = SWAPBLK_NONE;
18991c7c3c6aSMatthew Dillon 			}
19001c7c3c6aSMatthew Dillon 			if (flags & (SWM_FREE|SWM_POP)) {
190123f09d50SIan Dowse 				swap->swb_pages[idx] = SWAPBLK_NONE;
19021c7c3c6aSMatthew Dillon 				if (--swap->swb_count == 0) {
19031c7c3c6aSMatthew Dillon 					*pswap = swap->swb_hnext;
1904670d17b5SJeff Roberson 					uma_zfree(swap_zone, swap);
19051c7c3c6aSMatthew Dillon 					--object->un_pager.swp.swp_bcount;
19061c7c3c6aSMatthew Dillon 				}
19071c7c3c6aSMatthew Dillon 			}
19081c7c3c6aSMatthew Dillon 		}
19091c7c3c6aSMatthew Dillon 	}
19107827d9b0SAlan Cox 	mtx_unlock(&swhash_mtx);
19111c7c3c6aSMatthew Dillon 	return (r1);
19121c7c3c6aSMatthew Dillon }
19131c7c3c6aSMatthew Dillon 
1914e9c0cc15SPoul-Henning Kamp /*
1915e9c0cc15SPoul-Henning Kamp  * System call swapon(name) enables swapping on device name,
1916e9c0cc15SPoul-Henning Kamp  * which must be in the swdevsw.  Return EBUSY
1917e9c0cc15SPoul-Henning Kamp  * if already swapping on this device.
1918e9c0cc15SPoul-Henning Kamp  */
1919e9c0cc15SPoul-Henning Kamp #ifndef _SYS_SYSPROTO_H_
1920e9c0cc15SPoul-Henning Kamp struct swapon_args {
1921e9c0cc15SPoul-Henning Kamp 	char *name;
1922e9c0cc15SPoul-Henning Kamp };
1923e9c0cc15SPoul-Henning Kamp #endif
1924e9c0cc15SPoul-Henning Kamp 
1925e9c0cc15SPoul-Henning Kamp /*
1926e9c0cc15SPoul-Henning Kamp  * MPSAFE
1927e9c0cc15SPoul-Henning Kamp  */
1928e9c0cc15SPoul-Henning Kamp /* ARGSUSED */
1929e9c0cc15SPoul-Henning Kamp int
19302f249180SPoul-Henning Kamp swapon(struct thread *td, struct swapon_args *uap)
1931e9c0cc15SPoul-Henning Kamp {
1932e9c0cc15SPoul-Henning Kamp 	struct vattr attr;
1933e9c0cc15SPoul-Henning Kamp 	struct vnode *vp;
1934e9c0cc15SPoul-Henning Kamp 	struct nameidata nd;
1935e9c0cc15SPoul-Henning Kamp 	int error;
1936e9c0cc15SPoul-Henning Kamp 
1937acd3428bSRobert Watson 	error = priv_check(td, PRIV_SWAPON);
1938e9c0cc15SPoul-Henning Kamp 	if (error)
1939acd3428bSRobert Watson 		return (error);
1940e9c0cc15SPoul-Henning Kamp 
1941acd3428bSRobert Watson 	mtx_lock(&Giant);
1942e9c0cc15SPoul-Henning Kamp 	while (swdev_syscall_active)
1943e9c0cc15SPoul-Henning Kamp 	    tsleep(&swdev_syscall_active, PUSER - 1, "swpon", 0);
1944e9c0cc15SPoul-Henning Kamp 	swdev_syscall_active = 1;
1945e9c0cc15SPoul-Henning Kamp 
1946e9c0cc15SPoul-Henning Kamp 	/*
1947e9c0cc15SPoul-Henning Kamp 	 * Swap metadata may not fit in the KVM if we have physical
1948e9c0cc15SPoul-Henning Kamp 	 * memory of >1GB.
1949e9c0cc15SPoul-Henning Kamp 	 */
1950e9c0cc15SPoul-Henning Kamp 	if (swap_zone == NULL) {
1951e9c0cc15SPoul-Henning Kamp 		error = ENOMEM;
1952e9c0cc15SPoul-Henning Kamp 		goto done;
1953e9c0cc15SPoul-Henning Kamp 	}
1954e9c0cc15SPoul-Henning Kamp 
1955d9135e72SRobert Watson 	NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
1956d9135e72SRobert Watson 	    uap->name, td);
1957e9c0cc15SPoul-Henning Kamp 	error = namei(&nd);
1958e9c0cc15SPoul-Henning Kamp 	if (error)
1959e9c0cc15SPoul-Henning Kamp 		goto done;
1960e9c0cc15SPoul-Henning Kamp 
1961e9c0cc15SPoul-Henning Kamp 	NDFREE(&nd, NDF_ONLY_PNBUF);
1962e9c0cc15SPoul-Henning Kamp 	vp = nd.ni_vp;
1963e9c0cc15SPoul-Henning Kamp 
196420da9c2eSPoul-Henning Kamp 	if (vn_isdisk(vp, &error)) {
1965dee34ca4SPoul-Henning Kamp 		error = swapongeom(td, vp);
196620da9c2eSPoul-Henning Kamp 	} else if (vp->v_type == VREG &&
1967e9c0cc15SPoul-Henning Kamp 	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
1968e9c0cc15SPoul-Henning Kamp 	    (error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) == 0) {
1969e9c0cc15SPoul-Henning Kamp 		/*
1970e9c0cc15SPoul-Henning Kamp 		 * Allow direct swapping to NFS regular files in the same
1971e9c0cc15SPoul-Henning Kamp 		 * way that nfs_mountroot() sets up diskless swapping.
1972e9c0cc15SPoul-Henning Kamp 		 */
197359efee01SPoul-Henning Kamp 		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
1974e9c0cc15SPoul-Henning Kamp 	}
1975e9c0cc15SPoul-Henning Kamp 
1976e9c0cc15SPoul-Henning Kamp 	if (error)
1977e9c0cc15SPoul-Henning Kamp 		vrele(vp);
1978e9c0cc15SPoul-Henning Kamp done:
1979e9c0cc15SPoul-Henning Kamp 	swdev_syscall_active = 0;
1980e9c0cc15SPoul-Henning Kamp 	wakeup_one(&swdev_syscall_active);
1981e9c0cc15SPoul-Henning Kamp 	mtx_unlock(&Giant);
1982e9c0cc15SPoul-Henning Kamp 	return (error);
1983e9c0cc15SPoul-Henning Kamp }
1984e9c0cc15SPoul-Henning Kamp 
198559efee01SPoul-Henning Kamp static void
1986f3732fd1SPoul-Henning Kamp swaponsomething(struct vnode *vp, void *id, u_long nblks, sw_strategy_t *strategy, sw_close_t *close, dev_t dev)
1987e9c0cc15SPoul-Henning Kamp {
19882d9974c1SAlan Cox 	struct swdevt *sp, *tsp;
1989e9c0cc15SPoul-Henning Kamp 	swblk_t dvbase;
19908f60c087SPoul-Henning Kamp 	u_long mblocks;
1991e9c0cc15SPoul-Henning Kamp 
1992e9c0cc15SPoul-Henning Kamp 	/*
1993e9c0cc15SPoul-Henning Kamp 	 * If we go beyond this, we get overflows in the radix
1994e9c0cc15SPoul-Henning Kamp 	 * tree bitmap code.
1995e9c0cc15SPoul-Henning Kamp 	 */
19968f60c087SPoul-Henning Kamp 	mblocks = 0x40000000 / BLIST_META_RADIX;
1997d3dd89abSPoul-Henning Kamp 	if (nblks > mblocks) {
199885fdafb9SPoul-Henning Kamp 		printf("WARNING: reducing size to maximum of %lu blocks per swap unit\n",
1999d3dd89abSPoul-Henning Kamp 			mblocks);
2000d3dd89abSPoul-Henning Kamp 		nblks = mblocks;
2001e9c0cc15SPoul-Henning Kamp 	}
2002e9c0cc15SPoul-Henning Kamp 	/*
2003e9c0cc15SPoul-Henning Kamp 	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2004e9c0cc15SPoul-Henning Kamp 	 * First chop nblks off to page-align it, then convert.
2005e9c0cc15SPoul-Henning Kamp 	 *
2006e9c0cc15SPoul-Henning Kamp 	 * sw->sw_nblks is in page-sized chunks now too.
2007e9c0cc15SPoul-Henning Kamp 	 */
2008e9c0cc15SPoul-Henning Kamp 	nblks &= ~(ctodb(1) - 1);
2009e9c0cc15SPoul-Henning Kamp 	nblks = dbtoc(nblks);
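	/*
	 * e.g. assuming 512-byte disk blocks and 4 KB pages, a
	 * 1048576-block (512 MB) device yields nblks == 131072.
	 */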
2010e9c0cc15SPoul-Henning Kamp 
20118f60c087SPoul-Henning Kamp 	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2012dee34ca4SPoul-Henning Kamp 	sp->sw_vp = vp;
2013dee34ca4SPoul-Henning Kamp 	sp->sw_id = id;
2014f3732fd1SPoul-Henning Kamp 	sp->sw_dev = dev;
20158d677ef9SPoul-Henning Kamp 	sp->sw_flags = 0;
2016e9c0cc15SPoul-Henning Kamp 	sp->sw_nblks = nblks;
2017e9c0cc15SPoul-Henning Kamp 	sp->sw_used = 0;
201859efee01SPoul-Henning Kamp 	sp->sw_strategy = strategy;
2019dee34ca4SPoul-Henning Kamp 	sp->sw_close = close;
2020e9c0cc15SPoul-Henning Kamp 
20218f60c087SPoul-Henning Kamp 	sp->sw_blist = blist_create(nblks);
2022e9c0cc15SPoul-Henning Kamp 	/*
2023ef3c5abdSPoul-Henning Kamp 	 * Do not free the first two blocks in order to avoid overwriting
20248f60c087SPoul-Henning Kamp 	 * any BSD label at the front of the partition.
2025e9c0cc15SPoul-Henning Kamp 	 */
2026ef3c5abdSPoul-Henning Kamp 	blist_free(sp->sw_blist, 2, nblks - 2);
2027e9c0cc15SPoul-Henning Kamp 
20282d9974c1SAlan Cox 	dvbase = 0;
202920da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
20302d9974c1SAlan Cox 	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
20312d9974c1SAlan Cox 		if (tsp->sw_end >= dvbase) {
20322d9974c1SAlan Cox 			/*
20332d9974c1SAlan Cox 			 * We put one uncovered page between the devices
20342d9974c1SAlan Cox 			 * in order to definitively prevent any cross-device
20352d9974c1SAlan Cox 			 * I/O requests
20362d9974c1SAlan Cox 			 */
20372d9974c1SAlan Cox 			dvbase = tsp->sw_end + 1;
20382d9974c1SAlan Cox 		}
20392d9974c1SAlan Cox 	}
20402d9974c1SAlan Cox 	sp->sw_first = dvbase;
20412d9974c1SAlan Cox 	sp->sw_end = dvbase + nblks;
20428f60c087SPoul-Henning Kamp 	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
20438f60c087SPoul-Henning Kamp 	nswapdev++;
20448f60c087SPoul-Henning Kamp 	swap_pager_avail += nblks;
2045d05bc129SAlan Cox 	swp_sizecheck();
2046d05bc129SAlan Cox 	mtx_unlock(&sw_dev_mtx);
204759efee01SPoul-Henning Kamp }
2048e9c0cc15SPoul-Henning Kamp 
2049e9c0cc15SPoul-Henning Kamp /*
2050e9c0cc15SPoul-Henning Kamp  * SYSCALL: swapoff(devname)
2051e9c0cc15SPoul-Henning Kamp  *
2052e9c0cc15SPoul-Henning Kamp  * Disable swapping on the given device.
2053dee34ca4SPoul-Henning Kamp  *
2054dee34ca4SPoul-Henning Kamp  * XXX: Badly designed system call: it should use a device index
2055dee34ca4SPoul-Henning Kamp  * rather than filename as specification.  We keep sw_vp around
2056dee34ca4SPoul-Henning Kamp  * only to make this work.
2057e9c0cc15SPoul-Henning Kamp  */
2058e9c0cc15SPoul-Henning Kamp #ifndef _SYS_SYSPROTO_H_
2059e9c0cc15SPoul-Henning Kamp struct swapoff_args {
2060e9c0cc15SPoul-Henning Kamp 	char *name;
2061e9c0cc15SPoul-Henning Kamp };
2062e9c0cc15SPoul-Henning Kamp #endif
2063e9c0cc15SPoul-Henning Kamp 
2064e9c0cc15SPoul-Henning Kamp /*
2065e9c0cc15SPoul-Henning Kamp  * MPSAFE
2066e9c0cc15SPoul-Henning Kamp  */
2067e9c0cc15SPoul-Henning Kamp /* ARGSUSED */
2068e9c0cc15SPoul-Henning Kamp int
20692f249180SPoul-Henning Kamp swapoff(struct thread *td, struct swapoff_args *uap)
2070e9c0cc15SPoul-Henning Kamp {
2071e9c0cc15SPoul-Henning Kamp 	struct vnode *vp;
2072e9c0cc15SPoul-Henning Kamp 	struct nameidata nd;
2073e9c0cc15SPoul-Henning Kamp 	struct swdevt *sp;
20748f60c087SPoul-Henning Kamp 	int error;
2075e9c0cc15SPoul-Henning Kamp 
2076acd3428bSRobert Watson 	error = priv_check(td, PRIV_SWAPOFF);
2077e9c0cc15SPoul-Henning Kamp 	if (error)
20780909f38aSPawel Jakub Dawidek 		return (error);
2079e9c0cc15SPoul-Henning Kamp 
20800909f38aSPawel Jakub Dawidek 	mtx_lock(&Giant);
2081e9c0cc15SPoul-Henning Kamp 	while (swdev_syscall_active)
2082e9c0cc15SPoul-Henning Kamp 		tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
2083e9c0cc15SPoul-Henning Kamp 	swdev_syscall_active = 1;
2084e9c0cc15SPoul-Henning Kamp 
2085d9135e72SRobert Watson 	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
2086d9135e72SRobert Watson 	    td);
2087e9c0cc15SPoul-Henning Kamp 	error = namei(&nd);
2088e9c0cc15SPoul-Henning Kamp 	if (error)
2089e9c0cc15SPoul-Henning Kamp 		goto done;
2090e9c0cc15SPoul-Henning Kamp 	NDFREE(&nd, NDF_ONLY_PNBUF);
2091e9c0cc15SPoul-Henning Kamp 	vp = nd.ni_vp;
2092e9c0cc15SPoul-Henning Kamp 
209320da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
20948f60c087SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2095dee34ca4SPoul-Henning Kamp 		if (sp->sw_vp == vp)
20960909f38aSPawel Jakub Dawidek 			break;
2097e9c0cc15SPoul-Henning Kamp 	}
209820da9c2eSPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
20990909f38aSPawel Jakub Dawidek 	if (sp == NULL) {
2100e9c0cc15SPoul-Henning Kamp 		error = EINVAL;
2101e9c0cc15SPoul-Henning Kamp 		goto done;
21020909f38aSPawel Jakub Dawidek 	}
210335918c55SChristian S.J. Peron 	error = swapoff_one(sp, td->td_ucred);
21040909f38aSPawel Jakub Dawidek done:
21050909f38aSPawel Jakub Dawidek 	swdev_syscall_active = 0;
21060909f38aSPawel Jakub Dawidek 	wakeup_one(&swdev_syscall_active);
21070909f38aSPawel Jakub Dawidek 	mtx_unlock(&Giant);
21080909f38aSPawel Jakub Dawidek 	return (error);
21090909f38aSPawel Jakub Dawidek }
21100909f38aSPawel Jakub Dawidek 
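/*
 * Drain one swap device and detach it: the check below verifies that
 * the rest of the system can absorb the device's pages, allocations
 * are then fenced off, the contents are paged back in, and the device
 * is closed and freed.  Caller holds Giant.
 */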
21110909f38aSPawel Jakub Dawidek static int
211235918c55SChristian S.J. Peron swapoff_one(struct swdevt *sp, struct ucred *cred)
21130909f38aSPawel Jakub Dawidek {
21140909f38aSPawel Jakub Dawidek 	u_long nblks, dvbase;
2115e9c0cc15SPoul-Henning Kamp #ifdef MAC
21160909f38aSPawel Jakub Dawidek 	int error;
2117e9c0cc15SPoul-Henning Kamp #endif
2118e9c0cc15SPoul-Henning Kamp 
21190909f38aSPawel Jakub Dawidek 	mtx_assert(&Giant, MA_OWNED);
21200909f38aSPawel Jakub Dawidek #ifdef MAC
212135918c55SChristian S.J. Peron 	(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY, curthread);
212235918c55SChristian S.J. Peron 	error = mac_system_check_swapoff(cred, sp->sw_vp);
212335918c55SChristian S.J. Peron 	(void) VOP_UNLOCK(sp->sw_vp, 0, curthread);
21240909f38aSPawel Jakub Dawidek 	if (error != 0)
21250909f38aSPawel Jakub Dawidek 		return (error);
21260909f38aSPawel Jakub Dawidek #endif
2127e9c0cc15SPoul-Henning Kamp 	nblks = sp->sw_nblks;
2128e9c0cc15SPoul-Henning Kamp 
2129e9c0cc15SPoul-Henning Kamp 	/*
2130e9c0cc15SPoul-Henning Kamp 	 * We can turn off this swap device safely only if the
2131e9c0cc15SPoul-Henning Kamp 	 * available virtual memory in the system can hold the amount
2132e9c0cc15SPoul-Henning Kamp 	 * of data we will have to page back in, plus an epsilon so
2133e9c0cc15SPoul-Henning Kamp 	 * the system doesn't become critically low on swap space.
2134e9c0cc15SPoul-Henning Kamp 	 */
21352feb50bfSAttilio Rao 	if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
21362feb50bfSAttilio Rao 	    nblks + nswap_lowat) {
21370909f38aSPawel Jakub Dawidek 		return (ENOMEM);
2138e9c0cc15SPoul-Henning Kamp 	}
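	/*
	 * Illustrative numbers (assumed): removing a 50000-page device
	 * succeeds only if v_free_count + v_cache_count + swap_pager_avail
	 * reaches 50000 + nswap_lowat, e.g. 20000 free + 5000 cached +
	 * 30000 pages spare on other devices passes for nswap_lowat <= 5000.
	 */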
2139e9c0cc15SPoul-Henning Kamp 
2140e9c0cc15SPoul-Henning Kamp 	/*
2141e9c0cc15SPoul-Henning Kamp 	 * Prevent further allocations on this device.
2142e9c0cc15SPoul-Henning Kamp 	 */
21432928cef7SAlan Cox 	mtx_lock(&sw_dev_mtx);
2144e9c0cc15SPoul-Henning Kamp 	sp->sw_flags |= SW_CLOSING;
21458f60c087SPoul-Henning Kamp 	for (dvbase = 0; dvbase < sp->sw_end; dvbase += dmmax) {
21468f60c087SPoul-Henning Kamp 		swap_pager_avail -= blist_fill(sp->sw_blist,
21478f60c087SPoul-Henning Kamp 		     dvbase, dmmax);
2148e9c0cc15SPoul-Henning Kamp 	}
21492928cef7SAlan Cox 	mtx_unlock(&sw_dev_mtx);
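	/*
	 * Note on the loop above: blist_fill() marks each dmmax-sized run
	 * of blocks allocated and returns how many of them were previously
	 * free, so the subtraction keeps swap_pager_avail consistent while
	 * fencing off new allocations on this device.
	 */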
2150e9c0cc15SPoul-Henning Kamp 
2151e9c0cc15SPoul-Henning Kamp 	/*
2152e9c0cc15SPoul-Henning Kamp 	 * Page in the contents of the device and close it.
2153e9c0cc15SPoul-Henning Kamp 	 */
2154b3fed13eSDavid Schultz 	swap_pager_swapoff(sp);
2155e9c0cc15SPoul-Henning Kamp 
215635918c55SChristian S.J. Peron 	sp->sw_close(curthread, sp);
215759efee01SPoul-Henning Kamp 	sp->sw_id = NULL;
215820da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
21598f60c087SPoul-Henning Kamp 	TAILQ_REMOVE(&swtailq, sp, sw_list);
21600676a140SAlan Cox 	nswapdev--;
21617dea2c2eSAlan Cox 	if (nswapdev == 0) {
21627dea2c2eSAlan Cox 		swap_pager_full = 2;
21637dea2c2eSAlan Cox 		swap_pager_almost_full = 1;
21647dea2c2eSAlan Cox 	}
21658f60c087SPoul-Henning Kamp 	if (swdevhd == sp)
21668f60c087SPoul-Henning Kamp 		swdevhd = NULL;
2167d05bc129SAlan Cox 	mtx_unlock(&sw_dev_mtx);
21688f60c087SPoul-Henning Kamp 	blist_destroy(sp->sw_blist);
21698f60c087SPoul-Henning Kamp 	free(sp, M_VMPGDATA);
21700909f38aSPawel Jakub Dawidek 	return (0);
21710909f38aSPawel Jakub Dawidek }
2172e9c0cc15SPoul-Henning Kamp 
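/*
 * Remove every configured swap device; used e.g. during system
 * shutdown (assumption about the caller).  A device that cannot be
 * removed is reported and skipped rather than aborting the loop.
 */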
21730909f38aSPawel Jakub Dawidek void
21740909f38aSPawel Jakub Dawidek swapoff_all(void)
21750909f38aSPawel Jakub Dawidek {
21760909f38aSPawel Jakub Dawidek 	struct swdevt *sp, *spt;
21770909f38aSPawel Jakub Dawidek 	const char *devname;
21780909f38aSPawel Jakub Dawidek 	int error;
21790909f38aSPawel Jakub Dawidek 
21800909f38aSPawel Jakub Dawidek 	mtx_lock(&Giant);
21810909f38aSPawel Jakub Dawidek 	while (swdev_syscall_active)
21820909f38aSPawel Jakub Dawidek 		tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
21830909f38aSPawel Jakub Dawidek 	swdev_syscall_active = 1;
21840909f38aSPawel Jakub Dawidek 
21850909f38aSPawel Jakub Dawidek 	mtx_lock(&sw_dev_mtx);
21860909f38aSPawel Jakub Dawidek 	TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
21870909f38aSPawel Jakub Dawidek 		mtx_unlock(&sw_dev_mtx);
21880909f38aSPawel Jakub Dawidek 		if (vn_isdisk(sp->sw_vp, NULL))
21890909f38aSPawel Jakub Dawidek 			devname = sp->sw_vp->v_rdev->si_name;
21900909f38aSPawel Jakub Dawidek 		else
21910909f38aSPawel Jakub Dawidek 			devname = "[file]";
219235918c55SChristian S.J. Peron 		error = swapoff_one(sp, thread0.td_ucred);
21930909f38aSPawel Jakub Dawidek 		if (error != 0) {
21940909f38aSPawel Jakub Dawidek 			printf("Cannot remove swap device %s (error=%d), "
21950909f38aSPawel Jakub Dawidek 			    "skipping.\n", devname, error);
21960909f38aSPawel Jakub Dawidek 		} else if (bootverbose) {
21970909f38aSPawel Jakub Dawidek 			printf("Swap device %s removed.\n", devname);
21980909f38aSPawel Jakub Dawidek 		}
21990909f38aSPawel Jakub Dawidek 		mtx_lock(&sw_dev_mtx);
22000909f38aSPawel Jakub Dawidek 	}
22010909f38aSPawel Jakub Dawidek 	mtx_unlock(&sw_dev_mtx);
22020909f38aSPawel Jakub Dawidek 
2203e9c0cc15SPoul-Henning Kamp 	swdev_syscall_active = 0;
2204e9c0cc15SPoul-Henning Kamp 	wakeup_one(&swdev_syscall_active);
2205e9c0cc15SPoul-Henning Kamp 	mtx_unlock(&Giant);
2206e9c0cc15SPoul-Henning Kamp }
2207e9c0cc15SPoul-Henning Kamp 
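/*
 * Report the total configured swap and the amount currently in use,
 * both in pages, summed across all swap devices.
 */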
2208567104a1SPoul-Henning Kamp void
2209567104a1SPoul-Henning Kamp swap_pager_status(int *total, int *used)
2210567104a1SPoul-Henning Kamp {
2211567104a1SPoul-Henning Kamp 	struct swdevt *sp;
2212567104a1SPoul-Henning Kamp 
2213567104a1SPoul-Henning Kamp 	*total = 0;
2214567104a1SPoul-Henning Kamp 	*used = 0;
221520da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
22168f60c087SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2217567104a1SPoul-Henning Kamp 		*total += sp->sw_nblks;
2218567104a1SPoul-Henning Kamp 		*used += sp->sw_used;
2219567104a1SPoul-Henning Kamp 	}
222020da9c2eSPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
2221567104a1SPoul-Henning Kamp }
2222567104a1SPoul-Henning Kamp 
2223e9c0cc15SPoul-Henning Kamp static int
2224e9c0cc15SPoul-Henning Kamp sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2225e9c0cc15SPoul-Henning Kamp {
2226e9c0cc15SPoul-Henning Kamp 	int	*name = (int *)arg1;
22278f60c087SPoul-Henning Kamp 	int	error, n;
2228e9c0cc15SPoul-Henning Kamp 	struct xswdev xs;
2229e9c0cc15SPoul-Henning Kamp 	struct swdevt *sp;
2230e9c0cc15SPoul-Henning Kamp 
2231e9c0cc15SPoul-Henning Kamp 	if (arg2 != 1) /* name length */
2232e9c0cc15SPoul-Henning Kamp 		return (EINVAL);
2233e9c0cc15SPoul-Henning Kamp 
22348f60c087SPoul-Henning Kamp 	n = 0;
223520da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
22368f60c087SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2237e9c0cc15SPoul-Henning Kamp 		if (n == *name) {
223820da9c2eSPoul-Henning Kamp 			mtx_unlock(&sw_dev_mtx);
2239e9c0cc15SPoul-Henning Kamp 			xs.xsw_version = XSWDEV_VERSION;
2240f3732fd1SPoul-Henning Kamp 			xs.xsw_dev = sp->sw_dev;
2241e9c0cc15SPoul-Henning Kamp 			xs.xsw_flags = sp->sw_flags;
2242e9c0cc15SPoul-Henning Kamp 			xs.xsw_nblks = sp->sw_nblks;
2243e9c0cc15SPoul-Henning Kamp 			xs.xsw_used = sp->sw_used;
2244e9c0cc15SPoul-Henning Kamp 
2245e9c0cc15SPoul-Henning Kamp 			error = SYSCTL_OUT(req, &xs, sizeof(xs));
2246e9c0cc15SPoul-Henning Kamp 			return (error);
2247e9c0cc15SPoul-Henning Kamp 		}
2248e9c0cc15SPoul-Henning Kamp 		n++;
2249e9c0cc15SPoul-Henning Kamp 	}
225020da9c2eSPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
2251e9c0cc15SPoul-Henning Kamp 	return (ENOENT);
2252e9c0cc15SPoul-Henning Kamp }
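/*
 * Consumer sketch (assumption about userland): callers walk
 * vm.swap_info.0, vm.swap_info.1, ... and treat the first ENOENT as
 * the end of the device list, which is how swapinfo(8)-style tools
 * enumerate swap devices.
 */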
2253e9c0cc15SPoul-Henning Kamp 
22548f60c087SPoul-Henning Kamp SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2255e9c0cc15SPoul-Henning Kamp     "Number of swap devices");
2256e9c0cc15SPoul-Henning Kamp SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD, sysctl_vm_swap_info,
2257e9c0cc15SPoul-Henning Kamp     "Swap statistics by device");
2258ec38b344SPoul-Henning Kamp 
2259ec38b344SPoul-Henning Kamp /*
2260ec38b344SPoul-Henning Kamp  * vmspace_swap_count() - count the approximate swap usage in pages for a
2261ec38b344SPoul-Henning Kamp  *			  vmspace.
2262ec38b344SPoul-Henning Kamp  *
2263ec38b344SPoul-Henning Kamp  *	The map must be locked.
2264ec38b344SPoul-Henning Kamp  *
2265ec38b344SPoul-Henning Kamp  *	Swap usage is determined by taking the proportional swap used by
2266ec38b344SPoul-Henning Kamp  *	VM objects backing the VM map.  To make up for fractional losses,
2267ec38b344SPoul-Henning Kamp  *	if the VM object has any swap use at all the associated map entries
2268ec38b344SPoul-Henning Kamp  *	count for at least 1 swap page.
2269ec38b344SPoul-Henning Kamp  */
2270ec38b344SPoul-Henning Kamp int
2271ec38b344SPoul-Henning Kamp vmspace_swap_count(struct vmspace *vmspace)
2272ec38b344SPoul-Henning Kamp {
2273ec38b344SPoul-Henning Kamp 	vm_map_t map = &vmspace->vm_map;
2274ec38b344SPoul-Henning Kamp 	vm_map_entry_t cur;
2275ec38b344SPoul-Henning Kamp 	int count = 0;
2276ec38b344SPoul-Henning Kamp 
2277ec38b344SPoul-Henning Kamp 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
2278ec38b344SPoul-Henning Kamp 		vm_object_t object;
2279ec38b344SPoul-Henning Kamp 
2280ec38b344SPoul-Henning Kamp 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2281ec38b344SPoul-Henning Kamp 		    (object = cur->object.vm_object) != NULL) {
2282ec38b344SPoul-Henning Kamp 			VM_OBJECT_LOCK(object);
2283ec38b344SPoul-Henning Kamp 			if (object->type == OBJT_SWAP &&
2284ec38b344SPoul-Henning Kamp 			    object->un_pager.swp.swp_bcount != 0) {
2285ec38b344SPoul-Henning Kamp 				int n = (cur->end - cur->start) / PAGE_SIZE;
2286ec38b344SPoul-Henning Kamp 
2287ec38b344SPoul-Henning Kamp 				count += object->un_pager.swp.swp_bcount *
2288ec38b344SPoul-Henning Kamp 				    SWAP_META_PAGES * n / object->size + 1;
2289ec38b344SPoul-Henning Kamp 			}
2290ec38b344SPoul-Henning Kamp 			VM_OBJECT_UNLOCK(object);
2291ec38b344SPoul-Henning Kamp 		}
2292ec38b344SPoul-Henning Kamp 	}
2293ec38b344SPoul-Henning Kamp 	return (count);
2294ec38b344SPoul-Henning Kamp }
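/*
 * Worked example for the formula above (values assumed,
 * SWAP_META_PAGES taken as 16): an object of 1000 pages with
 * swp_bcount == 4 charges a map entry covering n == 250 pages
 * 4 * 16 * 250 / 1000 + 1 == 17 pages of swap.
 */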
2295dee34ca4SPoul-Henning Kamp 
2296dee34ca4SPoul-Henning Kamp /*
2297dee34ca4SPoul-Henning Kamp  * GEOM backend
2298dee34ca4SPoul-Henning Kamp  *
2299dee34ca4SPoul-Henning Kamp  * Swapping onto disk devices.
2301dee34ca4SPoul-Henning Kamp  */
2302dee34ca4SPoul-Henning Kamp 
23035721c9c7SPoul-Henning Kamp static g_orphan_t swapgeom_orphan;
23045721c9c7SPoul-Henning Kamp 
2305dee34ca4SPoul-Henning Kamp static struct g_class g_swap_class = {
2306dee34ca4SPoul-Henning Kamp 	.name = "SWAP",
23075721c9c7SPoul-Henning Kamp 	.version = G_VERSION,
23085721c9c7SPoul-Henning Kamp 	.orphan = swapgeom_orphan,
2309dee34ca4SPoul-Henning Kamp };
2310dee34ca4SPoul-Henning Kamp 
2311dee34ca4SPoul-Henning Kamp DECLARE_GEOM_CLASS(g_swap_class, g_class);
2312dee34ca4SPoul-Henning Kamp 
2314dee34ca4SPoul-Henning Kamp static void
2315dee34ca4SPoul-Henning Kamp swapgeom_done(struct bio *bp2)
2316dee34ca4SPoul-Henning Kamp {
2317dee34ca4SPoul-Henning Kamp 	struct buf *bp;
2318dee34ca4SPoul-Henning Kamp 
2319dee34ca4SPoul-Henning Kamp 	bp = bp2->bio_caller2;
2320c5d3d25eSPoul-Henning Kamp 	bp->b_ioflags = bp2->bio_flags;
2321dee34ca4SPoul-Henning Kamp 	if (bp2->bio_error)
2322dee34ca4SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
2323c5d3d25eSPoul-Henning Kamp 	bp->b_resid = bp->b_bcount - bp2->bio_completed;
2324c5d3d25eSPoul-Henning Kamp 	bp->b_error = bp2->bio_error;
2325dee34ca4SPoul-Henning Kamp 	bufdone(bp);
2326dee34ca4SPoul-Henning Kamp 	g_destroy_bio(bp2);
2327dee34ca4SPoul-Henning Kamp }
2328dee34ca4SPoul-Henning Kamp 
2329dee34ca4SPoul-Henning Kamp static void
2330dee34ca4SPoul-Henning Kamp swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2331dee34ca4SPoul-Henning Kamp {
2332dee34ca4SPoul-Henning Kamp 	struct bio *bio;
2333dee34ca4SPoul-Henning Kamp 	struct g_consumer *cp;
2334dee34ca4SPoul-Henning Kamp 
2335dee34ca4SPoul-Henning Kamp 	cp = sp->sw_id;
2336dee34ca4SPoul-Henning Kamp 	if (cp == NULL) {
2337dee34ca4SPoul-Henning Kamp 		bp->b_error = ENXIO;
2338dee34ca4SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
2339dee34ca4SPoul-Henning Kamp 		bufdone(bp);
2340dee34ca4SPoul-Henning Kamp 		return;
2341dee34ca4SPoul-Henning Kamp 	}
23424f8205e5SPoul-Henning Kamp 	bio = g_alloc_bio();
23434f8205e5SPoul-Henning Kamp #if 0
23443e5b6861SPoul-Henning Kamp 	/*
23454f8205e5SPoul-Henning Kamp 	 * XXX: We shouldn't really sleep here when we run out of buffers
23464f8205e5SPoul-Henning Kamp 	 * XXX: but the alternative is worse right now.
23473e5b6861SPoul-Henning Kamp 	 */
23484f8205e5SPoul-Henning Kamp 	if (bio == NULL) {
23493e5b6861SPoul-Henning Kamp 		bp->b_error = ENOMEM;
23503e5b6861SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
23513e5b6861SPoul-Henning Kamp 		bufdone(bp);
23523e5b6861SPoul-Henning Kamp 		return;
23533e5b6861SPoul-Henning Kamp 	}
23544f8205e5SPoul-Henning Kamp #endif
2355dee34ca4SPoul-Henning Kamp 	bio->bio_caller2 = bp;
2356c5d3d25eSPoul-Henning Kamp 	bio->bio_cmd = bp->b_iocmd;
2357c5d3d25eSPoul-Henning Kamp 	bio->bio_data = bp->b_data;
2358dee34ca4SPoul-Henning Kamp 	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2359dee34ca4SPoul-Henning Kamp 	bio->bio_length = bp->b_bcount;
2360dee34ca4SPoul-Henning Kamp 	bio->bio_done = swapgeom_done;
2361dee34ca4SPoul-Henning Kamp 	g_io_request(bio, cp);
2362dee34ca4SPoul-Henning Kamp 	return;
2363dee34ca4SPoul-Henning Kamp }
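/*
 * Illustrative translation done by swapgeom_strategy(): a one-page
 * write at swap page sp->sw_first + 3 becomes a bio with
 * bio_offset == 3 * PAGE_SIZE and bio_length == PAGE_SIZE on the
 * underlying GEOM provider.
 */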
2364dee34ca4SPoul-Henning Kamp 
2365dee34ca4SPoul-Henning Kamp static void
2366dee34ca4SPoul-Henning Kamp swapgeom_orphan(struct g_consumer *cp)
2367dee34ca4SPoul-Henning Kamp {
2368dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
2369dee34ca4SPoul-Henning Kamp 
2370dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
2371dee34ca4SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list)
2372dee34ca4SPoul-Henning Kamp 		if (sp->sw_id == cp)
2373dee34ca4SPoul-Henning Kamp 			sp->sw_id = NULL;
2374dee34ca4SPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
2375dee34ca4SPoul-Henning Kamp }
2376dee34ca4SPoul-Henning Kamp 
2377dee34ca4SPoul-Henning Kamp static void
2378dee34ca4SPoul-Henning Kamp swapgeom_close_ev(void *arg, int flags)
2379dee34ca4SPoul-Henning Kamp {
2380dee34ca4SPoul-Henning Kamp 	struct g_consumer *cp;
2381dee34ca4SPoul-Henning Kamp 
2382dee34ca4SPoul-Henning Kamp 	cp = arg;
2383d2bae332SPoul-Henning Kamp 	g_access(cp, -1, -1, 0);
2384dee34ca4SPoul-Henning Kamp 	g_detach(cp);
2385dee34ca4SPoul-Henning Kamp 	g_destroy_consumer(cp);
2386dee34ca4SPoul-Henning Kamp }
2387dee34ca4SPoul-Henning Kamp 
2388dee34ca4SPoul-Henning Kamp static void
2389dee34ca4SPoul-Henning Kamp swapgeom_close(struct thread *td, struct swdevt *sw)
2390dee34ca4SPoul-Henning Kamp {
2391dee34ca4SPoul-Henning Kamp 
2392dee34ca4SPoul-Henning Kamp 	/* XXX: direct call when Giant untangled */
2393dee34ca4SPoul-Henning Kamp 	g_waitfor_event(swapgeom_close_ev, sw->sw_id, M_WAITOK, NULL);
2394dee34ca4SPoul-Henning Kamp }
2395dee34ca4SPoul-Henning Kamp 
2397dee34ca4SPoul-Henning Kamp struct swh0h0 {
239889c9c53dSPoul-Henning Kamp 	struct cdev *dev;
2399dee34ca4SPoul-Henning Kamp 	struct vnode *vp;
2400dee34ca4SPoul-Henning Kamp 	int	error;
2401dee34ca4SPoul-Henning Kamp };
2402dee34ca4SPoul-Henning Kamp 
2403dee34ca4SPoul-Henning Kamp static void
2404dee34ca4SPoul-Henning Kamp swapongeom_ev(void *arg, int flags)
2405dee34ca4SPoul-Henning Kamp {
2406dee34ca4SPoul-Henning Kamp 	struct swh0h0 *swh;
2407dee34ca4SPoul-Henning Kamp 	struct g_provider *pp;
2408dee34ca4SPoul-Henning Kamp 	struct g_consumer *cp;
2409dee34ca4SPoul-Henning Kamp 	static struct g_geom *gp;
2410dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
2411dee34ca4SPoul-Henning Kamp 	u_long nblks;
2412dee34ca4SPoul-Henning Kamp 	int error;
2413dee34ca4SPoul-Henning Kamp 
2414dee34ca4SPoul-Henning Kamp 	swh = arg;
2415dee34ca4SPoul-Henning Kamp 	swh->error = 0;
2416dee34ca4SPoul-Henning Kamp 	pp = g_dev_getprovider(swh->dev);
2417dee34ca4SPoul-Henning Kamp 	if (pp == NULL) {
2418dee34ca4SPoul-Henning Kamp 		swh->error = ENODEV;
2419dee34ca4SPoul-Henning Kamp 		return;
2420dee34ca4SPoul-Henning Kamp 	}
2421dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
2422dee34ca4SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2423dee34ca4SPoul-Henning Kamp 		cp = sp->sw_id;
2424dee34ca4SPoul-Henning Kamp 		if (cp != NULL && cp->provider == pp) {
2425dee34ca4SPoul-Henning Kamp 			mtx_unlock(&sw_dev_mtx);
2426dee34ca4SPoul-Henning Kamp 			swh->error = EBUSY;
2427dee34ca4SPoul-Henning Kamp 			return;
2428dee34ca4SPoul-Henning Kamp 		}
2429dee34ca4SPoul-Henning Kamp 	}
2430dee34ca4SPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
24315721c9c7SPoul-Henning Kamp 	if (gp == NULL)
2432dee34ca4SPoul-Henning Kamp 		gp = g_new_geomf(&g_swap_class, "swap", NULL);
2433dee34ca4SPoul-Henning Kamp 	cp = g_new_consumer(gp);
2434dee34ca4SPoul-Henning Kamp 	g_attach(cp, pp);
2435afeb65e6SPoul-Henning Kamp 	/*
2436afeb65e6SPoul-Henning Kamp 	 * XXX: Every time you think you can improve the margin for
2437afeb65e6SPoul-Henning Kamp 	 * footshooting, somebody depends on the ability to do so:
2438afeb65e6SPoul-Henning Kamp 	 * savecore(8) wants to write to our swapdev so we cannot
2439afeb65e6SPoul-Henning Kamp 	 * set an exclusive count :-(
2440afeb65e6SPoul-Henning Kamp 	 */
2441d2bae332SPoul-Henning Kamp 	error = g_access(cp, 1, 1, 0);
2442dee34ca4SPoul-Henning Kamp 	if (error) {
2443dee34ca4SPoul-Henning Kamp 		g_detach(cp);
2444dee34ca4SPoul-Henning Kamp 		g_destroy_consumer(cp);
2445dee34ca4SPoul-Henning Kamp 		swh->error = error;
2446dee34ca4SPoul-Henning Kamp 		return;
2447dee34ca4SPoul-Henning Kamp 	}
2448dee34ca4SPoul-Henning Kamp 	nblks = pp->mediasize / DEV_BSIZE;
2449dee34ca4SPoul-Henning Kamp 	swaponsomething(swh->vp, cp, nblks, swapgeom_strategy,
2450dee34ca4SPoul-Henning Kamp 	    swapgeom_close, dev2udev(swh->dev));
2451dee34ca4SPoul-Henning Kamp 	swh->error = 0;
2452dee34ca4SPoul-Henning Kamp 	return;
2453dee34ca4SPoul-Henning Kamp }
2454dee34ca4SPoul-Henning Kamp 
2455dee34ca4SPoul-Henning Kamp static int
2456dee34ca4SPoul-Henning Kamp swapongeom(struct thread *td, struct vnode *vp)
2457dee34ca4SPoul-Henning Kamp {
2458dee34ca4SPoul-Henning Kamp 	int error;
2459dee34ca4SPoul-Henning Kamp 	struct swh0h0 swh;
2460dee34ca4SPoul-Henning Kamp 
2461dee34ca4SPoul-Henning Kamp 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2462dee34ca4SPoul-Henning Kamp 
2463dee34ca4SPoul-Henning Kamp 	swh.dev = vp->v_rdev;
2464dee34ca4SPoul-Henning Kamp 	swh.vp = vp;
2465dee34ca4SPoul-Henning Kamp 	swh.error = 0;
2466dee34ca4SPoul-Henning Kamp 	/* XXX: direct call when Giant untangled */
2467dee34ca4SPoul-Henning Kamp 	error = g_waitfor_event(swapongeom_ev, &swh, M_WAITOK, NULL);
2468dee34ca4SPoul-Henning Kamp 	if (!error)
2469dee34ca4SPoul-Henning Kamp 		error = swh.error;
2470dee34ca4SPoul-Henning Kamp 	VOP_UNLOCK(vp, 0, td);
2471dee34ca4SPoul-Henning Kamp 	return (error);
2472dee34ca4SPoul-Henning Kamp }
2473dee34ca4SPoul-Henning Kamp 
2474dee34ca4SPoul-Henning Kamp /*
2475dee34ca4SPoul-Henning Kamp  * VNODE backend
2476dee34ca4SPoul-Henning Kamp  *
2477dee34ca4SPoul-Henning Kamp  * This is used mainly for network filesystem (read: probably only tested
2478dee34ca4SPoul-Henning Kamp  * with NFS) swapfiles.
2480dee34ca4SPoul-Henning Kamp  */
2481dee34ca4SPoul-Henning Kamp 
2482dee34ca4SPoul-Henning Kamp static void
2483dee34ca4SPoul-Henning Kamp swapdev_strategy(struct buf *bp, struct swdevt *sp)
2484dee34ca4SPoul-Henning Kamp {
2485494eb176SPoul-Henning Kamp 	struct vnode *vp2;
2486dee34ca4SPoul-Henning Kamp 
2487dee34ca4SPoul-Henning Kamp 	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2488dee34ca4SPoul-Henning Kamp 
2489dee34ca4SPoul-Henning Kamp 	vp2 = sp->sw_id;
2490dee34ca4SPoul-Henning Kamp 	vhold(vp2);
2491dee34ca4SPoul-Henning Kamp 	if (bp->b_iocmd == BIO_WRITE) {
24923cfc7651SOlivier Houchard 		if (bp->b_bufobj)
2493494eb176SPoul-Henning Kamp 			bufobj_wdrop(bp->b_bufobj);
2494a76d8f4eSPoul-Henning Kamp 		bufobj_wref(&vp2->v_bufobj);
2495dee34ca4SPoul-Henning Kamp 	}
24963cfc7651SOlivier Houchard 	if (bp->b_bufobj != &vp2->v_bufobj)
24973cfc7651SOlivier Houchard 		bp->b_bufobj = &vp2->v_bufobj;
2498dee34ca4SPoul-Henning Kamp 	bp->b_vp = vp2;
24992c18019fSPoul-Henning Kamp 	bp->b_iooffset = dbtob(bp->b_blkno);
2500b792bebeSPoul-Henning Kamp 	bstrategy(bp);
2501dee34ca4SPoul-Henning Kamp 	return;
2502dee34ca4SPoul-Henning Kamp }
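/*
 * Illustrative translation done by swapdev_strategy() (assuming
 * PAGE_SIZE == 4096 and DEV_BSIZE == 512): swap page sp->sw_first + 3
 * becomes b_blkno == ctodb(3) == 24 and b_iooffset == dbtob(24) ==
 * 12288, i.e. 3 pages into the backing vnode.
 */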
2503dee34ca4SPoul-Henning Kamp 
2504dee34ca4SPoul-Henning Kamp static void
2505dee34ca4SPoul-Henning Kamp swapdev_close(struct thread *td, struct swdevt *sp)
2506dee34ca4SPoul-Henning Kamp {
2507dee34ca4SPoul-Henning Kamp 
2508dee34ca4SPoul-Henning Kamp 	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2509dee34ca4SPoul-Henning Kamp 	vrele(sp->sw_vp);
2510dee34ca4SPoul-Henning Kamp }
2511dee34ca4SPoul-Henning Kamp 
2513dee34ca4SPoul-Henning Kamp static int
2514dee34ca4SPoul-Henning Kamp swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2515dee34ca4SPoul-Henning Kamp {
2516dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
2517dee34ca4SPoul-Henning Kamp 	int error;
2518dee34ca4SPoul-Henning Kamp 
2519dee34ca4SPoul-Henning Kamp 	if (nblks == 0)
2520dee34ca4SPoul-Henning Kamp 		return (ENXIO);
2521dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
2522dee34ca4SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2523dee34ca4SPoul-Henning Kamp 		if (sp->sw_id == vp) {
2524dee34ca4SPoul-Henning Kamp 			mtx_unlock(&sw_dev_mtx);
2525dee34ca4SPoul-Henning Kamp 			return (EBUSY);
2526dee34ca4SPoul-Henning Kamp 		}
2527dee34ca4SPoul-Henning Kamp 	}
2528dee34ca4SPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
2529dee34ca4SPoul-Henning Kamp 
2530dee34ca4SPoul-Henning Kamp 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2531dee34ca4SPoul-Henning Kamp #ifdef MAC
253230d239bcSRobert Watson 	error = mac_system_check_swapon(td->td_ucred, vp);
2533dee34ca4SPoul-Henning Kamp 	if (error == 0)
2534dee34ca4SPoul-Henning Kamp #endif
25359e223287SKonstantin Belousov 		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
2536dee34ca4SPoul-Henning Kamp 	(void) VOP_UNLOCK(vp, 0, td);
2537dee34ca4SPoul-Henning Kamp 	if (error)
2538dee34ca4SPoul-Henning Kamp 		return (error);
2539dee34ca4SPoul-Henning Kamp 
2540dee34ca4SPoul-Henning Kamp 	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
2541f3732fd1SPoul-Henning Kamp 	    NODEV);
2542dee34ca4SPoul-Henning Kamp 	return (0);
2543dee34ca4SPoul-Henning Kamp }
2544