/*-
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"
#include "opt_swap.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/disk.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/pctrie.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/blist.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
 * The 64-page limit is due to the radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define	MAX_PAGEOUT_CLUSTER	32
#endif

#if !defined(SWB_NPAGES)
#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
#endif

#define	SWAP_META_PAGES		PCTRIE_COUNT

/*
 * A swblk structure maps each page index within a
 * SWAP_META_PAGES-aligned and sized range to the address of an
 * on-disk swap block (or SWAPBLK_NONE). The collection of these
 * mappings for an entire vm object is implemented as a pc-trie.
 */
struct swblk {
	vm_pindex_t	p;
	daddr_t		d[SWAP_META_PAGES];
};

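/*
 * Illustration: for a page index "pindex", the swblk covering it is keyed
 * in the trie at rounddown(pindex, SWAP_META_PAGES), and the backing disk
 * address, if any, sits in that swblk's d[pindex % SWAP_META_PAGES] slot.
 */
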
static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;	/* Allocate from here next */
static int nswapdev;		/* Number of swap devices */
int swap_pager_avail;
static struct sx swdev_syscall_lock;	/* serialize swap(on|off) */

static vm_ooffset_t swap_total;
SYSCTL_QUAD(_vm, OID_AUTO, swap_total, CTLFLAG_RD, &swap_total, 0,
    "Total amount of available swap storage.");
static vm_ooffset_t swap_reserved;
SYSCTL_QUAD(_vm, OID_AUTO, swap_reserved, CTLFLAG_RD, &swap_reserved, 0,
    "Amount of swap storage needed to back all allocated anonymous memory.");
static int overcommit = 0;
SYSCTL_INT(_vm, OID_AUTO, overcommit, CTLFLAG_RW, &overcommit, 0,
    "Configure virtual memory overcommit behavior. See tuning(7) "
    "for details.");
static unsigned long swzone;
SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
    "Actual size of swap metadata zone");
static unsigned long swap_maxpages;
SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
    "Maximum amount of swap supported");

/* bits from overcommit */
#define	SWAP_RESERVE_FORCE_ON		(1 << 0)
#define	SWAP_RESERVE_RLIMIT_ON		(1 << 1)
#define	SWAP_RESERVE_ALLOW_NONWIRED	(1 << 2)
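
/*
 * Worked example: with vm.overcommit set to 5 (SWAP_RESERVE_FORCE_ON |
 * SWAP_RESERVE_ALLOW_NONWIRED), a reservation below fails once
 * swap_reserved would exceed swap_total plus the non-wired portion of
 * physical memory, unless the thread holds PRIV_VM_SWAP_NOQUOTA.
 */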

int
swap_reserve(vm_ooffset_t incr)
{

	return (swap_reserve_by_cred(incr, curthread->td_ucred));
}

int
swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
	vm_ooffset_t r, s;
	int res, error;
	static int curfail;
	static struct timeval lastfail;
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	if (incr & PAGE_MASK)
		panic("swap_reserve: & PAGE_MASK");

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(curproc);
		error = racct_add(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
		if (error != 0)
			return (0);
	}
#endif

	res = 0;
	mtx_lock(&sw_dev_mtx);
	r = swap_reserved + incr;
	if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
		s = vm_cnt.v_page_count - vm_cnt.v_free_reserved - vm_cnt.v_wire_count;
		s *= PAGE_SIZE;
	} else
		s = 0;
	s += swap_total;
	if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
	    (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
		res = 1;
		swap_reserved = r;
	}
	mtx_unlock(&sw_dev_mtx);

	if (res) {
		UIDINFO_VMSIZE_LOCK(uip);
		if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
		    uip->ui_vmsize + incr > lim_cur(curthread, RLIMIT_SWAP) &&
		    priv_check(curthread, PRIV_VM_SWAP_NORLIMIT))
			res = 0;
		else
			uip->ui_vmsize += incr;
		UIDINFO_VMSIZE_UNLOCK(uip);
		if (!res) {
			mtx_lock(&sw_dev_mtx);
			swap_reserved -= incr;
			mtx_unlock(&sw_dev_mtx);
		}
	}
	if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
		printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
		    uip->ui_uid, curproc->p_pid, incr);
	}

#ifdef RACCT
	if (!res) {
		PROC_LOCK(curproc);
		racct_sub(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
	}
#endif

	return (res);
}

void
swap_reserve_force(vm_ooffset_t incr)
{
	struct uidinfo *uip;

	mtx_lock(&sw_dev_mtx);
	swap_reserved += incr;
	mtx_unlock(&sw_dev_mtx);

#ifdef RACCT
	PROC_LOCK(curproc);
	racct_add_force(curproc, RACCT_SWAP, incr);
	PROC_UNLOCK(curproc);
#endif

	uip = curthread->td_ucred->cr_ruidinfo;
	PROC_LOCK(curproc);
	UIDINFO_VMSIZE_LOCK(uip);
	uip->ui_vmsize += incr;
	UIDINFO_VMSIZE_UNLOCK(uip);
	PROC_UNLOCK(curproc);
}

void
swap_release(vm_ooffset_t decr)
{
	struct ucred *cred;

	PROC_LOCK(curproc);
	cred = curthread->td_ucred;
	swap_release_by_cred(decr, cred);
	PROC_UNLOCK(curproc);
}

void
swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
{
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	if (decr & PAGE_MASK)
		panic("swap_release: & PAGE_MASK");

	mtx_lock(&sw_dev_mtx);
	if (swap_reserved < decr)
		panic("swap_reserved < decr");
	swap_reserved -= decr;
	mtx_unlock(&sw_dev_mtx);

	UIDINFO_VMSIZE_LOCK(uip);
	if (uip->ui_vmsize < decr)
		printf("negative vmsize for uid = %d\n", uip->ui_uid);
	uip->ui_vmsize -= decr;
	UIDINFO_VMSIZE_UNLOCK(uip);

	racct_sub_cred(cred, RACCT_SWAP, decr);
}

#define SWM_FREE	0x02	/* free, period			*/
#define SWM_POP		0x04	/* pop out			*/

int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static int nsw_rcount;		/* free read buffers			*/
static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/

static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
    "Maximum running async swap ops");

static struct sx sw_alloc_sx;

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

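/*
 * Illustration: handles are pointer-sized values, so their low four bits
 * are mostly alignment and carry little entropy; NOBJLIST() shifts them
 * away and masks with NOBJLISTS - 1 to spread objects across the lists.
 */
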
static struct pagerlst	swap_pager_object_list[NOBJLISTS];
static uma_zone_t swblk_zone;
static uma_zone_t swpctrie_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
		swap_pager_alloc(void *handle, vm_ooffset_t size,
		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
    int *);
static int	swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, pgo_getpages_iodone_t, void *);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t
		swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp);

struct pagerops swappagerops = {
	.pgo_init =	swap_pager_init,	/* early system initialization of pager	*/
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	.pgo_getpages =	swap_pager_getpages,	/* pagein				*/
	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async)		*/
	.pgo_putpages =	swap_pager_putpages,	/* pageout				*/
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page	*/
	.pgo_pageunswapped = swap_pager_unswapped,	/* remove swap related to page		*/
};

/*
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
    "Maximum size of a swap block in pages");

static void	swp_sizecheck(void);
static void	swp_pager_async_iodone(struct buf *bp);
static int	swapongeom(struct vnode *);
static int	swaponvp(struct thread *, struct vnode *, u_long);
static int	swapoff_one(struct swdevt *sp, struct ucred *cred);

/*
 * Swap bitmap functions
 */
static void	swp_pager_freeswapspace(daddr_t blk, int npages);
static daddr_t	swp_pager_getswapspace(int npages);

/*
 * Metadata functions
 */
static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

static void *
swblk_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
	    M_USE_RESERVE : 0)));
}
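
/*
 * Note on the allocation above: the page daemon (curproc == pageproc)
 * passes M_USE_RESERVE so that it may dip into the UMA reserve; pageout,
 * which frees memory, must not itself stall on a failed trie-node
 * allocation.
 */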

static void
swblk_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(swpctrie_zone, node);
}

PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);
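
/*
 * PCTRIE_DEFINE() generates the SWAP_PCTRIE_INSERT(), SWAP_PCTRIE_LOOKUP()
 * and SWAP_PCTRIE_REMOVE() helpers (see pctrie(9)), keyed on swblk.p,
 * which the swp_pager_meta_*() routines use to maintain each object's trie.
 */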
420*f425ab8eSKonstantin Belousov 
4211c7c3c6aSMatthew Dillon /*
4221c7c3c6aSMatthew Dillon  * SWP_SIZECHECK() -	update swap_pager_full indication
4231c7c3c6aSMatthew Dillon  *
42420d3034fSMatthew Dillon  *	update the swap_pager_almost_full indication and warn when we are
42520d3034fSMatthew Dillon  *	about to run out of swap space, using lowat/hiwat hysteresis.
42620d3034fSMatthew Dillon  *
42720d3034fSMatthew Dillon  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
4281c7c3c6aSMatthew Dillon  *
4291c7c3c6aSMatthew Dillon  *	No restrictions on call
4301c7c3c6aSMatthew Dillon  *	This routine may not block.
4311c7c3c6aSMatthew Dillon  */
432a5edd34aSPoul-Henning Kamp static void
4332f249180SPoul-Henning Kamp swp_sizecheck(void)
4340d94caffSDavid Greenman {
43523955314SAlfred Perlstein 
4368f60c087SPoul-Henning Kamp 	if (swap_pager_avail < nswap_lowat) {
43720d3034fSMatthew Dillon 		if (swap_pager_almost_full == 0) {
4381af87c92SDavid Greenman 			printf("swap_pager: out of swap space\n");
43920d3034fSMatthew Dillon 			swap_pager_almost_full = 1;
4402b0d37a4SMatthew Dillon 		}
44120d3034fSMatthew Dillon 	} else {
44226f9a767SRodney W. Grimes 		swap_pager_full = 0;
4438f60c087SPoul-Henning Kamp 		if (swap_pager_avail > nswap_hiwat)
44420d3034fSMatthew Dillon 			swap_pager_almost_full = 0;
44526f9a767SRodney W. Grimes 	}
4461c7c3c6aSMatthew Dillon }
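
/*
 * With the defaults (nswap_lowat = 128, nswap_hiwat = 512), the
 * almost-full indication latches once fewer than 128 swap pages remain
 * and clears only after more than 512 pages are free again, so the
 * warning does not flap near a single threshold.
 */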

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
	sx_init(&sw_alloc_sx, "swspsx");
	sx_init(&swdev_syscall_lock, "swsysc");
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	unsigned long n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.   We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	mtx_lock(&pbuf_mtx);
	nsw_rcount = (nswbuf + 1) / 2;
	nsw_wcount_sync = (nswbuf + 3) / 4;
	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_unlock(&pbuf_mtx);

	/*
	 * Initialize our zones, guessing on the number we need based
	 * on the number of pages in the system.
	 */
	n = vm_cnt.v_page_count / 2;
	if (maxswzone && n > maxswzone / sizeof(struct swblk))
		n = maxswzone / sizeof(struct swblk);
	swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
	    pctrie_zone_init, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	if (swpctrie_zone == NULL)
		panic("failed to create swap pctrie zone.");
	swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
	    NULL, NULL, _Alignof(struct swblk) - 1,
	    UMA_ZONE_NOFREE | UMA_ZONE_VM);
	if (swblk_zone == NULL)
		panic("failed to create swap blk zone.");
	n2 = n;
	do {
		if (uma_zone_reserve_kva(swblk_zone, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);
	if (n2 != n)
		printf("Swap blk zone entries reduced from %lu to %lu.\n",
		    n2, n);
	swap_maxpages = n * SWAP_META_PAGES;
	swzone = n * sizeof(struct swblk);
	if (!uma_zone_reserve_kva(swpctrie_zone, n))
		printf("Cannot reserve swap pctrie zone, "
		    "reduce kern.maxswzone.\n");
}
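
/*
 * Sizing example: with the two-thirds backoff above, a request to
 * reserve KVA for 1000 swblk entries retries at 666, then 444, and so
 * on, until uma_zone_reserve_kva() succeeds or n reaches zero.
 */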

static vm_object_t
swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
    vm_ooffset_t offset)
{
	vm_object_t object;

	if (cred != NULL) {
		if (!swap_reserve_by_cred(size, cred))
			return (NULL);
		crhold(cred);
	}

	/*
	 * The un_pager.swp.swp_blks trie is initialized by
	 * vm_object_allocate() to ensure the correct order of
	 * visibility to other threads.
	 */
	object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
	    PAGE_MASK + size));

	object->handle = handle;
	if (cred != NULL) {
		object->cred = cred;
		object->charge = size;
	}
	return (object);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.
 *
 *	This routine must ensure that no live duplicate is created for
 *	the named object request, which is prevented by holding the
 *	sw_alloc_sx lock when handle != NULL.
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;

	if (handle != NULL) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() with regard to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
		if (object == NULL) {
			object = swap_pager_alloc_init(handle, cred, size,
			    offset);
			if (object != NULL) {
				TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
				    object, pager_object_list);
			}
		}
		sx_xunlock(&sw_alloc_sx);
	} else {
		object = swap_pager_alloc_init(handle, cred, size, offset);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	The object must be locked.
 */
static void
swap_pager_dealloc(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(object);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap metadata.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for the requested number of pages.  The starting
 *	swap block number (a page index) is returned or SWAPBLK_NONE
 *	if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	This routine may not sleep.
 *
 *	We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int npages)
{
	daddr_t blk;
	struct swdevt *sp;
	int i;

	blk = SWAPBLK_NONE;
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	for (i = 0; i < nswapdev; i++) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if (!(sp->sw_flags & SW_CLOSING)) {
			blk = blist_alloc(sp->sw_blist, npages);
			if (blk != SWAPBLK_NONE) {
				blk += sp->sw_first;
				sp->sw_used += npages;
				swap_pager_avail -= npages;
				swp_sizecheck();
				swdevhd = TAILQ_NEXT(sp, sw_list);
				goto done;
			}
		}
		sp = TAILQ_NEXT(sp, sw_list);
	}
	if (swap_pager_full != 2) {
		printf("swap_pager_getswapspace(%d): failed\n", npages);
		swap_pager_full = 2;
		swap_pager_almost_full = 1;
	}
	swdevhd = NULL;
done:
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}

static int
swp_pager_isondev(daddr_t blk, struct swdevt *sp)
{

	return (blk >= sp->sw_first && blk < sp->sw_end);
}

static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
			mtx_unlock(&sw_dev_mtx);
			if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
			    unmapped_buf_allowed) {
				bp->b_data = unmapped_buf;
				bp->b_offset = 0;
			} else {
				pmap_qenter((vm_offset_t)bp->b_data,
				    &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
			}
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}
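
/*
 * Design note: when the chosen device advertises SW_UNMAPPED and the
 * platform allows unmapped buffers, the pages travel in bp->b_pages[]
 * with no kernel virtual mapping, which avoids the pmap_qenter() above
 * and the later TLB invalidation on every swap I/O.
 */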

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	This routine may not sleep.
 */
static void
swp_pager_freeswapspace(daddr_t blk, int npages)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (blk >= sp->sw_first && blk < sp->sw_end) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	The object must be locked.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{

	swp_pager_meta_free(object, start, size);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	int n = 0;
	daddr_t blk = SWAPBLK_NONE;
	vm_pindex_t beg = start;	/* save start index */

	VM_OBJECT_WLOCK(object);
	while (size) {
		if (n == 0) {
			n = BLIST_MAX_ALLOC;
			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
				n >>= 1;
				if (n == 0) {
					swp_pager_meta_free(object, beg, start - beg);
					VM_OBJECT_WUNLOCK(object);
					return (-1);
				}
			}
		}
		swp_pager_meta_build(object, start, blk);
		--size;
		++start;
		++blk;
		--n;
	}
	swp_pager_meta_free(object, start, n);
	VM_OBJECT_WUNLOCK(object);
	return (0);
}
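
/*
 * Usage note: swap-backed md(4) devices created with the "reserve"
 * option rely on this routine to preallocate their entire backing store
 * up front; the request size is halved on each failed allocation, so a
 * fragmented swap map fills with progressively smaller runs.
 */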

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to sleep.  It may sleep allocating metadata
 *	indirectly through swp_pager_meta_build() or if paging is still in
 *	progress on the source.
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source object is of type OBJT_SWAP.
 *
 *	The source and destination objects must be locked.
 *	Both object locks may temporarily be released.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	vm_pindex_t i;

	VM_OBJECT_ASSERT_WLOCKED(srcobject);
	VM_OBJECT_ASSERT_WLOCKED(dstobject);

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource && srcobject->handle != NULL) {
		vm_object_pip_add(srcobject, 1);
		VM_OBJECT_WUNLOCK(srcobject);
		vm_object_pip_add(dstobject, 1);
		VM_OBJECT_WUNLOCK(dstobject);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(dstobject);
		vm_object_pip_wakeup(dstobject);
		VM_OBJECT_WLOCK(srcobject);
		vm_object_pip_wakeup(srcobject);
	}

	/*
	 * transfer source to destination.
	 */
	for (i = 0; i < dstobject->size; ++i) {
		daddr_t dstaddr;

		/*
		 * Locate (without changing) the swapblk on the destination,
		 * unless it is invalid in which case free it silently, or
		 * if the destination is a resident page, in which case the
		 * source is thrown away.
		 */
		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);

		if (dstaddr == SWAPBLK_NONE) {
			/*
			 * Destination has no swapblk and is not resident,
			 * copy source.
			 */
			daddr_t srcaddr;

			srcaddr = swp_pager_meta_ctl(srcobject, i + offset,
			    SWM_POP);

			if (srcaddr != SWAPBLK_NONE) {
				/*
				 * swp_pager_meta_build() can sleep.
				 */
				vm_object_pip_add(srcobject, 1);
				VM_OBJECT_WUNLOCK(srcobject);
				vm_object_pip_add(dstobject, 1);
				swp_pager_meta_build(dstobject, i, srcaddr);
				vm_object_pip_wakeup(dstobject);
				VM_OBJECT_WLOCK(srcobject);
				vm_object_pip_wakeup(srcobject);
			}
		} else {
			/*
			 * Destination has valid swapblk or it is represented
			 * by a resident page.  We destroy the source block.
			 */

			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
		}
	}

	/*
	 * Free left over swap blocks in source.
	 *
	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
	 * double-remove the object from the swap queues.
	 */
	if (destroysource) {
		swp_pager_meta_free_all(srcobject);
		/*
		 * Reverting the type is not necessary; the caller is going
		 * to destroy srcobject directly, but I'm doing it here
		 * for consistency since we've removed the object from its
		 * queues.
		 */
		srcobject->type = OBJT_DEFAULT;
	}
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page.
 */
static boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	daddr_t blk, blk0;
	int i;

	VM_OBJECT_ASSERT_LOCKED(object);

	/*
	 * do we have good backing store at the requested index?
	 */
	blk0 = swp_pager_meta_ctl(object, pindex, 0);
	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		for (i = 1; i < SWB_NPAGES; i++) {
			if (i > pindex)
				break;
			blk = swp_pager_meta_ctl(object, pindex - i, 0);
			if (blk != blk0 - i)
				break;
		}
		*before = i - 1;
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		for (i = 1; i < SWB_NPAGES; i++) {
			blk = swp_pager_meta_ctl(object, pindex + i, 0);
			if (blk != blk0 + i)
				break;
		}
		*after = i - 1;
	}
	return (TRUE);
}
10141c7c3c6aSMatthew Dillon 
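/*
 * An illustrative, non-built sketch (hence the #if 0) of how a caller
 * might consume the clustering hints computed above.  The function name
 * "example_probe_cluster" is hypothetical; real callers go through
 * vm_pager_has_page().  The object lock is held around the call, as the
 * assertion in swap_pager_haspage() requires.
 */
#if 0
static void
example_probe_cluster(vm_object_t object, vm_pindex_t pindex)
{
	int after, before;

	VM_OBJECT_RLOCK(object);
	if (swap_pager_haspage(object, pindex, &before, &after))
		printf("swap: %d contiguous page(s) before and %d after %ju\n",
		    before, after, (uintmax_t)pindex);
	VM_OBJECT_RUNLOCK(object);
}
#endif
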
10151c7c3c6aSMatthew Dillon /*
10161c7c3c6aSMatthew Dillon  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
10171c7c3c6aSMatthew Dillon  *
10181c7c3c6aSMatthew Dillon  *	This removes any associated swap backing store, whether valid or
10191c7c3c6aSMatthew Dillon  *	not, from the page.
10201c7c3c6aSMatthew Dillon  *
10211c7c3c6aSMatthew Dillon  *	This routine is typically called when a page is made dirty, at
10221c7c3c6aSMatthew Dillon  *	which point any associated swap can be freed.  MADV_FREE also
10231c7c3c6aSMatthew Dillon  *	calls us in a special-case situation.
10241c7c3c6aSMatthew Dillon  *
10251c7c3c6aSMatthew Dillon  *	NOTE!!!  If the page is clean and the swap was valid, the caller
10261c7c3c6aSMatthew Dillon  *	should make the page dirty before calling this routine.  This routine
10271c7c3c6aSMatthew Dillon  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
10281c7c3c6aSMatthew Dillon  *	depends on it.
10291c7c3c6aSMatthew Dillon  *
103015523cf7SKonstantin Belousov  *	This routine may not sleep.
1031c25673ffSAttilio Rao  *
1032c25673ffSAttilio Rao  *	The object containing the page must be locked.
10331c7c3c6aSMatthew Dillon  */
10341c7c3c6aSMatthew Dillon static void
10352f249180SPoul-Henning Kamp swap_pager_unswapped(vm_page_t m)
10361c7c3c6aSMatthew Dillon {
10372f249180SPoul-Henning Kamp 
10381c7c3c6aSMatthew Dillon 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
10391c7c3c6aSMatthew Dillon }
10401c7c3c6aSMatthew Dillon 
10411c7c3c6aSMatthew Dillon /*
1042915d1b71SMark Johnston  * swap_pager_getpages() - bring pages in from swap
10431c7c3c6aSMatthew Dillon  *
1044915d1b71SMark Johnston  *	Attempt to page in the pages in array "m" of length "count".  The caller
1045915d1b71SMark Johnston  *	may optionally specify that additional pages preceding and succeeding
1046915d1b71SMark Johnston  *	the specified range be paged in.  The number of such pages is returned
1047915d1b71SMark Johnston  *	in the "rbehind" and "rahead" parameters, and they will be in the
1048915d1b71SMark Johnston  *	inactive queue upon return.
10491c7c3c6aSMatthew Dillon  *
1050915d1b71SMark Johnston  *	The pages in "m" must be busied and will remain busied upon return.
10511c7c3c6aSMatthew Dillon  */
1052f708ef1bSPoul-Henning Kamp static int
1053b0cd2017SGleb Smirnoff swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
1054b0cd2017SGleb Smirnoff     int *rahead)
1055df8bae1dSRodney W. Grimes {
10561c7c3c6aSMatthew Dillon 	struct buf *bp;
1057915d1b71SMark Johnston 	vm_page_t mpred, msucc, p;
1058915d1b71SMark Johnston 	vm_pindex_t pindex;
10591c7c3c6aSMatthew Dillon 	daddr_t blk;
1060dd9cb6daSMark Johnston 	int i, j, maxahead, maxbehind, reqcount, shift;
10610d94caffSDavid Greenman 
1062915d1b71SMark Johnston 	reqcount = count;
10631c7c3c6aSMatthew Dillon 
106489f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object);
10651c7c3c6aSMatthew Dillon 	bp = getpbuf(&nsw_rcount);
1066915d1b71SMark Johnston 	VM_OBJECT_WLOCK(object);
106726f9a767SRodney W. Grimes 
1068dd9cb6daSMark Johnston 	if (!swap_pager_haspage(object, m[0]->pindex, &maxbehind, &maxahead)) {
1069915d1b71SMark Johnston 		relpbuf(bp, &nsw_rcount);
1070915d1b71SMark Johnston 		return (VM_PAGER_FAIL);
1071915d1b71SMark Johnston 	}
1072915d1b71SMark Johnston 
1073915d1b71SMark Johnston 	/*
1074915d1b71SMark Johnston 	 * Clip the readahead and readbehind ranges to exclude resident pages.
1075915d1b71SMark Johnston 	 */
1076915d1b71SMark Johnston 	if (rahead != NULL) {
1077dd9cb6daSMark Johnston 		KASSERT(reqcount - 1 <= maxahead,
1078915d1b71SMark Johnston 		    ("page count %d extends beyond swap block", reqcount));
1079dd9cb6daSMark Johnston 		*rahead = imin(*rahead, maxahead - (reqcount - 1));
1080915d1b71SMark Johnston 		pindex = m[reqcount - 1]->pindex;
1081915d1b71SMark Johnston 		msucc = TAILQ_NEXT(m[reqcount - 1], listq);
1082915d1b71SMark Johnston 		if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
1083915d1b71SMark Johnston 			*rahead = msucc->pindex - pindex - 1;
1084915d1b71SMark Johnston 	}
1085915d1b71SMark Johnston 	if (rbehind != NULL) {
1086dd9cb6daSMark Johnston 		*rbehind = imin(*rbehind, maxbehind);
1087915d1b71SMark Johnston 		pindex = m[0]->pindex;
1088915d1b71SMark Johnston 		mpred = TAILQ_PREV(m[0], pglist, listq);
1089915d1b71SMark Johnston 		if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
1090915d1b71SMark Johnston 			*rbehind = pindex - mpred->pindex - 1;
1091915d1b71SMark Johnston 	}
1092915d1b71SMark Johnston 
1093915d1b71SMark Johnston 	/*
1094915d1b71SMark Johnston 	 * Allocate readahead and readbehind pages.
1095915d1b71SMark Johnston 	 */
1096915d1b71SMark Johnston 	shift = rbehind != NULL ? *rbehind : 0;
1097915d1b71SMark Johnston 	if (shift != 0) {
1098915d1b71SMark Johnston 		for (i = 1; i <= shift; i++) {
1099915d1b71SMark Johnston 			p = vm_page_alloc(object, m[0]->pindex - i,
11007667839aSAlan Cox 			    VM_ALLOC_NORMAL);
1101915d1b71SMark Johnston 			if (p == NULL) {
1102915d1b71SMark Johnston 				/* Shift allocated pages to the left. */
1103915d1b71SMark Johnston 				for (j = 0; j < i - 1; j++)
1104915d1b71SMark Johnston 					bp->b_pages[j] =
1105915d1b71SMark Johnston 					    bp->b_pages[j + shift - i + 1];
1106915d1b71SMark Johnston 				break;
1107915d1b71SMark Johnston 			}
1108915d1b71SMark Johnston 			bp->b_pages[shift - i] = p;
1109915d1b71SMark Johnston 		}
1110915d1b71SMark Johnston 		shift = i - 1;
1111915d1b71SMark Johnston 		*rbehind = shift;
1112915d1b71SMark Johnston 	}
1113915d1b71SMark Johnston 	for (i = 0; i < reqcount; i++)
1114915d1b71SMark Johnston 		bp->b_pages[i + shift] = m[i];
1115915d1b71SMark Johnston 	if (rahead != NULL) {
1116915d1b71SMark Johnston 		for (i = 0; i < *rahead; i++) {
1117915d1b71SMark Johnston 			p = vm_page_alloc(object,
11187667839aSAlan Cox 			    m[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
1119915d1b71SMark Johnston 			if (p == NULL)
1120915d1b71SMark Johnston 				break;
1121915d1b71SMark Johnston 			bp->b_pages[shift + reqcount + i] = p;
1122915d1b71SMark Johnston 		}
1123915d1b71SMark Johnston 		*rahead = i;
1124915d1b71SMark Johnston 	}
1125915d1b71SMark Johnston 	if (rbehind != NULL)
1126915d1b71SMark Johnston 		count += *rbehind;
1127915d1b71SMark Johnston 	if (rahead != NULL)
1128915d1b71SMark Johnston 		count += *rahead;
1129915d1b71SMark Johnston 
1130915d1b71SMark Johnston 	vm_object_pip_add(object, count);
1131915d1b71SMark Johnston 
1132915d1b71SMark Johnston 	for (i = 0; i < count; i++)
1133915d1b71SMark Johnston 		bp->b_pages[i]->oflags |= VPO_SWAPINPROG;
1134915d1b71SMark Johnston 
1135915d1b71SMark Johnston 	pindex = bp->b_pages[0]->pindex;
1136915d1b71SMark Johnston 	blk = swp_pager_meta_ctl(object, pindex, 0);
1137915d1b71SMark Johnston 	KASSERT(blk != SWAPBLK_NONE,
1138915d1b71SMark Johnston 	    ("no swap block containing %p(%jx)", object, (uintmax_t)pindex));
1139915d1b71SMark Johnston 
1140915d1b71SMark Johnston 	VM_OBJECT_WUNLOCK(object);
1141915d1b71SMark Johnston 
1142915d1b71SMark Johnston 	bp->b_flags |= B_PAGING;
114321144e3bSPoul-Henning Kamp 	bp->b_iocmd = BIO_READ;
11441c7c3c6aSMatthew Dillon 	bp->b_iodone = swp_pager_async_iodone;
1145fdcc1cc0SJohn Baldwin 	bp->b_rcred = crhold(thread0.td_ucred);
1146fdcc1cc0SJohn Baldwin 	bp->b_wcred = crhold(thread0.td_ucred);
1147b0cd2017SGleb Smirnoff 	bp->b_blkno = blk;
1148b0cd2017SGleb Smirnoff 	bp->b_bcount = PAGE_SIZE * count;
1149b0cd2017SGleb Smirnoff 	bp->b_bufsize = PAGE_SIZE * count;
1150b0cd2017SGleb Smirnoff 	bp->b_npages = count;
1151915d1b71SMark Johnston 	bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
1152915d1b71SMark Johnston 	bp->b_pgafter = rahead != NULL ? *rahead : 0;
115326f9a767SRodney W. Grimes 
115483c9dea1SGleb Smirnoff 	VM_CNT_INC(v_swapin);
115583c9dea1SGleb Smirnoff 	VM_CNT_ADD(v_swappgsin, count);
11561c7c3c6aSMatthew Dillon 
11571c7c3c6aSMatthew Dillon 	/*
11581c7c3c6aSMatthew Dillon 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
11591c7c3c6aSMatthew Dillon 	 * this point because we automatically release it on completion.
11601c7c3c6aSMatthew Dillon 	 * Instead, we look at the one page we are interested in which we
11611c7c3c6aSMatthew Dillon 	 * still hold a lock on even through the I/O completion.
11621c7c3c6aSMatthew Dillon 	 *
11631c7c3c6aSMatthew Dillon 	 * The other pages in our m[] array are also released on completion,
11641c7c3c6aSMatthew Dillon 	 * so we cannot assume they are valid anymore either.
11651c7c3c6aSMatthew Dillon 	 *
1166c37a77eeSPoul-Henning Kamp 	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
11671c7c3c6aSMatthew Dillon 	 */
1168b890cb2cSPeter Wemm 	BUF_KERNPROC(bp);
11694b03903aSPoul-Henning Kamp 	swp_pager_strategy(bp);
117026f9a767SRodney W. Grimes 
117126f9a767SRodney W. Grimes 	/*
1172915d1b71SMark Johnston 	 * Wait for the requested pages to complete I/O.  VPO_SWAPINPROG is always
11731c7c3c6aSMatthew Dillon 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1174915d1b71SMark Johnston 	 * is set in the metadata for each page in the request.
117526f9a767SRodney W. Grimes 	 */
117689f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object);
1177b0cd2017SGleb Smirnoff 	while ((m[0]->oflags & VPO_SWAPINPROG) != 0) {
1178b0cd2017SGleb Smirnoff 		m[0]->oflags |= VPO_SWAPSLEEP;
117983c9dea1SGleb Smirnoff 		VM_CNT_INC(v_intrans);
1180c7aebda8SAttilio Rao 		if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
1181c7aebda8SAttilio Rao 		    "swread", hz * 20)) {
11829bd86a98SBruce M Simpson 			printf(
1183c5690651SPoul-Henning Kamp "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
1184c5690651SPoul-Henning Kamp 			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
11851c7c3c6aSMatthew Dillon 		}
11861b119d9dSDavid Greenman 	}
118726f9a767SRodney W. Grimes 
118826f9a767SRodney W. Grimes 	/*
1189b0cd2017SGleb Smirnoff 	 * If we had an unrecoverable read error, the pages will not be valid.
119026f9a767SRodney W. Grimes 	 */
1191915d1b71SMark Johnston 	for (i = 0; i < reqcount; i++)
1192b0cd2017SGleb Smirnoff 		if (m[i]->valid != VM_PAGE_BITS_ALL)
11931c7c3c6aSMatthew Dillon 			return (VM_PAGER_ERROR);
1194b0cd2017SGleb Smirnoff 
11971c7c3c6aSMatthew Dillon 	/*
11981c7c3c6aSMatthew Dillon 	 * A final note: in a low swap situation, we cannot deallocate swap
11991c7c3c6aSMatthew Dillon 	 * and mark a page dirty here because the caller is likely to mark
12001c7c3c6aSMatthew Dillon 	 * the page clean when we return, causing the page to possibly revert
12011c7c3c6aSMatthew Dillon 	 * to all-zeros later.
12021c7c3c6aSMatthew Dillon 	 */
11951c7c3c6aSMatthew Dillon 	return (VM_PAGER_OK);
1203df8bae1dSRodney W. Grimes }
1204df8bae1dSRodney W. Grimes 
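/*
 * An illustrative, non-built sketch of a synchronous page-in through
 * swap_pager_getpages().  "example_pagein" is hypothetical; it assumes
 * the page is exclusive-busied, belongs to "object", and that the
 * caller holds the object write lock, as real callers do.
 */
#if 0
static int
example_pagein(vm_object_t object, vm_page_t m)
{
	int rahead, rbehind;

	VM_OBJECT_ASSERT_WLOCKED(object);
	rbehind = 0;
	rahead = SWB_NPAGES - 1;	/* requested; clipped on return */
	if (swap_pager_getpages(object, &m, 1, &rbehind, &rahead) !=
	    VM_PAGER_OK)
		return (EIO);
	return (0);	/* m is now fully valid and still busied */
}
#endif
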
12051c7c3c6aSMatthew Dillon /*
120690effb23SGleb Smirnoff  * 	swap_pager_getpages_async():
120790effb23SGleb Smirnoff  *
120890effb23SGleb Smirnoff  *	Right now this is emulation of asynchronous operation on top of
120990effb23SGleb Smirnoff  *	Right now this is an emulation of asynchronous operation on top of
121090effb23SGleb Smirnoff  */
121190effb23SGleb Smirnoff static int
121290effb23SGleb Smirnoff swap_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
1213b0cd2017SGleb Smirnoff     int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
121490effb23SGleb Smirnoff {
121590effb23SGleb Smirnoff 	int r, error;
121690effb23SGleb Smirnoff 
1217b0cd2017SGleb Smirnoff 	r = swap_pager_getpages(object, m, count, rbehind, rahead);
121890effb23SGleb Smirnoff 	VM_OBJECT_WUNLOCK(object);
121990effb23SGleb Smirnoff 	switch (r) {
122090effb23SGleb Smirnoff 	case VM_PAGER_OK:
122190effb23SGleb Smirnoff 		error = 0;
122290effb23SGleb Smirnoff 		break;
122390effb23SGleb Smirnoff 	case VM_PAGER_ERROR:
122490effb23SGleb Smirnoff 		error = EIO;
122590effb23SGleb Smirnoff 		break;
122690effb23SGleb Smirnoff 	case VM_PAGER_FAIL:
122790effb23SGleb Smirnoff 		error = EINVAL;
122890effb23SGleb Smirnoff 		break;
122990effb23SGleb Smirnoff 	default:
1230d9328101SGleb Smirnoff 		panic("unhandled swap_pager_getpages() error %d", r);
123190effb23SGleb Smirnoff 	}
123290effb23SGleb Smirnoff 	(iodone)(arg, m, count, error);
123390effb23SGleb Smirnoff 	VM_OBJECT_WLOCK(object);
123490effb23SGleb Smirnoff 
123590effb23SGleb Smirnoff 	return (r);
123690effb23SGleb Smirnoff }
123790effb23SGleb Smirnoff 
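/*
 * An illustrative, non-built sketch of an iodone callback with the
 * pgo_getpages_iodone_t shape invoked above.  "example_getpages_done"
 * and the use of "arg" as a wakeup channel are hypothetical.
 */
#if 0
static void
example_getpages_done(void *arg, vm_page_t *ma, int count, int error)
{

	if (error != 0)
		printf("swap pagein of %d page(s) failed: error %d\n",
		    count, error);
	wakeup(arg);	/* let the requester observe the completion */
}
#endif
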
123890effb23SGleb Smirnoff /*
12391c7c3c6aSMatthew Dillon  *	swap_pager_putpages:
12401c7c3c6aSMatthew Dillon  *
12411c7c3c6aSMatthew Dillon  *	Assign swap (if necessary) and initiate I/O on the specified pages.
12421c7c3c6aSMatthew Dillon  *
12431c7c3c6aSMatthew Dillon  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
12441c7c3c6aSMatthew Dillon  *	are automatically converted to SWAP objects.
12451c7c3c6aSMatthew Dillon  *
1246ea3aecf5SPeter Wemm  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
12471c7c3c6aSMatthew Dillon  *	vm_page reservation system coupled with properly written VFS devices
12481c7c3c6aSMatthew Dillon  *	should ensure that no low-memory deadlock occurs.  This is an area
12491c7c3c6aSMatthew Dillon  *	which needs work.
12501c7c3c6aSMatthew Dillon  *
12511c7c3c6aSMatthew Dillon  *	The parent has N vm_object_pip_add() references prior to
12521c7c3c6aSMatthew Dillon  *	calling us and will remove references for rtvals[] that are
12531c7c3c6aSMatthew Dillon  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
12541c7c3c6aSMatthew Dillon  *	completion.
12551c7c3c6aSMatthew Dillon  *
12561c7c3c6aSMatthew Dillon  *	The parent has soft-busy'd the pages it passes us and will unbusy
12571c7c3c6aSMatthew Dillon  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
12581c7c3c6aSMatthew Dillon  *	We need to unbusy the rest on I/O completion.
12591c7c3c6aSMatthew Dillon  */
1260d635a37fSWarner Losh static void
12612f249180SPoul-Henning Kamp swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1262e065e87cSKonstantin Belousov     int flags, int *rtvals)
1263df8bae1dSRodney W. Grimes {
1264e065e87cSKonstantin Belousov 	int i, n;
1265e065e87cSKonstantin Belousov 	boolean_t sync;
1266df8bae1dSRodney W. Grimes 
12671c7c3c6aSMatthew Dillon 	if (count && m[0]->object != object)
12687036145bSMaxim Konovalov 		panic("swap_pager_putpages: object mismatch %p/%p",
12691c7c3c6aSMatthew Dillon 		    object, m[0]->object);
1273ee3dc7d7SAlan Cox 
12741c7c3c6aSMatthew Dillon 	/*
12751c7c3c6aSMatthew Dillon 	 * Step 1
12761c7c3c6aSMatthew Dillon 	 *
12771c7c3c6aSMatthew Dillon 	 * Turn object into OBJT_SWAP
12781c7c3c6aSMatthew Dillon 	 * check for bogus arguments
12791c7c3c6aSMatthew Dillon 	 * force sync if not pageout process
12801c7c3c6aSMatthew Dillon 	 */
12814dcc5c2dSMatthew Dillon 	if (object->type != OBJT_SWAP)
12824dcc5c2dSMatthew Dillon 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
128389f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object);
1284e47ed70bSJohn Dyson 
1285e065e87cSKonstantin Belousov 	n = 0;
1286e47ed70bSJohn Dyson 	if (curproc != pageproc)
1287e47ed70bSJohn Dyson 		sync = TRUE;
1288e065e87cSKonstantin Belousov 	else
1289e065e87cSKonstantin Belousov 		sync = (flags & VM_PAGER_PUT_SYNC) != 0;
129026f9a767SRodney W. Grimes 
12911c7c3c6aSMatthew Dillon 	/*
12921c7c3c6aSMatthew Dillon 	 * Step 2
12931c7c3c6aSMatthew Dillon 	 *
12941c7c3c6aSMatthew Dillon 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
12951c7c3c6aSMatthew Dillon 	 * The page is left dirty until the pageout operation completes
12961c7c3c6aSMatthew Dillon 	 * successfully.
12971c7c3c6aSMatthew Dillon 	 */
12981c7c3c6aSMatthew Dillon 	for (i = 0; i < count; i += n) {
12991c7c3c6aSMatthew Dillon 		int j;
13001c7c3c6aSMatthew Dillon 		struct buf *bp;
1301a316d390SJohn Dyson 		daddr_t blk;
130226f9a767SRodney W. Grimes 
1303df8bae1dSRodney W. Grimes 		/*
13041c7c3c6aSMatthew Dillon 		 * Maximum I/O size is limited by a number of factors.
1305df8bae1dSRodney W. Grimes 		 */
13061c7c3c6aSMatthew Dillon 		n = min(BLIST_MAX_ALLOC, count - i);
1307327f4e83SMatthew Dillon 		n = min(n, nsw_cluster_max);
13081c7c3c6aSMatthew Dillon 
130926f9a767SRodney W. Grimes 		/*
13101c7c3c6aSMatthew Dillon 		 * Get biggest block of swap we can.  If we fail, fall
13111c7c3c6aSMatthew Dillon 		 * back and try to allocate a smaller block.  Don't go
13121c7c3c6aSMatthew Dillon 		 * overboard trying to allocate space if it would overly
13131c7c3c6aSMatthew Dillon 		 * fragment swap.
131426f9a767SRodney W. Grimes 		 */
13151c7c3c6aSMatthew Dillon 		while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
13161c7c3c6aSMatthew Dillon 		    n > 4)
13171c7c3c6aSMatthew Dillon 			n >>= 1;
13211c7c3c6aSMatthew Dillon 		if (blk == SWAPBLK_NONE) {
13224dcc5c2dSMatthew Dillon 			for (j = 0; j < n; ++j)
13231c7c3c6aSMatthew Dillon 				rtvals[i+j] = VM_PAGER_FAIL;
13241c7c3c6aSMatthew Dillon 			continue;
132526f9a767SRodney W. Grimes 		}
132626f9a767SRodney W. Grimes 
132726f9a767SRodney W. Grimes 		/*
13281c7c3c6aSMatthew Dillon 		 * All I/O parameters have been satisfied, build the I/O
13291c7c3c6aSMatthew Dillon 		 * request and assign the swap space.
133026f9a767SRodney W. Grimes 		 */
1331327f4e83SMatthew Dillon 		if (sync == TRUE) {
1332327f4e83SMatthew Dillon 			bp = getpbuf(&nsw_wcount_sync);
1333327f4e83SMatthew Dillon 		} else {
1334327f4e83SMatthew Dillon 			bp = getpbuf(&nsw_wcount_async);
133521144e3bSPoul-Henning Kamp 			bp->b_flags = B_ASYNC;
1336327f4e83SMatthew Dillon 		}
13375e04322aSPoul-Henning Kamp 		bp->b_flags |= B_PAGING;
1338912e4ae9SPoul-Henning Kamp 		bp->b_iocmd = BIO_WRITE;
133926f9a767SRodney W. Grimes 
1340fdcc1cc0SJohn Baldwin 		bp->b_rcred = crhold(thread0.td_ucred);
1341fdcc1cc0SJohn Baldwin 		bp->b_wcred = crhold(thread0.td_ucred);
13421c7c3c6aSMatthew Dillon 		bp->b_bcount = PAGE_SIZE * n;
13431c7c3c6aSMatthew Dillon 		bp->b_bufsize = PAGE_SIZE * n;
13441c7c3c6aSMatthew Dillon 		bp->b_blkno = blk;
1345e47ed70bSJohn Dyson 
134689f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
13471c7c3c6aSMatthew Dillon 		for (j = 0; j < n; ++j) {
13481c7c3c6aSMatthew Dillon 			vm_page_t mreq = m[i+j];
13491c7c3c6aSMatthew Dillon 
13501c7c3c6aSMatthew Dillon 			swp_pager_meta_build(mreq->object, mreq->pindex,
13514dcc5c2dSMatthew Dillon 			    blk + j);
135587b0ab69SAlan Cox 			MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
13565786be7cSAlan Cox 			mreq->oflags |= VPO_SWAPINPROG;
13571c7c3c6aSMatthew Dillon 			bp->b_pages[j] = mreq;
13581c7c3c6aSMatthew Dillon 		}
135989f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
13601c7c3c6aSMatthew Dillon 		bp->b_npages = n;
1361a5296b05SJulian Elischer 		/*
1362a5296b05SJulian Elischer 		 * Must set dirty range for NFS to work.
1363a5296b05SJulian Elischer 		 */
1364a5296b05SJulian Elischer 		bp->b_dirtyoff = 0;
1365a5296b05SJulian Elischer 		bp->b_dirtyend = bp->b_bcount;
13661c7c3c6aSMatthew Dillon 
136783c9dea1SGleb Smirnoff 		VM_CNT_INC(v_swapout);
136883c9dea1SGleb Smirnoff 		VM_CNT_ADD(v_swappgsout, bp->b_npages);
136926f9a767SRodney W. Grimes 
137026f9a767SRodney W. Grimes 		/*
137177923df2SAlan Cox 		 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
137277923df2SAlan Cox 		 * can call the async completion routine at the end of a
137377923df2SAlan Cox 		 * synchronous I/O operation.  Otherwise, our caller would
137477923df2SAlan Cox 		 * perform duplicate unbusy and wakeup operations on the page
137577923df2SAlan Cox 		 * and object, respectively.
137677923df2SAlan Cox 		 */
137777923df2SAlan Cox 		for (j = 0; j < n; j++)
137877923df2SAlan Cox 			rtvals[i + j] = VM_PAGER_PEND;
137977923df2SAlan Cox 
138077923df2SAlan Cox 		/*
13811c7c3c6aSMatthew Dillon 		 * asynchronous
13821c7c3c6aSMatthew Dillon 		 *
1383c37a77eeSPoul-Henning Kamp 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
138426f9a767SRodney W. Grimes 		 */
13851c7c3c6aSMatthew Dillon 		if (sync == FALSE) {
13861c7c3c6aSMatthew Dillon 			bp->b_iodone = swp_pager_async_iodone;
138767812eacSKirk McKusick 			BUF_KERNPROC(bp);
13884b03903aSPoul-Henning Kamp 			swp_pager_strategy(bp);
13891c7c3c6aSMatthew Dillon 			continue;
139026f9a767SRodney W. Grimes 		}
1391e47ed70bSJohn Dyson 
139226f9a767SRodney W. Grimes 		/*
13931c7c3c6aSMatthew Dillon 		 * synchronous
13941c7c3c6aSMatthew Dillon 		 *
1395c37a77eeSPoul-Henning Kamp 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
13961c7c3c6aSMatthew Dillon 		 */
13972c840b1fSAlan Cox 		bp->b_iodone = bdone;
13984b03903aSPoul-Henning Kamp 		swp_pager_strategy(bp);
13991c7c3c6aSMatthew Dillon 
14001c7c3c6aSMatthew Dillon 		/*
140177923df2SAlan Cox 		 * Wait for the sync I/O to complete.
140226f9a767SRodney W. Grimes 		 */
14032c840b1fSAlan Cox 		bwait(bp, PVM, "swwrt");
140477923df2SAlan Cox 
14051c7c3c6aSMatthew Dillon 		/*
14061c7c3c6aSMatthew Dillon 		 * Now that we are through with the bp, we can call the
14071c7c3c6aSMatthew Dillon 		 * normal async completion, which frees everything up.
14081c7c3c6aSMatthew Dillon 		 */
14091c7c3c6aSMatthew Dillon 		swp_pager_async_iodone(bp);
14101c7c3c6aSMatthew Dillon 	}
141189f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object);
14121c7c3c6aSMatthew Dillon }
14131c7c3c6aSMatthew Dillon 
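/*
 * An illustrative, non-built sketch of the caller protocol described in
 * the comment above swap_pager_putpages(): take a pip reference per
 * page up front, then drop the references and soft-busy counts for
 * pages whose rtvals[] entry shows that no I/O was left pending.
 * "example_flush" is hypothetical and loosely mirrors what
 * vm_pageout_flush() does; the pages are assumed dirty, soft-busied,
 * and resident in "object", with the object write lock held on entry.
 */
#if 0
static void
example_flush(vm_object_t object, vm_page_t *ma, int count, int *rtvals)
{
	int i;

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_add(object, count);
	swap_pager_putpages(object, ma, count, VM_PAGER_PUT_SYNC, rtvals);
	for (i = 0; i < count; i++) {
		if (rtvals[i] != VM_PAGER_PEND) {
			/* No I/O is pending; drop our reference. */
			vm_object_pip_wakeup(object);
			vm_page_sunbusy(ma[i]);
		}
	}
}
#endif
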
14141c7c3c6aSMatthew Dillon /*
14151c7c3c6aSMatthew Dillon  *	swp_pager_async_iodone:
14161c7c3c6aSMatthew Dillon  *
14171c7c3c6aSMatthew Dillon  *	Completion routine for asynchronous reads and writes from/to swap.
14181c7c3c6aSMatthew Dillon  *	Also called manually by synchronous code to finish up a bp.
14191c7c3c6aSMatthew Dillon  *
142015523cf7SKonstantin Belousov  *	This routine may not sleep.
14211c7c3c6aSMatthew Dillon  */
14221c7c3c6aSMatthew Dillon static void
14232f249180SPoul-Henning Kamp swp_pager_async_iodone(struct buf *bp)
14241c7c3c6aSMatthew Dillon {
14251c7c3c6aSMatthew Dillon 	int i;
14261c7c3c6aSMatthew Dillon 	vm_object_t object = NULL;
14271c7c3c6aSMatthew Dillon 
14281c7c3c6aSMatthew Dillon 	/*
14291c7c3c6aSMatthew Dillon 	 * report error
14301c7c3c6aSMatthew Dillon 	 */
1431c244d2deSPoul-Henning Kamp 	if (bp->b_ioflags & BIO_ERROR) {
14321c7c3c6aSMatthew Dillon 		printf("swap_pager: I/O error - %s failed; blkno %ld, "
14331c7c3c6aSMatthew Dillon 		    "size %ld, error %d\n",
143521144e3bSPoul-Henning Kamp 		    bp->b_iocmd == BIO_READ ? "pagein" : "pageout",
14361c7c3c6aSMatthew Dillon 		    (long)bp->b_blkno, (long)bp->b_bcount, bp->b_error);
14371c7c3c6aSMatthew Dillon 	}
14411c7c3c6aSMatthew Dillon 
14421c7c3c6aSMatthew Dillon 	/*
144326f9a767SRodney W. Grimes 	 * remove the mapping for kernel virtual
144426f9a767SRodney W. Grimes 	 */
1445fade8dd7SJeff Roberson 	if (buf_mapped(bp))
14461c7c3c6aSMatthew Dillon 		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1447fade8dd7SJeff Roberson 	else
1448fade8dd7SJeff Roberson 		bp->b_data = bp->b_kvabase;
144926f9a767SRodney W. Grimes 
145033a609ecSAlan Cox 	if (bp->b_npages) {
145133a609ecSAlan Cox 		object = bp->b_pages[0]->object;
145289f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
145333a609ecSAlan Cox 	}
14542965a453SKip Macy 
145526f9a767SRodney W. Grimes 	/*
14561c7c3c6aSMatthew Dillon 	 * Clean up pages.  If an error occurs writing to swap, we are in
14571c7c3c6aSMatthew Dillon 	 * very serious trouble.  If it happens to be a disk error, though,
14581c7c3c6aSMatthew Dillon 	 * we may be able to recover by reassigning the swap later on.  So
14591c7c3c6aSMatthew Dillon 	 * in this case we remove the m->swapblk assignment for the page
14601c7c3c6aSMatthew Dillon 	 * but do not free it in the rlist.  The erroneous block(s) are thus
14611c7c3c6aSMatthew Dillon 	 * never reallocated as swap.  Redirty the page and continue.
146226f9a767SRodney W. Grimes 	 */
14631c7c3c6aSMatthew Dillon 	for (i = 0; i < bp->b_npages; ++i) {
14641c7c3c6aSMatthew Dillon 		vm_page_t m = bp->b_pages[i];
1465e47ed70bSJohn Dyson 
14665786be7cSAlan Cox 		m->oflags &= ~VPO_SWAPINPROG;
1467c7aebda8SAttilio Rao 		if (m->oflags & VPO_SWAPSLEEP) {
1468c7aebda8SAttilio Rao 			m->oflags &= ~VPO_SWAPSLEEP;
1469c7aebda8SAttilio Rao 			wakeup(&object->paging_in_progress);
1470c7aebda8SAttilio Rao 		}
1471e47ed70bSJohn Dyson 
1472c244d2deSPoul-Henning Kamp 		if (bp->b_ioflags & BIO_ERROR) {
1473ffc82b0aSJohn Dyson 			/*
14741c7c3c6aSMatthew Dillon 			 * If an error occurs I'd love to throw the swapblk
14751c7c3c6aSMatthew Dillon 			 * away without freeing it back to swapspace, so it
14761c7c3c6aSMatthew Dillon 			 * can never be used again.  But I can't from an
14771c7c3c6aSMatthew Dillon 			 * interrupt.
1478ffc82b0aSJohn Dyson 			 */
147921144e3bSPoul-Henning Kamp 			if (bp->b_iocmd == BIO_READ) {
14801c7c3c6aSMatthew Dillon 				/*
14811c7c3c6aSMatthew Dillon 				 * NOTE: for reads, m->dirty will probably
1482956f3135SPhilippe Charnier 				 * be overridden by the original caller of
14831c7c3c6aSMatthew Dillon 				 * getpages so don't play cute tricks here.
14841c7c3c6aSMatthew Dillon 				 */
14851c7c3c6aSMatthew Dillon 				m->valid = 0;
14861c7c3c6aSMatthew Dillon 			} else {
14871c7c3c6aSMatthew Dillon 				/*
14881c7c3c6aSMatthew Dillon 				 * If a write error occurs, reactivate page
14891c7c3c6aSMatthew Dillon 				 * so it doesn't clog the inactive list,
14901c7c3c6aSMatthew Dillon 				 * then finish the I/O.
14911c7c3c6aSMatthew Dillon 				 */
14927dbf82dcSMatthew Dillon 				vm_page_dirty(m);
14933c4a2440SAlan Cox 				vm_page_lock(m);
14941c7c3c6aSMatthew Dillon 				vm_page_activate(m);
14953c4a2440SAlan Cox 				vm_page_unlock(m);
1496c7aebda8SAttilio Rao 				vm_page_sunbusy(m);
14971c7c3c6aSMatthew Dillon 			}
149821144e3bSPoul-Henning Kamp 		} else if (bp->b_iocmd == BIO_READ) {
14991c7c3c6aSMatthew Dillon 			/*
15001c7c3c6aSMatthew Dillon 			 * NOTE: for reads, m->dirty will probably be
1501956f3135SPhilippe Charnier 			 * overridden by the original caller of getpages so
15021c7c3c6aSMatthew Dillon 			 * we cannot set them in order to free the underlying
15031c7c3c6aSMatthew Dillon 			 * swap in a low-swap situation.  I don't think we'd
15041c7c3c6aSMatthew Dillon 			 * want to do that anyway, but it was an optimization
15051c7c3c6aSMatthew Dillon 			 * that existed in the old swapper for a time before
15061c7c3c6aSMatthew Dillon 			 * it got ripped out due to precisely this problem.
15071c7c3c6aSMatthew Dillon 			 */
1508016a3c93SAlan Cox 			KASSERT(!pmap_page_is_mapped(m),
1509016a3c93SAlan Cox 			    ("swp_pager_async_iodone: page %p is mapped", m));
1510016a3c93SAlan Cox 			KASSERT(m->dirty == 0,
1511016a3c93SAlan Cox 			    ("swp_pager_async_iodone: page %p is dirty", m));
1512915d1b71SMark Johnston 
1513b0cd2017SGleb Smirnoff 			m->valid = VM_PAGE_BITS_ALL;
1514915d1b71SMark Johnston 			if (i < bp->b_pgbefore ||
1515915d1b71SMark Johnston 			    i >= bp->b_npages - bp->b_pgafter)
1516915d1b71SMark Johnston 				vm_page_readahead_finish(m);
15171c7c3c6aSMatthew Dillon 		} else {
15181c7c3c6aSMatthew Dillon 			/*
1519016a3c93SAlan Cox 			 * For write success, clear the dirty
15201c7c3c6aSMatthew Dillon 			 * status, then finish the I/O (which decrements the
15211c7c3c6aSMatthew Dillon 			 * busy count and possibly wakes waiters up).
1522ebcddc72SAlan Cox 			 * A page is only written to swap after a period of
1523ebcddc72SAlan Cox 			 * inactivity.  Therefore, we do not expect it to be
1524ebcddc72SAlan Cox 			 * reused.
15251c7c3c6aSMatthew Dillon 			 */
15266031c68dSAlan Cox 			KASSERT(!pmap_page_is_write_mapped(m),
1527016a3c93SAlan Cox 			    ("swp_pager_async_iodone: page %p is not write"
1528016a3c93SAlan Cox 			    " protected", m));
1529c52e7044SAlan Cox 			vm_page_undirty(m);
15303c4a2440SAlan Cox 			vm_page_lock(m);
1531ebcddc72SAlan Cox 			vm_page_deactivate_noreuse(m);
15322965a453SKip Macy 			vm_page_unlock(m);
1533ebcddc72SAlan Cox 			vm_page_sunbusy(m);
15343c4a2440SAlan Cox 		}
15353c4a2440SAlan Cox 	}
153626f9a767SRodney W. Grimes 
15371c7c3c6aSMatthew Dillon 	/*
15381c7c3c6aSMatthew Dillon 	 * adjust pip.  NOTE: the original parent may still have its own
15391c7c3c6aSMatthew Dillon 	 * pip refs on the object.
15401c7c3c6aSMatthew Dillon 	 */
15410d420ad3SAlan Cox 	if (object != NULL) {
15421c7c3c6aSMatthew Dillon 		vm_object_pip_wakeupn(object, bp->b_npages);
154389f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
15440d420ad3SAlan Cox 	}
154526f9a767SRodney W. Grimes 
15461c7c3c6aSMatthew Dillon 	/*
1547100650deSOlivier Houchard 	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1548100650deSOlivier Houchard 	 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1549100650deSOlivier Houchard 	 * bstrategy().  Set them back to NULL now that we're done with it, or we'll
1550100650deSOlivier Houchard 	 */
1551100650deSOlivier Houchard 	if (bp->b_vp) {
1552100650deSOlivier Houchard 		bp->b_vp = NULL;
1553100650deSOlivier Houchard 		bp->b_bufobj = NULL;
1554100650deSOlivier Houchard 	}
1555100650deSOlivier Houchard 	/*
15561c7c3c6aSMatthew Dillon 	 * release the physical I/O buffer
15571c7c3c6aSMatthew Dillon 	 */
1558327f4e83SMatthew Dillon 	relpbuf(bp, bp->b_iocmd == BIO_READ ? &nsw_rcount :
155921144e3bSPoul-Henning Kamp 	    (bp->b_flags & B_ASYNC) != 0 ? &nsw_wcount_async :
1560327f4e83SMatthew Dillon 	    &nsw_wcount_sync);
156726f9a767SRodney W. Grimes }
15681c7c3c6aSMatthew Dillon 
1569b1fd102eSMark Johnston int
1570b1fd102eSMark Johnston swap_pager_nswapdev(void)
1571b1fd102eSMark Johnston {
1572b1fd102eSMark Johnston 
1573b1fd102eSMark Johnston 	return (nswapdev);
1574b1fd102eSMark Johnston }
1575b1fd102eSMark Johnston 
157692da00bbSMatthew Dillon /*
157792da00bbSMatthew Dillon  * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
157892da00bbSMatthew Dillon  *
1579ebcddc72SAlan Cox  *	This routine dissociates the page at the given index within an object
1580ebcddc72SAlan Cox  *	from its backing store, paging it in if it does not reside in memory.
1581ebcddc72SAlan Cox  *	If the page is paged in, it is marked dirty and placed in the laundry
1582ebcddc72SAlan Cox  *	queue.  The page is marked dirty because it no longer has backing
1583ebcddc72SAlan Cox  *	store.  It is placed in the laundry queue because it has not been
1584ebcddc72SAlan Cox  *	accessed recently.  Otherwise, it would already reside in memory.
1585ebcddc72SAlan Cox  *
1586ebcddc72SAlan Cox  *	We also attempt to swap in all other pages in the swap block.
1587ebcddc72SAlan Cox  *	However, we only guarantee that the one at the specified index is
158892da00bbSMatthew Dillon  *	paged in.
158992da00bbSMatthew Dillon  *
159092da00bbSMatthew Dillon  *	XXX - The code to page the whole block in doesn't work, so we
159192da00bbSMatthew Dillon  *	      revert to the one-by-one behavior for now.  Sigh.
159292da00bbSMatthew Dillon  */
159362a59e8fSWarner Losh static inline void
1594b3fed13eSDavid Schultz swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
159592da00bbSMatthew Dillon {
159692da00bbSMatthew Dillon 	vm_page_t m;
159792da00bbSMatthew Dillon 
159892da00bbSMatthew Dillon 	vm_object_pip_add(object, 1);
15995944de8eSKonstantin Belousov 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
160092da00bbSMatthew Dillon 	if (m->valid == VM_PAGE_BITS_ALL) {
16010d8243ccSAttilio Rao 		vm_object_pip_wakeup(object);
160292da00bbSMatthew Dillon 		vm_page_dirty(m);
1603d061cdd5SAlan Cox 		vm_page_lock(m);
1604d061cdd5SAlan Cox 		vm_page_activate(m);
16052965a453SKip Macy 		vm_page_unlock(m);
1606c7aebda8SAttilio Rao 		vm_page_xunbusy(m);
160792da00bbSMatthew Dillon 		vm_pager_page_unswapped(m);
160892da00bbSMatthew Dillon 		return;
160992da00bbSMatthew Dillon 	}
161092da00bbSMatthew Dillon 
1611b0cd2017SGleb Smirnoff 	if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
161292da00bbSMatthew Dillon 		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
16130d8243ccSAttilio Rao 	vm_object_pip_wakeup(object);
161492da00bbSMatthew Dillon 	vm_page_dirty(m);
1615db1f085eSAlan Cox 	vm_page_lock(m);
1616ebcddc72SAlan Cox 	vm_page_launder(m);
16172965a453SKip Macy 	vm_page_unlock(m);
1618c7aebda8SAttilio Rao 	vm_page_xunbusy(m);
161992da00bbSMatthew Dillon 	vm_pager_page_unswapped(m);
162092da00bbSMatthew Dillon }
162192da00bbSMatthew Dillon 
162292da00bbSMatthew Dillon /*
162392da00bbSMatthew Dillon  *	swap_pager_swapoff:
162492da00bbSMatthew Dillon  *
162592da00bbSMatthew Dillon  *	Page in all of the pages that have been paged out to the
162692da00bbSMatthew Dillon  *	given device.  The corresponding blocks in the bitmap must be
162792da00bbSMatthew Dillon  *	marked as allocated and the device must be flagged SW_CLOSING.
162892da00bbSMatthew Dillon  *	There may be no processes swapped out to the device.
162992da00bbSMatthew Dillon  *
163092da00bbSMatthew Dillon  *	This routine may block.
163192da00bbSMatthew Dillon  */
1632e9c0cc15SPoul-Henning Kamp static void
1633b3fed13eSDavid Schultz swap_pager_swapoff(struct swdevt *sp)
163492da00bbSMatthew Dillon {
1635*f425ab8eSKonstantin Belousov 	struct swblk *sb;
1636*f425ab8eSKonstantin Belousov 	vm_object_t object;
1637*f425ab8eSKonstantin Belousov 	vm_pindex_t pi;
1638*f425ab8eSKonstantin Belousov 	int i, retries;
163992da00bbSMatthew Dillon 
164004533e1eSKonstantin Belousov 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
164192da00bbSMatthew Dillon 
16428bc61209SDavid Schultz 	retries = 0;
164392da00bbSMatthew Dillon full_rescan:
1644*f425ab8eSKonstantin Belousov 	mtx_lock(&vm_object_list_mtx);
1645*f425ab8eSKonstantin Belousov 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
1646*f425ab8eSKonstantin Belousov 		if (object->type != OBJT_SWAP)
164798150664SKonstantin Belousov 			continue;
1648*f425ab8eSKonstantin Belousov 		mtx_unlock(&vm_object_list_mtx);
164998150664SKonstantin Belousov 		/* Depends on type-stability. */
165098150664SKonstantin Belousov 		VM_OBJECT_WLOCK(object);
1651*f425ab8eSKonstantin Belousov 
1652*f425ab8eSKonstantin Belousov 		/*
1653*f425ab8eSKonstantin Belousov 		 * Dead objects are eventually terminated on their own.
1654*f425ab8eSKonstantin Belousov 		 */
1655*f425ab8eSKonstantin Belousov 		if ((object->flags & OBJ_DEAD) != 0)
1656*f425ab8eSKonstantin Belousov 			goto next_obj;
1657*f425ab8eSKonstantin Belousov 
1658*f425ab8eSKonstantin Belousov 		/*
1659*f425ab8eSKonstantin Belousov 		 * Sync with fences placed after pctrie
1660*f425ab8eSKonstantin Belousov 		 * initialization.  We must not access pctrie below
1661*f425ab8eSKonstantin Belousov 		 * unless we checked that our object is swap and not
1662*f425ab8eSKonstantin Belousov 		 * dead.
1663*f425ab8eSKonstantin Belousov 		 */
1664*f425ab8eSKonstantin Belousov 		atomic_thread_fence_acq();
1665*f425ab8eSKonstantin Belousov 		if (object->type != OBJT_SWAP)
1666*f425ab8eSKonstantin Belousov 			goto next_obj;
1667*f425ab8eSKonstantin Belousov 
1668*f425ab8eSKonstantin Belousov 		for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1669*f425ab8eSKonstantin Belousov 		    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
1670*f425ab8eSKonstantin Belousov 			pi = sb->p + SWAP_META_PAGES;
1671*f425ab8eSKonstantin Belousov 			for (i = 0; i < SWAP_META_PAGES; i++) {
1672*f425ab8eSKonstantin Belousov 				if (sb->d[i] == SWAPBLK_NONE)
1673*f425ab8eSKonstantin Belousov 					continue;
1674*f425ab8eSKonstantin Belousov 				if (swp_pager_isondev(sb->d[i], sp))
1675*f425ab8eSKonstantin Belousov 					swp_pager_force_pagein(object,
1676*f425ab8eSKonstantin Belousov 					    sb->p + i);
167798150664SKonstantin Belousov 			}
167898150664SKonstantin Belousov 		}
1679*f425ab8eSKonstantin Belousov next_obj:
1680*f425ab8eSKonstantin Belousov 		VM_OBJECT_WUNLOCK(object);
1681*f425ab8eSKonstantin Belousov 		mtx_lock(&vm_object_list_mtx);
168292da00bbSMatthew Dillon 	}
1683*f425ab8eSKonstantin Belousov 	mtx_unlock(&vm_object_list_mtx);
1684*f425ab8eSKonstantin Belousov 
16858bc61209SDavid Schultz 	if (sp->sw_used) {
168692da00bbSMatthew Dillon 		/*
16878bc61209SDavid Schultz 		 * Objects may be locked or paging to the device being
16888bc61209SDavid Schultz 		 * removed, so we will miss their pages and need to
16898bc61209SDavid Schultz 		 * make another pass.  We have marked this device as
16908bc61209SDavid Schultz 		 * SW_CLOSING, so the activity should finish soon.
169192da00bbSMatthew Dillon 		 */
16928bc61209SDavid Schultz 		retries++;
16938bc61209SDavid Schultz 		if (retries > 100) {
16948bc61209SDavid Schultz 			panic("swapoff: failed to locate %d swap blocks",
16958bc61209SDavid Schultz 			    sp->sw_used);
16968bc61209SDavid Schultz 		}
16974d70511aSJohn Baldwin 		pause("swpoff", hz / 20);
169892da00bbSMatthew Dillon 		goto full_rescan;
169992da00bbSMatthew Dillon 	}
1700b1fd102eSMark Johnston 	EVENTHANDLER_INVOKE(swapoff, sp);
170192da00bbSMatthew Dillon }
170292da00bbSMatthew Dillon 
17031c7c3c6aSMatthew Dillon /************************************************************************
17041c7c3c6aSMatthew Dillon  *				SWAP META DATA 				*
17051c7c3c6aSMatthew Dillon  ************************************************************************
17061c7c3c6aSMatthew Dillon  *
17071c7c3c6aSMatthew Dillon  *	These routines manipulate the swap metadata stored in the
1708cec9f109SDavid E. O'Brien  *	OBJT_SWAP object.
17091c7c3c6aSMatthew Dillon  *
17104dcc5c2dSMatthew Dillon  *	Swap metadata is stored in a per-object pctrie of swblk
17114dcc5c2dSMatthew Dillon  *	structures, rooted at object->un_pager.swp.swp_blks.  Each swblk
17124dcc5c2dSMatthew Dillon  *	maps a run of SWAP_META_PAGES page indexes to swap block addresses.
17131c7c3c6aSMatthew Dillon  */
17141c7c3c6aSMatthew Dillon 
17151c7c3c6aSMatthew Dillon /*
17161c7c3c6aSMatthew Dillon  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
17171c7c3c6aSMatthew Dillon  *
17181c7c3c6aSMatthew Dillon  *	We first convert the object to a swap object if it is a default
17191c7c3c6aSMatthew Dillon  *	object.
17201c7c3c6aSMatthew Dillon  *
17211c7c3c6aSMatthew Dillon  *	The specified swapblk is added to the object's swap metadata.  If
17221c7c3c6aSMatthew Dillon  *	the swapblk is not valid, it is freed instead.  Any previously
17231c7c3c6aSMatthew Dillon  *	assigned swapblk is freed.
17241c7c3c6aSMatthew Dillon  */
17251c7c3c6aSMatthew Dillon static void
17262f249180SPoul-Henning Kamp swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
17272f249180SPoul-Henning Kamp {
1728*f425ab8eSKonstantin Belousov 	static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
1729*f425ab8eSKonstantin Belousov 	struct swblk *sb;
1730*f425ab8eSKonstantin Belousov 	vm_pindex_t modpi, rdpi;
1731*f425ab8eSKonstantin Belousov 	int error, i;
17321c7c3c6aSMatthew Dillon 
173389f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
1734*f425ab8eSKonstantin Belousov 
17351c7c3c6aSMatthew Dillon 	/*
17361c7c3c6aSMatthew Dillon 	 * Convert default object to swap object if necessary
17371c7c3c6aSMatthew Dillon 	 */
17381c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP) {
1739*f425ab8eSKonstantin Belousov 		pctrie_init(&object->un_pager.swp.swp_blks);
1740*f425ab8eSKonstantin Belousov 
1741*f425ab8eSKonstantin Belousov 		/*
1742*f425ab8eSKonstantin Belousov 		 * Ensure that swap_pager_swapoff()'s iteration over
1743*f425ab8eSKonstantin Belousov 		 * object_list does not see a garbage pctrie.
1744*f425ab8eSKonstantin Belousov 		 */
1745*f425ab8eSKonstantin Belousov 		atomic_thread_fence_rel();
1746*f425ab8eSKonstantin Belousov 
17471c7c3c6aSMatthew Dillon 		object->type = OBJT_SWAP;
1748eb4d6a1bSKonstantin Belousov 		KASSERT(object->handle == NULL, ("default pager with handle"));
1749bd228075SAlan Cox 	}
17501c7c3c6aSMatthew Dillon 
1751*f425ab8eSKonstantin Belousov 	rdpi = rounddown(pindex, SWAP_META_PAGES);
1752*f425ab8eSKonstantin Belousov 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
1753*f425ab8eSKonstantin Belousov 	if (sb == NULL) {
17541c7c3c6aSMatthew Dillon 		if (swapblk == SWAPBLK_NONE)
1755*f425ab8eSKonstantin Belousov 			return;
1756*f425ab8eSKonstantin Belousov 		for (;;) {
1757*f425ab8eSKonstantin Belousov 			sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
1758*f425ab8eSKonstantin Belousov 			    pageproc ? M_USE_RESERVE : 0));
1759*f425ab8eSKonstantin Belousov 			if (sb != NULL) {
1760*f425ab8eSKonstantin Belousov 				sb->p = rdpi;
1761*f425ab8eSKonstantin Belousov 				for (i = 0; i < SWAP_META_PAGES; i++)
1762*f425ab8eSKonstantin Belousov 					sb->d[i] = SWAPBLK_NONE;
1763*f425ab8eSKonstantin Belousov 				if (atomic_cmpset_int(&swblk_zone_exhausted,
1764*f425ab8eSKonstantin Belousov 				    1, 0))
1765*f425ab8eSKonstantin Belousov 					printf("swblk zone ok\n");
1766*f425ab8eSKonstantin Belousov 				break;
1767*f425ab8eSKonstantin Belousov 			}
176889f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(object);
1769*f425ab8eSKonstantin Belousov 			if (uma_zone_exhausted(swblk_zone)) {
1770*f425ab8eSKonstantin Belousov 				if (atomic_cmpset_int(&swblk_zone_exhausted,
1771*f425ab8eSKonstantin Belousov 				    0, 1))
1772*f425ab8eSKonstantin Belousov 					printf("swap blk zone exhausted, "
17733ff863f1SDag-Erling Smørgrav 					    "increase kern.maxswzone\n");
17742025d69bSKonstantin Belousov 				vm_pageout_oom(VM_OOM_SWAPZ);
1775*f425ab8eSKonstantin Belousov 				pause("swzonxb", 10);
17762025d69bSKonstantin Belousov 			} else
17774dcc5c2dSMatthew Dillon 				VM_WAIT;
177889f6b863SAttilio Rao 			VM_OBJECT_WLOCK(object);
17794dcc5c2dSMatthew Dillon 		}
1780*f425ab8eSKonstantin Belousov 		for (;;) {
1781*f425ab8eSKonstantin Belousov 			error = SWAP_PCTRIE_INSERT(
1782*f425ab8eSKonstantin Belousov 			    &object->un_pager.swp.swp_blks, sb);
1783*f425ab8eSKonstantin Belousov 			if (error == 0) {
1784*f425ab8eSKonstantin Belousov 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
1785*f425ab8eSKonstantin Belousov 				    1, 0))
1786*f425ab8eSKonstantin Belousov 					printf("swpctrie zone ok\n");
1787*f425ab8eSKonstantin Belousov 				break;
17881c7c3c6aSMatthew Dillon 			}
1789*f425ab8eSKonstantin Belousov 			VM_OBJECT_WUNLOCK(object);
1790*f425ab8eSKonstantin Belousov 			if (uma_zone_exhausted(swpctrie_zone)) {
1791*f425ab8eSKonstantin Belousov 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
1792*f425ab8eSKonstantin Belousov 				    0, 1))
1793*f425ab8eSKonstantin Belousov 					printf("swap pctrie zone exhausted, "
1794*f425ab8eSKonstantin Belousov 					    "increase kern.maxswzone\n");
1795*f425ab8eSKonstantin Belousov 				vm_pageout_oom(VM_OOM_SWAPZ);
1796*f425ab8eSKonstantin Belousov 				pause("swzonxp", 10);
1797*f425ab8eSKonstantin Belousov 			} else
1798*f425ab8eSKonstantin Belousov 				VM_WAIT;
1799*f425ab8eSKonstantin Belousov 			VM_OBJECT_WLOCK(object);
18001c7c3c6aSMatthew Dillon 		}
1801*f425ab8eSKonstantin Belousov 	}
1802*f425ab8eSKonstantin Belousov 	MPASS(sb->p == rdpi);
18031c7c3c6aSMatthew Dillon 
1804*f425ab8eSKonstantin Belousov 	modpi = pindex % SWAP_META_PAGES;
1805*f425ab8eSKonstantin Belousov 	/* Delete prior contents of metadata. */
1806*f425ab8eSKonstantin Belousov 	if (sb->d[modpi] != SWAPBLK_NONE)
1807*f425ab8eSKonstantin Belousov 		swp_pager_freeswapspace(sb->d[modpi], 1);
1808*f425ab8eSKonstantin Belousov 	/* Enter block into metadata. */
1809*f425ab8eSKonstantin Belousov 	sb->d[modpi] = swapblk;
18101c7c3c6aSMatthew Dillon }
18111c7c3c6aSMatthew Dillon 
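/*
 * Worked example of the rdpi/modpi arithmetic above, assuming (for
 * illustration only) that SWAP_META_PAGES is 32: pindex 1000 lands in
 * the swblk keyed at rdpi == rounddown(1000, 32) == 992, in slot
 * modpi == 1000 % 32 == 8.
 */
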
18121c7c3c6aSMatthew Dillon /*
18131c7c3c6aSMatthew Dillon  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
18141c7c3c6aSMatthew Dillon  *
18151c7c3c6aSMatthew Dillon  *	The requested range of blocks is freed, with any associated swap
18161c7c3c6aSMatthew Dillon  *	returned to the swap bitmap.
18171c7c3c6aSMatthew Dillon  *
18181c7c3c6aSMatthew Dillon  *	This routine will free swap metadata structures as they are cleaned
18191c7c3c6aSMatthew Dillon  *	out.  This routine does *NOT* operate on swap metadata associated
18201c7c3c6aSMatthew Dillon  *	with resident pages.
18211c7c3c6aSMatthew Dillon  */
18221c7c3c6aSMatthew Dillon static void
1823*f425ab8eSKonstantin Belousov swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count)
18241c7c3c6aSMatthew Dillon {
1825*f425ab8eSKonstantin Belousov 	struct swblk *sb;
1826*f425ab8eSKonstantin Belousov 	vm_pindex_t last;
1827*f425ab8eSKonstantin Belousov 	int i;
1828*f425ab8eSKonstantin Belousov 	bool empty;
18292928cef7SAlan Cox 
1830c25673ffSAttilio Rao 	VM_OBJECT_ASSERT_LOCKED(object);
18312e56b64fSKonstantin Belousov 	if (object->type != OBJT_SWAP || count == 0)
18321c7c3c6aSMatthew Dillon 		return;
18331c7c3c6aSMatthew Dillon 
1834*f425ab8eSKonstantin Belousov 	last = pindex + count - 1;
1835*f425ab8eSKonstantin Belousov 	for (;;) {
1836*f425ab8eSKonstantin Belousov 		sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
1837*f425ab8eSKonstantin Belousov 		    rounddown(pindex, SWAP_META_PAGES));
1838*f425ab8eSKonstantin Belousov 		if (sb == NULL || sb->p > last)
18392e56b64fSKonstantin Belousov 			break;
1840*f425ab8eSKonstantin Belousov 		empty = true;
1841*f425ab8eSKonstantin Belousov 		for (i = 0; i < SWAP_META_PAGES; i++) {
1842*f425ab8eSKonstantin Belousov 			if (sb->d[i] == SWAPBLK_NONE)
1843*f425ab8eSKonstantin Belousov 				continue;
1844*f425ab8eSKonstantin Belousov 			if (pindex <= sb->p + i && sb->p + i <= last) {
1845*f425ab8eSKonstantin Belousov 				swp_pager_freeswapspace(sb->d[i], 1);
1846*f425ab8eSKonstantin Belousov 				sb->d[i] = SWAPBLK_NONE;
1847*f425ab8eSKonstantin Belousov 			} else
1848*f425ab8eSKonstantin Belousov 				empty = false;
1849*f425ab8eSKonstantin Belousov 		}
1850*f425ab8eSKonstantin Belousov 		pindex = sb->p + SWAP_META_PAGES;
1851*f425ab8eSKonstantin Belousov 		if (empty) {
1852*f425ab8eSKonstantin Belousov 			SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
1853*f425ab8eSKonstantin Belousov 			    sb->p);
1854*f425ab8eSKonstantin Belousov 			uma_zfree(swblk_zone, sb);
18551c7c3c6aSMatthew Dillon 		}
18561c7c3c6aSMatthew Dillon 	}
18571c7c3c6aSMatthew Dillon }
18581c7c3c6aSMatthew Dillon 
18591c7c3c6aSMatthew Dillon /*
18601c7c3c6aSMatthew Dillon  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
18611c7c3c6aSMatthew Dillon  *
18621c7c3c6aSMatthew Dillon  *	This routine locates and destroys all swap metadata associated with
18631c7c3c6aSMatthew Dillon  *	an object.
18641c7c3c6aSMatthew Dillon  */
18651c7c3c6aSMatthew Dillon static void
18661c7c3c6aSMatthew Dillon swp_pager_meta_free_all(vm_object_t object)
18671c7c3c6aSMatthew Dillon {
1868*f425ab8eSKonstantin Belousov 	struct swblk *sb;
1869*f425ab8eSKonstantin Belousov 	vm_pindex_t pindex;
187071057cd2SKonstantin Belousov 	int i;
18711c7c3c6aSMatthew Dillon 
187289f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
18731c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP)
18741c7c3c6aSMatthew Dillon 		return;
18751c7c3c6aSMatthew Dillon 
1876*f425ab8eSKonstantin Belousov 	for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1877*f425ab8eSKonstantin Belousov 	    &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
1878*f425ab8eSKonstantin Belousov 		pindex = sb->p + SWAP_META_PAGES;
1879*f425ab8eSKonstantin Belousov 		for (i = 0; i < SWAP_META_PAGES; i++) {
1880*f425ab8eSKonstantin Belousov 			if (sb->d[i] != SWAPBLK_NONE)
1881*f425ab8eSKonstantin Belousov 				swp_pager_freeswapspace(sb->d[i], 1);
18821c7c3c6aSMatthew Dillon 		}
1883*f425ab8eSKonstantin Belousov 		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
1884*f425ab8eSKonstantin Belousov 		uma_zfree(swblk_zone, sb);
18851c7c3c6aSMatthew Dillon 	}
18861c7c3c6aSMatthew Dillon }
18871c7c3c6aSMatthew Dillon 
18881c7c3c6aSMatthew Dillon /*
18891c7c3c6aSMatthew Dillon  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
18901c7c3c6aSMatthew Dillon  *
18911c7c3c6aSMatthew Dillon  *	This routine is capable of looking up, popping, or freeing
18921c7c3c6aSMatthew Dillon  *	swapblk assignments in the swap meta data.  The routine returns the
18931c7c3c6aSMatthew Dillon  *	swapblk being looked up or popped, or SWAPBLK_NONE if the block was
18941c7c3c6aSMatthew Dillon  *	freed or was invalid.  This routine will automatically free any invalid
18961c7c3c6aSMatthew Dillon  *	meta-data swapblks.
18971c7c3c6aSMatthew Dillon  *
18981c7c3c6aSMatthew Dillon  *	When acting on a busy resident page and paging is in progress, we
18991c7c3c6aSMatthew Dillon  *	have to wait until paging is complete but otherwise can act on the
19001c7c3c6aSMatthew Dillon  *	busy page.
19011c7c3c6aSMatthew Dillon  *
19024dcc5c2dSMatthew Dillon  *	SWM_FREE	remove and free swap block from metadata
19031c7c3c6aSMatthew Dillon  *	SWM_POP		remove from meta data but do not free it; pop it out
19041c7c3c6aSMatthew Dillon  */
19051c7c3c6aSMatthew Dillon static daddr_t
19062f249180SPoul-Henning Kamp swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
19072f249180SPoul-Henning Kamp {
1908*f425ab8eSKonstantin Belousov 	struct swblk *sb;
19094dcc5c2dSMatthew Dillon 	daddr_t r1;
1910*f425ab8eSKonstantin Belousov 	int i;
19114dcc5c2dSMatthew Dillon 
1912c25673ffSAttilio Rao 	VM_OBJECT_ASSERT_LOCKED(object);
19131c7c3c6aSMatthew Dillon 	/*
19141c7c3c6aSMatthew Dillon 	 * The meta data only exists if the object is OBJT_SWAP,
19151c7c3c6aSMatthew Dillon 	 * and even then it might not be allocated yet.
19161c7c3c6aSMatthew Dillon 	 */
19174dcc5c2dSMatthew Dillon 	if (object->type != OBJT_SWAP)
19181c7c3c6aSMatthew Dillon 		return (SWAPBLK_NONE);
19191c7c3c6aSMatthew Dillon 
1920*f425ab8eSKonstantin Belousov 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1921*f425ab8eSKonstantin Belousov 	    rounddown(pindex, SWAP_META_PAGES));
1922*f425ab8eSKonstantin Belousov 	if (sb == NULL)
1923*f425ab8eSKonstantin Belousov 		return (SWAPBLK_NONE);
1924*f425ab8eSKonstantin Belousov 	r1 = sb->d[pindex % SWAP_META_PAGES];
1925*f425ab8eSKonstantin Belousov 	if (r1 == SWAPBLK_NONE)
1926*f425ab8eSKonstantin Belousov 		return (SWAPBLK_NONE);
1927*f425ab8eSKonstantin Belousov 	if ((flags & (SWM_FREE | SWM_POP)) != 0) {
1928*f425ab8eSKonstantin Belousov 		sb->d[pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
1929*f425ab8eSKonstantin Belousov 		for (i = 0; i < SWAP_META_PAGES; i++) {
1930*f425ab8eSKonstantin Belousov 			if (sb->d[i] != SWAPBLK_NONE)
1931*f425ab8eSKonstantin Belousov 				break;
1932*f425ab8eSKonstantin Belousov 		}
1933*f425ab8eSKonstantin Belousov 		if (i == SWAP_META_PAGES) {
1934*f425ab8eSKonstantin Belousov 			SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
1935*f425ab8eSKonstantin Belousov 			    rounddown(pindex, SWAP_META_PAGES));
1936*f425ab8eSKonstantin Belousov 			uma_zfree(swblk_zone, sb);
1937*f425ab8eSKonstantin Belousov 		}
1938*f425ab8eSKonstantin Belousov 	}
1939*f425ab8eSKonstantin Belousov 	if ((flags & SWM_FREE) != 0) {
19404dcc5c2dSMatthew Dillon 		swp_pager_freeswapspace(r1, 1);
19411c7c3c6aSMatthew Dillon 		r1 = SWAPBLK_NONE;
19421c7c3c6aSMatthew Dillon 	}
19431c7c3c6aSMatthew Dillon 	return (r1);
19441c7c3c6aSMatthew Dillon }
19451c7c3c6aSMatthew Dillon 
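/*
 * An illustrative, non-built sketch of the SWM_POP flag described
 * above: take ownership of a page's swap block without freeing it,
 * leaving the metadata slot empty.  "example_pop_block" is
 * hypothetical.
 */
#if 0
static daddr_t
example_pop_block(vm_object_t object, vm_pindex_t pindex)
{
	daddr_t blk;

	VM_OBJECT_WLOCK(object);
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);
	VM_OBJECT_WUNLOCK(object);
	return (blk);	/* SWAPBLK_NONE if no block was assigned */
}
#endif
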
1946e9c0cc15SPoul-Henning Kamp /*
194777d6fd97SKonstantin Belousov  * Returns the least page index which is greater than or equal to the
194877d6fd97SKonstantin Belousov  * parameter pindex and for which there is a swap block allocated.
194977d6fd97SKonstantin Belousov  * Returns object's size if the object's type is not swap or if there
195077d6fd97SKonstantin Belousov  * are no allocated swap blocks for the object after the requested
195177d6fd97SKonstantin Belousov  * pindex.
195277d6fd97SKonstantin Belousov  */
195377d6fd97SKonstantin Belousov vm_pindex_t
195477d6fd97SKonstantin Belousov swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
195577d6fd97SKonstantin Belousov {
1956*f425ab8eSKonstantin Belousov 	struct swblk *sb;
1957*f425ab8eSKonstantin Belousov 	int i;
195877d6fd97SKonstantin Belousov 
195977d6fd97SKonstantin Belousov 	VM_OBJECT_ASSERT_LOCKED(object);
1960*f425ab8eSKonstantin Belousov 	if (object->type != OBJT_SWAP)
196177d6fd97SKonstantin Belousov 		return (object->size);
196277d6fd97SKonstantin Belousov 
1963*f425ab8eSKonstantin Belousov 	sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
1964*f425ab8eSKonstantin Belousov 	    rounddown(pindex, SWAP_META_PAGES));
1965*f425ab8eSKonstantin Belousov 	if (sb == NULL)
1966*f425ab8eSKonstantin Belousov 		return (object->size);
1967*f425ab8eSKonstantin Belousov 	if (sb->p < pindex) {
1968*f425ab8eSKonstantin Belousov 		for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
1969*f425ab8eSKonstantin Belousov 			if (sb->d[i] != SWAPBLK_NONE)
1970*f425ab8eSKonstantin Belousov 				return (sb->p + i);
197177d6fd97SKonstantin Belousov 		}
1972*f425ab8eSKonstantin Belousov 		sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
1973*f425ab8eSKonstantin Belousov 		    roundup(pindex, SWAP_META_PAGES));
1974*f425ab8eSKonstantin Belousov 		if (sb == NULL)
1975*f425ab8eSKonstantin Belousov 			return (object->size);
197677d6fd97SKonstantin Belousov 	}
1977*f425ab8eSKonstantin Belousov 	for (i = 0; i < SWAP_META_PAGES; i++) {
1978*f425ab8eSKonstantin Belousov 		if (sb->d[i] != SWAPBLK_NONE)
1979*f425ab8eSKonstantin Belousov 			return (sb->p + i);
198077d6fd97SKonstantin Belousov 	}
1981*f425ab8eSKonstantin Belousov 
1982*f425ab8eSKonstantin Belousov 	/*
1983*f425ab8eSKonstantin Belousov 	 * We get here if a swblk is present in the trie but it
1984*f425ab8eSKonstantin Belousov 	 * doesn't map any blocks.
1985*f425ab8eSKonstantin Belousov 	 */
1986*f425ab8eSKonstantin Belousov 	MPASS(0);
1987*f425ab8eSKonstantin Belousov 	return (object->size);
198877d6fd97SKonstantin Belousov }
198977d6fd97SKonstantin Belousov 
199077d6fd97SKonstantin Belousov /*
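/*
 * An illustrative, non-built sketch using swap_pager_find_least() as a
 * cursor over every swap-backed page index in an object.
 * "example_scan_swapped" is hypothetical.
 */
#if 0
static void
example_scan_swapped(vm_object_t object)
{
	vm_pindex_t pi;

	VM_OBJECT_RLOCK(object);
	for (pi = swap_pager_find_least(object, 0); pi < object->size;
	    pi = swap_pager_find_least(object, pi + 1))
		printf("pindex %ju has a swap block\n", (uintmax_t)pi);
	VM_OBJECT_RUNLOCK(object);
}
#endif
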
1991e9c0cc15SPoul-Henning Kamp  * System call swapon(name) enables swapping on device name,
1992e9c0cc15SPoul-Henning Kamp  * which must be in the swdevsw.  Return EBUSY
1993e9c0cc15SPoul-Henning Kamp  * if already swapping on this device.
1994e9c0cc15SPoul-Henning Kamp  */
1995e9c0cc15SPoul-Henning Kamp #ifndef _SYS_SYSPROTO_H_
1996e9c0cc15SPoul-Henning Kamp struct swapon_args {
1997e9c0cc15SPoul-Henning Kamp 	char *name;
1998e9c0cc15SPoul-Henning Kamp };
1999e9c0cc15SPoul-Henning Kamp #endif
2000e9c0cc15SPoul-Henning Kamp 
2001e9c0cc15SPoul-Henning Kamp /*
2002e9c0cc15SPoul-Henning Kamp  * MPSAFE
2003e9c0cc15SPoul-Henning Kamp  */
2004e9c0cc15SPoul-Henning Kamp /* ARGSUSED */
2005e9c0cc15SPoul-Henning Kamp int
20068451d0ddSKip Macy sys_swapon(struct thread *td, struct swapon_args *uap)
2007e9c0cc15SPoul-Henning Kamp {
2008e9c0cc15SPoul-Henning Kamp 	struct vattr attr;
2009e9c0cc15SPoul-Henning Kamp 	struct vnode *vp;
2010e9c0cc15SPoul-Henning Kamp 	struct nameidata nd;
2011e9c0cc15SPoul-Henning Kamp 	int error;
2012e9c0cc15SPoul-Henning Kamp 
2013acd3428bSRobert Watson 	error = priv_check(td, PRIV_SWAPON);
2014e9c0cc15SPoul-Henning Kamp 	if (error)
2015acd3428bSRobert Watson 		return (error);
2016e9c0cc15SPoul-Henning Kamp 
201704533e1eSKonstantin Belousov 	sx_xlock(&swdev_syscall_lock);
2018e9c0cc15SPoul-Henning Kamp 
2019e9c0cc15SPoul-Henning Kamp 	/*
2020e9c0cc15SPoul-Henning Kamp 	 * Swap metadata may not fit in the KVM if we have physical
2021e9c0cc15SPoul-Henning Kamp 	 * memory of >1GB.
2022e9c0cc15SPoul-Henning Kamp 	 */
2023*f425ab8eSKonstantin Belousov 	if (swblk_zone == NULL) {
2024e9c0cc15SPoul-Henning Kamp 		error = ENOMEM;
2025e9c0cc15SPoul-Henning Kamp 		goto done;
2026e9c0cc15SPoul-Henning Kamp 	}
2027e9c0cc15SPoul-Henning Kamp 
2028d9135e72SRobert Watson 	NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
2029d9135e72SRobert Watson 	    uap->name, td);
2030e9c0cc15SPoul-Henning Kamp 	error = namei(&nd);
2031e9c0cc15SPoul-Henning Kamp 	if (error)
2032e9c0cc15SPoul-Henning Kamp 		goto done;
2033e9c0cc15SPoul-Henning Kamp 
2034e9c0cc15SPoul-Henning Kamp 	NDFREE(&nd, NDF_ONLY_PNBUF);
2035e9c0cc15SPoul-Henning Kamp 	vp = nd.ni_vp;
2036e9c0cc15SPoul-Henning Kamp 
203720da9c2eSPoul-Henning Kamp 	if (vn_isdisk(vp, &error)) {
203888ad2d7bSKonstantin Belousov 		error = swapongeom(vp);
203920da9c2eSPoul-Henning Kamp 	} else if (vp->v_type == VREG &&
2040e9c0cc15SPoul-Henning Kamp 	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
20410359a12eSAttilio Rao 	    (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
2042e9c0cc15SPoul-Henning Kamp 		/*
2043e9c0cc15SPoul-Henning Kamp 		 * Allow direct swapping to NFS regular files in the same
2044e9c0cc15SPoul-Henning Kamp 		 * way that nfs_mountroot() sets up diskless swapping.
2045e9c0cc15SPoul-Henning Kamp 		 */
204659efee01SPoul-Henning Kamp 		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2047e9c0cc15SPoul-Henning Kamp 	}
2048e9c0cc15SPoul-Henning Kamp 
2049e9c0cc15SPoul-Henning Kamp 	if (error)
2050e9c0cc15SPoul-Henning Kamp 		vrele(vp);
2051e9c0cc15SPoul-Henning Kamp done:
205204533e1eSKonstantin Belousov 	sx_xunlock(&swdev_syscall_lock);
2053e9c0cc15SPoul-Henning Kamp 	return (error);
2054e9c0cc15SPoul-Henning Kamp }
2055e9c0cc15SPoul-Henning Kamp 
20563ff863f1SDag-Erling Smørgrav /*
20573ff863f1SDag-Erling Smørgrav  * Check that the total amount of swap currently configured does not
20583ff863f1SDag-Erling Smørgrav  * exceed half the theoretical maximum.  If it does, print a warning
20593ff863f1SDag-Erling Smørgrav  * message and return -1; otherwise, return 0.
20603ff863f1SDag-Erling Smørgrav  */
20613ff863f1SDag-Erling Smørgrav static int
20623ff863f1SDag-Erling Smørgrav swapon_check_swzone(unsigned long npages)
20633ff863f1SDag-Erling Smørgrav {
20643ff863f1SDag-Erling Smørgrav 	unsigned long maxpages;
20653ff863f1SDag-Erling Smørgrav 
20663ff863f1SDag-Erling Smørgrav 	/* absolute maximum we can handle assuming 100% efficiency */
2067*f425ab8eSKonstantin Belousov 	maxpages = uma_zone_get_max(swblk_zone) * SWAP_META_PAGES;
20683ff863f1SDag-Erling Smørgrav 
20693ff863f1SDag-Erling Smørgrav 	/* recommend using no more than half that amount */
20703ff863f1SDag-Erling Smørgrav 	if (npages > maxpages / 2) {
20713ff863f1SDag-Erling Smørgrav 		printf("warning: total configured swap (%lu pages) "
20723ff863f1SDag-Erling Smørgrav 		    "exceeds maximum recommended amount (%lu pages).\n",
20739462305cSSergey Kandaurov 		    npages, maxpages / 2);
20743ff863f1SDag-Erling Smørgrav 		printf("warning: increase kern.maxswzone "
20753ff863f1SDag-Erling Smørgrav 		    "or reduce amount of swap.\n");
20763ff863f1SDag-Erling Smørgrav 		return (-1);
20773ff863f1SDag-Erling Smørgrav 	}
20783ff863f1SDag-Erling Smørgrav 	return (0);
20793ff863f1SDag-Erling Smørgrav }
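/*
 * A hedged worked example of the check above (the numbers are purely
 * illustrative, not taken from this revision): if uma_zone_get_max()
 * reported a limit of 1M swblk structures and SWAP_META_PAGES were 32,
 * maxpages would be 32M pages, and the warning would fire once more
 * than 16M pages of configured swap (64 GiB with 4 KiB pages).
 */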
20803ff863f1SDag-Erling Smørgrav 
208159efee01SPoul-Henning Kamp static void
20822cc718a1SKonstantin Belousov swaponsomething(struct vnode *vp, void *id, u_long nblks,
20832cc718a1SKonstantin Belousov     sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
2084e9c0cc15SPoul-Henning Kamp {
20852d9974c1SAlan Cox 	struct swdevt *sp, *tsp;
2086e9c0cc15SPoul-Henning Kamp 	swblk_t dvbase;
20878f60c087SPoul-Henning Kamp 	u_long mblocks;
2088e9c0cc15SPoul-Henning Kamp 
2089e9c0cc15SPoul-Henning Kamp 	/*
2090e9c0cc15SPoul-Henning Kamp 	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2091e9c0cc15SPoul-Henning Kamp 	 * First round nblks down to a page boundary, then convert.
2092e9c0cc15SPoul-Henning Kamp 	 *
2093e9c0cc15SPoul-Henning Kamp 	 * sw->sw_nblks is in page-sized chunks now too.
2094e9c0cc15SPoul-Henning Kamp 	 */
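	/*
	 * A hedged worked example, assuming the common PAGE_SIZE of 4096
	 * and DEV_BSIZE of 512: ctodb(1) == 8, so the mask below rounds
	 * nblks down to a multiple of 8 disk blocks, and dbtoc() then
	 * divides by 8 to yield whole pages.
	 */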
2095e9c0cc15SPoul-Henning Kamp 	nblks &= ~(ctodb(1) - 1);
2096e9c0cc15SPoul-Henning Kamp 	nblks = dbtoc(nblks);
2097e9c0cc15SPoul-Henning Kamp 
20986e903bd0SKonstantin Belousov 	/*
20996e903bd0SKonstantin Belousov 	 * If we go beyond this, we get overflows in the radix
21006e903bd0SKonstantin Belousov 	 * tree bitmap code.
21016e903bd0SKonstantin Belousov 	 */
21026e903bd0SKonstantin Belousov 	mblocks = 0x40000000 / BLIST_META_RADIX;
21036e903bd0SKonstantin Belousov 	if (nblks > mblocks) {
21046e903bd0SKonstantin Belousov 		printf(
21056e903bd0SKonstantin Belousov     "WARNING: reducing swap size to maximum of %luMB per unit\n",
21066e903bd0SKonstantin Belousov 		    mblocks / 1024 / 1024 * PAGE_SIZE);
21076e903bd0SKonstantin Belousov 		nblks = mblocks;
21086e903bd0SKonstantin Belousov 	}
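	/*
	 * A hedged worked example, assuming BLIST_META_RADIX of 16 and
	 * 4 KiB pages: mblocks is 2^26 pages, capping a single swap
	 * device at 256 GiB (the 262144 MB the message above reports).
	 */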
21096e903bd0SKonstantin Belousov 
21108f60c087SPoul-Henning Kamp 	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2111dee34ca4SPoul-Henning Kamp 	sp->sw_vp = vp;
2112dee34ca4SPoul-Henning Kamp 	sp->sw_id = id;
2113f3732fd1SPoul-Henning Kamp 	sp->sw_dev = dev;
2115e9c0cc15SPoul-Henning Kamp 	sp->sw_nblks = nblks;
2116e9c0cc15SPoul-Henning Kamp 	sp->sw_used = 0;
211759efee01SPoul-Henning Kamp 	sp->sw_strategy = strategy;
2118dee34ca4SPoul-Henning Kamp 	sp->sw_close = close;
21192cc718a1SKonstantin Belousov 	sp->sw_flags = flags;
2120e9c0cc15SPoul-Henning Kamp 
2121c8c7ad92SKip Macy 	sp->sw_blist = blist_create(nblks, M_WAITOK);
2122e9c0cc15SPoul-Henning Kamp 	/*
2123ef3c5abdSPoul-Henning Kamp 	 * Do not free the first two blocks in order to avoid overwriting
21248f60c087SPoul-Henning Kamp 	 * any BSD label at the front of the partition.
2125e9c0cc15SPoul-Henning Kamp 	 */
2126ef3c5abdSPoul-Henning Kamp 	blist_free(sp->sw_blist, 2, nblks - 2);
2127e9c0cc15SPoul-Henning Kamp 
21282d9974c1SAlan Cox 	dvbase = 0;
212920da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
21302d9974c1SAlan Cox 	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
21312d9974c1SAlan Cox 		if (tsp->sw_end >= dvbase) {
21322d9974c1SAlan Cox 			/*
21332d9974c1SAlan Cox 			 * We put one uncovered page between the devices
21342d9974c1SAlan Cox 			 * in order to definitively prevent any cross-device
21352d9974c1SAlan Cox 			 * I/O requests.
21362d9974c1SAlan Cox 			 */
21372d9974c1SAlan Cox 			dvbase = tsp->sw_end + 1;
21382d9974c1SAlan Cox 		}
21392d9974c1SAlan Cox 	}
21402d9974c1SAlan Cox 	sp->sw_first = dvbase;
21412d9974c1SAlan Cox 	sp->sw_end = dvbase + nblks;
21428f60c087SPoul-Henning Kamp 	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
21438f60c087SPoul-Henning Kamp 	nswapdev++;
2144761097c8SAlan Cox 	swap_pager_avail += nblks - 2;
21453364c323SKonstantin Belousov 	swap_total += (vm_ooffset_t)nblks * PAGE_SIZE;
21463ff863f1SDag-Erling Smørgrav 	swapon_check_swzone(swap_total / PAGE_SIZE);
2147d05bc129SAlan Cox 	swp_sizecheck();
2148d05bc129SAlan Cox 	mtx_unlock(&sw_dev_mtx);
2149b1fd102eSMark Johnston 	EVENTHANDLER_INVOKE(swapon, sp);
215059efee01SPoul-Henning Kamp }
2151e9c0cc15SPoul-Henning Kamp 
2152e9c0cc15SPoul-Henning Kamp /*
2153e9c0cc15SPoul-Henning Kamp  * SYSCALL: swapoff(devname)
2154e9c0cc15SPoul-Henning Kamp  *
2155e9c0cc15SPoul-Henning Kamp  * Disable swapping on the given device.
2156dee34ca4SPoul-Henning Kamp  *
2157dee34ca4SPoul-Henning Kamp  * XXX: Badly designed system call: it should use a device index
2158dee34ca4SPoul-Henning Kamp  * rather than a filename as the specification.  We keep sw_vp around
2159dee34ca4SPoul-Henning Kamp  * only to make this work.
2160e9c0cc15SPoul-Henning Kamp  */
2161e9c0cc15SPoul-Henning Kamp #ifndef _SYS_SYSPROTO_H_
2162e9c0cc15SPoul-Henning Kamp struct swapoff_args {
2163e9c0cc15SPoul-Henning Kamp 	char *name;
2164e9c0cc15SPoul-Henning Kamp };
2165e9c0cc15SPoul-Henning Kamp #endif
2166e9c0cc15SPoul-Henning Kamp 
2167e9c0cc15SPoul-Henning Kamp /*
2168e9c0cc15SPoul-Henning Kamp  * MPSAFE
2169e9c0cc15SPoul-Henning Kamp  */
2170e9c0cc15SPoul-Henning Kamp /* ARGSUSED */
2171e9c0cc15SPoul-Henning Kamp int
21728451d0ddSKip Macy sys_swapoff(struct thread *td, struct swapoff_args *uap)
2173e9c0cc15SPoul-Henning Kamp {
2174e9c0cc15SPoul-Henning Kamp 	struct vnode *vp;
2175e9c0cc15SPoul-Henning Kamp 	struct nameidata nd;
2176e9c0cc15SPoul-Henning Kamp 	struct swdevt *sp;
21778f60c087SPoul-Henning Kamp 	int error;
2178e9c0cc15SPoul-Henning Kamp 
2179acd3428bSRobert Watson 	error = priv_check(td, PRIV_SWAPOFF);
2180e9c0cc15SPoul-Henning Kamp 	if (error)
21810909f38aSPawel Jakub Dawidek 		return (error);
2182e9c0cc15SPoul-Henning Kamp 
218304533e1eSKonstantin Belousov 	sx_xlock(&swdev_syscall_lock);
2184e9c0cc15SPoul-Henning Kamp 
2185d9135e72SRobert Watson 	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
2186d9135e72SRobert Watson 	    td);
2187e9c0cc15SPoul-Henning Kamp 	error = namei(&nd);
2188e9c0cc15SPoul-Henning Kamp 	if (error)
2189e9c0cc15SPoul-Henning Kamp 		goto done;
2190e9c0cc15SPoul-Henning Kamp 	NDFREE(&nd, NDF_ONLY_PNBUF);
2191e9c0cc15SPoul-Henning Kamp 	vp = nd.ni_vp;
2192e9c0cc15SPoul-Henning Kamp 
219320da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
21948f60c087SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2195dee34ca4SPoul-Henning Kamp 		if (sp->sw_vp == vp)
21960909f38aSPawel Jakub Dawidek 			break;
2197e9c0cc15SPoul-Henning Kamp 	}
219820da9c2eSPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
21990909f38aSPawel Jakub Dawidek 	if (sp == NULL) {
2200e9c0cc15SPoul-Henning Kamp 		error = EINVAL;
2201e9c0cc15SPoul-Henning Kamp 		goto done;
22020909f38aSPawel Jakub Dawidek 	}
220335918c55SChristian S.J. Peron 	error = swapoff_one(sp, td->td_ucred);
22040909f38aSPawel Jakub Dawidek done:
220504533e1eSKonstantin Belousov 	sx_xunlock(&swdev_syscall_lock);
22060909f38aSPawel Jakub Dawidek 	return (error);
22070909f38aSPawel Jakub Dawidek }
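/*
 * A minimal userland sketch (not part of this file) exercising the two
 * system calls above.  It assumes the one-argument swapon(2) and
 * swapoff(2) prototypes of this era, declared in <unistd.h>; the device
 * path is purely illustrative, and both calls require privilege.
 */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	const char *dev = "/dev/ada0p3";	/* hypothetical swap partition */

	if (swapon(dev) == -1) {
		perror("swapon");
		return (1);
	}
	/* The device now backs part of the swap pool. */
	if (swapoff(dev) == -1) {
		perror("swapoff");
		return (1);
	}
	return (0);
}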
22080909f38aSPawel Jakub Dawidek 
22090909f38aSPawel Jakub Dawidek static int
221035918c55SChristian S.J. Peron swapoff_one(struct swdevt *sp, struct ucred *cred)
22110909f38aSPawel Jakub Dawidek {
221203bdd65fSAlan Cox 	u_long nblks;
2213e9c0cc15SPoul-Henning Kamp #ifdef MAC
22140909f38aSPawel Jakub Dawidek 	int error;
2215e9c0cc15SPoul-Henning Kamp #endif
2216e9c0cc15SPoul-Henning Kamp 
221704533e1eSKonstantin Belousov 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
22180909f38aSPawel Jakub Dawidek #ifdef MAC
2219cb05b60aSAttilio Rao 	(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
222035918c55SChristian S.J. Peron 	error = mac_system_check_swapoff(cred, sp->sw_vp);
222122db15c0SAttilio Rao 	(void) VOP_UNLOCK(sp->sw_vp, 0);
22220909f38aSPawel Jakub Dawidek 	if (error != 0)
22230909f38aSPawel Jakub Dawidek 		return (error);
22240909f38aSPawel Jakub Dawidek #endif
2225e9c0cc15SPoul-Henning Kamp 	nblks = sp->sw_nblks;
2226e9c0cc15SPoul-Henning Kamp 
2227e9c0cc15SPoul-Henning Kamp 	/*
2228e9c0cc15SPoul-Henning Kamp 	 * We can turn off this swap device safely only if the
2229e9c0cc15SPoul-Henning Kamp 	 * available virtual memory in the system will fit the amount
2230e9c0cc15SPoul-Henning Kamp 	 * of data we will have to page back in, plus an epsilon so
2231e9c0cc15SPoul-Henning Kamp 	 * the system doesn't become critically low on swap space.
2232e9c0cc15SPoul-Henning Kamp 	 */
2233bba39b9aSAlan Cox 	if (vm_cnt.v_free_count + swap_pager_avail < nblks + nswap_lowat)
22340909f38aSPawel Jakub Dawidek 		return (ENOMEM);
2235e9c0cc15SPoul-Henning Kamp 
2236e9c0cc15SPoul-Henning Kamp 	/*
2237e9c0cc15SPoul-Henning Kamp 	 * Prevent further allocations on this device.
2238e9c0cc15SPoul-Henning Kamp 	 */
22392928cef7SAlan Cox 	mtx_lock(&sw_dev_mtx);
2240e9c0cc15SPoul-Henning Kamp 	sp->sw_flags |= SW_CLOSING;
224103bdd65fSAlan Cox 	swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
22423364c323SKonstantin Belousov 	swap_total -= (vm_ooffset_t)nblks * PAGE_SIZE;
22432928cef7SAlan Cox 	mtx_unlock(&sw_dev_mtx);
2244e9c0cc15SPoul-Henning Kamp 
2245e9c0cc15SPoul-Henning Kamp 	/*
2246e9c0cc15SPoul-Henning Kamp 	 * Page in the contents of the device and close it.
2247e9c0cc15SPoul-Henning Kamp 	 */
2248b3fed13eSDavid Schultz 	swap_pager_swapoff(sp);
2249e9c0cc15SPoul-Henning Kamp 
225035918c55SChristian S.J. Peron 	sp->sw_close(curthread, sp);
225120da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
22529e3e3fe5SWarner Losh 	sp->sw_id = NULL;
22538f60c087SPoul-Henning Kamp 	TAILQ_REMOVE(&swtailq, sp, sw_list);
22540676a140SAlan Cox 	nswapdev--;
22557dea2c2eSAlan Cox 	if (nswapdev == 0) {
22567dea2c2eSAlan Cox 		swap_pager_full = 2;
22577dea2c2eSAlan Cox 		swap_pager_almost_full = 1;
22587dea2c2eSAlan Cox 	}
22598f60c087SPoul-Henning Kamp 	if (swdevhd == sp)
22608f60c087SPoul-Henning Kamp 		swdevhd = NULL;
2261d05bc129SAlan Cox 	mtx_unlock(&sw_dev_mtx);
22628f60c087SPoul-Henning Kamp 	blist_destroy(sp->sw_blist);
22638f60c087SPoul-Henning Kamp 	free(sp, M_VMPGDATA);
22640909f38aSPawel Jakub Dawidek 	return (0);
22650909f38aSPawel Jakub Dawidek }
2266e9c0cc15SPoul-Henning Kamp 
22670909f38aSPawel Jakub Dawidek void
22680909f38aSPawel Jakub Dawidek swapoff_all(void)
22690909f38aSPawel Jakub Dawidek {
22700909f38aSPawel Jakub Dawidek 	struct swdevt *sp, *spt;
22710909f38aSPawel Jakub Dawidek 	const char *devname;
22720909f38aSPawel Jakub Dawidek 	int error;
22730909f38aSPawel Jakub Dawidek 
227404533e1eSKonstantin Belousov 	sx_xlock(&swdev_syscall_lock);
22750909f38aSPawel Jakub Dawidek 
22760909f38aSPawel Jakub Dawidek 	mtx_lock(&sw_dev_mtx);
22770909f38aSPawel Jakub Dawidek 	TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
22780909f38aSPawel Jakub Dawidek 		mtx_unlock(&sw_dev_mtx);
22790909f38aSPawel Jakub Dawidek 		if (vn_isdisk(sp->sw_vp, NULL))
22807870adb6SEd Schouten 			devname = devtoname(sp->sw_vp->v_rdev);
22810909f38aSPawel Jakub Dawidek 		else
22820909f38aSPawel Jakub Dawidek 			devname = "[file]";
228335918c55SChristian S.J. Peron 		error = swapoff_one(sp, thread0.td_ucred);
22840909f38aSPawel Jakub Dawidek 		if (error != 0) {
22850909f38aSPawel Jakub Dawidek 			printf("Cannot remove swap device %s (error=%d), "
22860909f38aSPawel Jakub Dawidek 			    "skipping.\n", devname, error);
22870909f38aSPawel Jakub Dawidek 		} else if (bootverbose) {
22880909f38aSPawel Jakub Dawidek 			printf("Swap device %s removed.\n", devname);
22890909f38aSPawel Jakub Dawidek 		}
22900909f38aSPawel Jakub Dawidek 		mtx_lock(&sw_dev_mtx);
22910909f38aSPawel Jakub Dawidek 	}
22920909f38aSPawel Jakub Dawidek 	mtx_unlock(&sw_dev_mtx);
22930909f38aSPawel Jakub Dawidek 
229404533e1eSKonstantin Belousov 	sx_xunlock(&swdev_syscall_lock);
2295e9c0cc15SPoul-Henning Kamp }
2296e9c0cc15SPoul-Henning Kamp 
2297567104a1SPoul-Henning Kamp void
2298567104a1SPoul-Henning Kamp swap_pager_status(int *total, int *used)
2299567104a1SPoul-Henning Kamp {
2300567104a1SPoul-Henning Kamp 	struct swdevt *sp;
2301567104a1SPoul-Henning Kamp 
2302567104a1SPoul-Henning Kamp 	*total = 0;
2303567104a1SPoul-Henning Kamp 	*used = 0;
230420da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
23058f60c087SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2306567104a1SPoul-Henning Kamp 		*total += sp->sw_nblks;
2307567104a1SPoul-Henning Kamp 		*used += sp->sw_used;
2308567104a1SPoul-Henning Kamp 	}
230920da9c2eSPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
2310567104a1SPoul-Henning Kamp }
2311567104a1SPoul-Henning Kamp 
2312dda4f960SKonstantin Belousov int
2313dda4f960SKonstantin Belousov swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
2314dda4f960SKonstantin Belousov {
2315dda4f960SKonstantin Belousov 	struct swdevt *sp;
23167870adb6SEd Schouten 	const char *tmp_devname;
2317dda4f960SKonstantin Belousov 	int error, n;
2318dda4f960SKonstantin Belousov 
2319dda4f960SKonstantin Belousov 	n = 0;
2320dda4f960SKonstantin Belousov 	error = ENOENT;
2321dda4f960SKonstantin Belousov 	mtx_lock(&sw_dev_mtx);
2322dda4f960SKonstantin Belousov 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2323dda4f960SKonstantin Belousov 		if (n != name) {
2324dda4f960SKonstantin Belousov 			n++;
2325dda4f960SKonstantin Belousov 			continue;
2326dda4f960SKonstantin Belousov 		}
2327dda4f960SKonstantin Belousov 		xs->xsw_version = XSWDEV_VERSION;
2328dda4f960SKonstantin Belousov 		xs->xsw_dev = sp->sw_dev;
2329dda4f960SKonstantin Belousov 		xs->xsw_flags = sp->sw_flags;
2330dda4f960SKonstantin Belousov 		xs->xsw_nblks = sp->sw_nblks;
2331dda4f960SKonstantin Belousov 		xs->xsw_used = sp->sw_used;
2332dda4f960SKonstantin Belousov 		if (devname != NULL) {
2333dda4f960SKonstantin Belousov 			if (vn_isdisk(sp->sw_vp, NULL))
23347870adb6SEd Schouten 				tmp_devname = devtoname(sp->sw_vp->v_rdev);
2335dda4f960SKonstantin Belousov 			else
2336dda4f960SKonstantin Belousov 				tmp_devname = "[file]";
2337dda4f960SKonstantin Belousov 			strncpy(devname, tmp_devname, len);
2338dda4f960SKonstantin Belousov 		}
2339dda4f960SKonstantin Belousov 		error = 0;
2340dda4f960SKonstantin Belousov 		break;
2341dda4f960SKonstantin Belousov 	}
2342dda4f960SKonstantin Belousov 	mtx_unlock(&sw_dev_mtx);
2343dda4f960SKonstantin Belousov 	return (error);
2344dda4f960SKonstantin Belousov }
2345dda4f960SKonstantin Belousov 
234669921123SKonstantin Belousov #if defined(COMPAT_FREEBSD11)
234769921123SKonstantin Belousov #define XSWDEV_VERSION_11	1
234869921123SKonstantin Belousov struct xswdev11 {
234969921123SKonstantin Belousov 	u_int	xsw_version;
235069921123SKonstantin Belousov 	uint32_t xsw_dev;
235169921123SKonstantin Belousov 	int	xsw_flags;
235269921123SKonstantin Belousov 	int	xsw_nblks;
235369921123SKonstantin Belousov 	int     xsw_used;
235469921123SKonstantin Belousov };
235569921123SKonstantin Belousov #endif
235669921123SKonstantin Belousov 
2357e9c0cc15SPoul-Henning Kamp static int
2358e9c0cc15SPoul-Henning Kamp sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2359e9c0cc15SPoul-Henning Kamp {
2360e9c0cc15SPoul-Henning Kamp 	struct xswdev xs;
236169921123SKonstantin Belousov #if defined(COMPAT_FREEBSD11)
236269921123SKonstantin Belousov 	struct xswdev11 xs11;
236369921123SKonstantin Belousov #endif
2364dda4f960SKonstantin Belousov 	int error;
2365e9c0cc15SPoul-Henning Kamp 
2366e9c0cc15SPoul-Henning Kamp 	if (arg2 != 1)			/* name length */
2367e9c0cc15SPoul-Henning Kamp 		return (EINVAL);
2368dda4f960SKonstantin Belousov 	error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
2369dda4f960SKonstantin Belousov 	if (error != 0)
2370dda4f960SKonstantin Belousov 		return (error);
237169921123SKonstantin Belousov #if defined(COMPAT_FREEBSD11)
237269921123SKonstantin Belousov 	if (req->oldlen == sizeof(xs11)) {
237369921123SKonstantin Belousov 		xs11.xsw_version = XSWDEV_VERSION_11;
237469921123SKonstantin Belousov 		xs11.xsw_dev = xs.xsw_dev; /* truncation */
237569921123SKonstantin Belousov 		xs11.xsw_flags = xs.xsw_flags;
237669921123SKonstantin Belousov 		xs11.xsw_nblks = xs.xsw_nblks;
237769921123SKonstantin Belousov 		xs11.xsw_used = xs.xsw_used;
237869921123SKonstantin Belousov 		error = SYSCTL_OUT(req, &xs11, sizeof(xs11));
237969921123SKonstantin Belousov 	} else
238069921123SKonstantin Belousov #endif
2381e9c0cc15SPoul-Henning Kamp 		error = SYSCTL_OUT(req, &xs, sizeof(xs));
2382e9c0cc15SPoul-Henning Kamp 	return (error);
2383e9c0cc15SPoul-Henning Kamp }
2384e9c0cc15SPoul-Henning Kamp 
23858f60c087SPoul-Henning Kamp SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2386e9c0cc15SPoul-Henning Kamp     "Number of swap devices");
23874c36e917SKonstantin Belousov SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
23884c36e917SKonstantin Belousov     sysctl_vm_swap_info,
2389e9c0cc15SPoul-Henning Kamp     "Swap statistics by device");
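/*
 * A hedged userland sketch (not part of this file) of how the
 * vm.swap_info node above is typically consumed, in the style of
 * swapinfo(8): resolve the node to a MIB, then append successive device
 * indices until the handler returns ENOENT.  Assumes the struct xswdev
 * layout from <vm/vm_param.h>.
 */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <vm/vm_param.h>
#include <stdio.h>

int
main(void)
{
	struct xswdev xsw;
	size_t mibsize, size;
	int mib[16], n;

	mibsize = sizeof(mib) / sizeof(mib[0]) - 1;
	if (sysctlnametomib("vm.swap_info", mib, &mibsize) == -1)
		return (1);
	for (n = 0; ; n++) {
		mib[mibsize] = n;	/* device index is the final component */
		size = sizeof(xsw);
		if (sysctl(mib, mibsize + 1, &xsw, &size, NULL, 0) == -1)
			break;		/* ENOENT once the index runs out */
		printf("device %d: %d pages, %d used\n",
		    n, xsw.xsw_nblks, xsw.xsw_used);
	}
	return (0);
}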
2390ec38b344SPoul-Henning Kamp 
2391ec38b344SPoul-Henning Kamp /*
2392*f425ab8eSKonstantin Belousov  * Count the approximate swap usage in pages for a vmspace.  Swap
2393*f425ab8eSKonstantin Belousov  * blocks that are shadowed or not yet copied on write are not counted.
2394ec38b344SPoul-Henning Kamp  * The map must be locked.
2395ec38b344SPoul-Henning Kamp  */
23962860553aSRebecca Cran long
2397ec38b344SPoul-Henning Kamp vmspace_swap_count(struct vmspace *vmspace)
2398ec38b344SPoul-Henning Kamp {
239965d8409cSRebecca Cran 	vm_map_t map;
2400ec38b344SPoul-Henning Kamp 	vm_map_entry_t cur;
240165d8409cSRebecca Cran 	vm_object_t object;
2402*f425ab8eSKonstantin Belousov 	struct swblk *sb;
2403*f425ab8eSKonstantin Belousov 	vm_pindex_t e, pi;
2404*f425ab8eSKonstantin Belousov 	long count;
2405*f425ab8eSKonstantin Belousov 	int i;
240665d8409cSRebecca Cran 
240765d8409cSRebecca Cran 	map = &vmspace->vm_map;
240865d8409cSRebecca Cran 	count = 0;
2409ec38b344SPoul-Henning Kamp 
2410ec38b344SPoul-Henning Kamp 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
2411*f425ab8eSKonstantin Belousov 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2412*f425ab8eSKonstantin Belousov 			continue;
2413*f425ab8eSKonstantin Belousov 		object = cur->object.vm_object;
2414*f425ab8eSKonstantin Belousov 		if (object == NULL || object->type != OBJT_SWAP)
2415*f425ab8eSKonstantin Belousov 			continue;
2416*f425ab8eSKonstantin Belousov 		VM_OBJECT_RLOCK(object);
2417*f425ab8eSKonstantin Belousov 		if (object->type != OBJT_SWAP)
2418*f425ab8eSKonstantin Belousov 			goto unlock;
2419*f425ab8eSKonstantin Belousov 		pi = OFF_TO_IDX(cur->offset);
2420*f425ab8eSKonstantin Belousov 		e = pi + OFF_TO_IDX(cur->end - cur->start);
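		/*
		 * Walk the swblk clusters that overlap [pi, e): each
		 * LOOKUP_GE returns the first cluster at or above pi, and
		 * pi then advances past the last slot of that cluster.
		 */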
2421*f425ab8eSKonstantin Belousov 		for (;; pi = sb->p + SWAP_META_PAGES) {
2422*f425ab8eSKonstantin Belousov 			sb = SWAP_PCTRIE_LOOKUP_GE(
2423*f425ab8eSKonstantin Belousov 			    &object->un_pager.swp.swp_blks, pi);
2424*f425ab8eSKonstantin Belousov 			if (sb == NULL || sb->p >= e)
2425*f425ab8eSKonstantin Belousov 				break;
2426*f425ab8eSKonstantin Belousov 			for (i = 0; i < SWAP_META_PAGES; i++) {
2427*f425ab8eSKonstantin Belousov 				if (sb->p + i < e &&
2428*f425ab8eSKonstantin Belousov 				    sb->d[i] != SWAPBLK_NONE)
2429*f425ab8eSKonstantin Belousov 					count++;
2430ec38b344SPoul-Henning Kamp 			}
2431ec38b344SPoul-Henning Kamp 		}
2432*f425ab8eSKonstantin Belousov unlock:
2433*f425ab8eSKonstantin Belousov 		VM_OBJECT_RUNLOCK(object);
2434ec38b344SPoul-Henning Kamp 	}
2435ec38b344SPoul-Henning Kamp 	return (count);
2436ec38b344SPoul-Henning Kamp }
2437dee34ca4SPoul-Henning Kamp 
2438dee34ca4SPoul-Henning Kamp /*
2439dee34ca4SPoul-Henning Kamp  * GEOM backend
2440dee34ca4SPoul-Henning Kamp  *
2441dee34ca4SPoul-Henning Kamp  * Swapping onto disk devices.
2442dee34ca4SPoul-Henning Kamp  *
2443dee34ca4SPoul-Henning Kamp  */
2444dee34ca4SPoul-Henning Kamp 
24455721c9c7SPoul-Henning Kamp static g_orphan_t swapgeom_orphan;
24465721c9c7SPoul-Henning Kamp 
2447dee34ca4SPoul-Henning Kamp static struct g_class g_swap_class = {
2448dee34ca4SPoul-Henning Kamp 	.name = "SWAP",
24495721c9c7SPoul-Henning Kamp 	.version = G_VERSION,
24505721c9c7SPoul-Henning Kamp 	.orphan = swapgeom_orphan,
2451dee34ca4SPoul-Henning Kamp };
2452dee34ca4SPoul-Henning Kamp 
2453dee34ca4SPoul-Henning Kamp DECLARE_GEOM_CLASS(g_swap_class, g_class);
2454dee34ca4SPoul-Henning Kamp 
2455dee34ca4SPoul-Henning Kamp 
2456dee34ca4SPoul-Henning Kamp static void
24573398491bSAlexander Motin swapgeom_close_ev(void *arg, int flags)
24583398491bSAlexander Motin {
24593398491bSAlexander Motin 	struct g_consumer *cp;
24603398491bSAlexander Motin 
24613398491bSAlexander Motin 	cp = arg;
24623398491bSAlexander Motin 	g_access(cp, -1, -1, 0);
24633398491bSAlexander Motin 	g_detach(cp);
24643398491bSAlexander Motin 	g_destroy_consumer(cp);
24653398491bSAlexander Motin }
24663398491bSAlexander Motin 
24679e3e3fe5SWarner Losh /*
24689e3e3fe5SWarner Losh  * Add a reference to the g_consumer for an inflight transaction.
24699e3e3fe5SWarner Losh  */
24709e3e3fe5SWarner Losh static void
24719e3e3fe5SWarner Losh swapgeom_acquire(struct g_consumer *cp)
24729e3e3fe5SWarner Losh {
24739e3e3fe5SWarner Losh 
24749e3e3fe5SWarner Losh 	mtx_assert(&sw_dev_mtx, MA_OWNED);
24759e3e3fe5SWarner Losh 	cp->index++;
24769e3e3fe5SWarner Losh }
24779e3e3fe5SWarner Losh 
24789e3e3fe5SWarner Losh /*
24790c657d22SKonstantin Belousov  * Remove a reference from the g_consumer.  Post a close event if all
24800c657d22SKonstantin Belousov  * references go away, since the function might be called from the
24810c657d22SKonstantin Belousov  * biodone context.
24829e3e3fe5SWarner Losh  */
24839e3e3fe5SWarner Losh static void
24849e3e3fe5SWarner Losh swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
24859e3e3fe5SWarner Losh {
24869e3e3fe5SWarner Losh 
24879e3e3fe5SWarner Losh 	mtx_assert(&sw_dev_mtx, MA_OWNED);
24889e3e3fe5SWarner Losh 	cp->index--;
24899e3e3fe5SWarner Losh 	if (cp->index == 0) {
24909e3e3fe5SWarner Losh 		if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
24919e3e3fe5SWarner Losh 			sp->sw_id = NULL;
24929e3e3fe5SWarner Losh 	}
24939e3e3fe5SWarner Losh }
24949e3e3fe5SWarner Losh 
24953398491bSAlexander Motin static void
2496dee34ca4SPoul-Henning Kamp swapgeom_done(struct bio *bp2)
2497dee34ca4SPoul-Henning Kamp {
24983398491bSAlexander Motin 	struct swdevt *sp;
2499dee34ca4SPoul-Henning Kamp 	struct buf *bp;
25003398491bSAlexander Motin 	struct g_consumer *cp;
2501dee34ca4SPoul-Henning Kamp 
2502dee34ca4SPoul-Henning Kamp 	bp = bp2->bio_caller2;
25033398491bSAlexander Motin 	cp = bp2->bio_from;
2504c5d3d25eSPoul-Henning Kamp 	bp->b_ioflags = bp2->bio_flags;
2505dee34ca4SPoul-Henning Kamp 	if (bp2->bio_error)
2506dee34ca4SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
2507c5d3d25eSPoul-Henning Kamp 	bp->b_resid = bp->b_bcount - bp2->bio_completed;
2508c5d3d25eSPoul-Henning Kamp 	bp->b_error = bp2->bio_error;
2509dee34ca4SPoul-Henning Kamp 	bufdone(bp);
25103398491bSAlexander Motin 	sp = bp2->bio_caller1;
25119e3e3fe5SWarner Losh 	mtx_lock(&sw_dev_mtx);
25129e3e3fe5SWarner Losh 	swapgeom_release(cp, sp);
25133398491bSAlexander Motin 	mtx_unlock(&sw_dev_mtx);
2514dee34ca4SPoul-Henning Kamp 	g_destroy_bio(bp2);
2515dee34ca4SPoul-Henning Kamp }
2516dee34ca4SPoul-Henning Kamp 
2517dee34ca4SPoul-Henning Kamp static void
2518dee34ca4SPoul-Henning Kamp swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2519dee34ca4SPoul-Henning Kamp {
2520dee34ca4SPoul-Henning Kamp 	struct bio *bio;
2521dee34ca4SPoul-Henning Kamp 	struct g_consumer *cp;
2522dee34ca4SPoul-Henning Kamp 
25233398491bSAlexander Motin 	mtx_lock(&sw_dev_mtx);
2524dee34ca4SPoul-Henning Kamp 	cp = sp->sw_id;
2525dee34ca4SPoul-Henning Kamp 	if (cp == NULL) {
25263398491bSAlexander Motin 		mtx_unlock(&sw_dev_mtx);
2527dee34ca4SPoul-Henning Kamp 		bp->b_error = ENXIO;
2528dee34ca4SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
2529dee34ca4SPoul-Henning Kamp 		bufdone(bp);
2530dee34ca4SPoul-Henning Kamp 		return;
2531dee34ca4SPoul-Henning Kamp 	}
25329e3e3fe5SWarner Losh 	swapgeom_acquire(cp);
25333398491bSAlexander Motin 	mtx_unlock(&sw_dev_mtx);
253411041003SKonstantin Belousov 	if (bp->b_iocmd == BIO_WRITE)
253511041003SKonstantin Belousov 		bio = g_new_bio();
253611041003SKonstantin Belousov 	else
25374f8205e5SPoul-Henning Kamp 		bio = g_alloc_bio();
25384f8205e5SPoul-Henning Kamp 	if (bio == NULL) {
25399e3e3fe5SWarner Losh 		mtx_lock(&sw_dev_mtx);
25409e3e3fe5SWarner Losh 		swapgeom_release(cp, sp);
25419e3e3fe5SWarner Losh 		mtx_unlock(&sw_dev_mtx);
25423e5b6861SPoul-Henning Kamp 		bp->b_error = ENOMEM;
25433e5b6861SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
25443e5b6861SPoul-Henning Kamp 		bufdone(bp);
25453e5b6861SPoul-Henning Kamp 		return;
25463e5b6861SPoul-Henning Kamp 	}
254711041003SKonstantin Belousov 
25483398491bSAlexander Motin 	bio->bio_caller1 = sp;
2549dee34ca4SPoul-Henning Kamp 	bio->bio_caller2 = bp;
2550c5d3d25eSPoul-Henning Kamp 	bio->bio_cmd = bp->b_iocmd;
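	/*
	 * b_blkno is a page index into the global swap space; rebase it
	 * to this device (sw_first) and scale it to a byte offset.
	 */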
2551dee34ca4SPoul-Henning Kamp 	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2552dee34ca4SPoul-Henning Kamp 	bio->bio_length = bp->b_bcount;
2553dee34ca4SPoul-Henning Kamp 	bio->bio_done = swapgeom_done;
2554fade8dd7SJeff Roberson 	if (!buf_mapped(bp)) {
25552cc718a1SKonstantin Belousov 		bio->bio_ma = bp->b_pages;
25562cc718a1SKonstantin Belousov 		bio->bio_data = unmapped_buf;
25572cc718a1SKonstantin Belousov 		bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
25582cc718a1SKonstantin Belousov 		bio->bio_ma_n = bp->b_npages;
25592cc718a1SKonstantin Belousov 		bio->bio_flags |= BIO_UNMAPPED;
25602cc718a1SKonstantin Belousov 	} else {
25612cc718a1SKonstantin Belousov 		bio->bio_data = bp->b_data;
25622cc718a1SKonstantin Belousov 		bio->bio_ma = NULL;
25632cc718a1SKonstantin Belousov 	}
2564dee34ca4SPoul-Henning Kamp 	g_io_request(bio, cp);
2565dee34ca4SPoul-Henning Kamp 	return;
2566dee34ca4SPoul-Henning Kamp }
2567dee34ca4SPoul-Henning Kamp 
2568dee34ca4SPoul-Henning Kamp static void
2569dee34ca4SPoul-Henning Kamp swapgeom_orphan(struct g_consumer *cp)
2570dee34ca4SPoul-Henning Kamp {
2571dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
25723398491bSAlexander Motin 	int destroy;
2573dee34ca4SPoul-Henning Kamp 
2574dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
25753398491bSAlexander Motin 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
25763398491bSAlexander Motin 		if (sp->sw_id == cp) {
25778f12d83aSAlexander Motin 			sp->sw_flags |= SW_CLOSING;
25783398491bSAlexander Motin 			break;
2579dee34ca4SPoul-Henning Kamp 		}
25803398491bSAlexander Motin 	}
25819e3e3fe5SWarner Losh 	/*
25829e3e3fe5SWarner Losh 	 * Drop the reference we were created with.  Do it directly, since
25839e3e3fe5SWarner Losh 	 * we are in a special context where we do not have to queue the
25849e3e3fe5SWarner Losh 	 * call to swapgeom_close_ev().
25859e3e3fe5SWarner Losh 	 */
25869e3e3fe5SWarner Losh 	cp->index--;
25873398491bSAlexander Motin 	destroy = ((sp != NULL) && (cp->index == 0));
25883398491bSAlexander Motin 	if (destroy)
25893398491bSAlexander Motin 		sp->sw_id = NULL;
25903398491bSAlexander Motin 	mtx_unlock(&sw_dev_mtx);
25913398491bSAlexander Motin 	if (destroy)
25923398491bSAlexander Motin 		swapgeom_close_ev(cp, 0);
2593dee34ca4SPoul-Henning Kamp }
2594dee34ca4SPoul-Henning Kamp 
2595dee34ca4SPoul-Henning Kamp static void
2596dee34ca4SPoul-Henning Kamp swapgeom_close(struct thread *td, struct swdevt *sw)
2597dee34ca4SPoul-Henning Kamp {
25983398491bSAlexander Motin 	struct g_consumer *cp;
2599dee34ca4SPoul-Henning Kamp 
26003398491bSAlexander Motin 	mtx_lock(&sw_dev_mtx);
26013398491bSAlexander Motin 	cp = sw->sw_id;
26023398491bSAlexander Motin 	sw->sw_id = NULL;
26033398491bSAlexander Motin 	mtx_unlock(&sw_dev_mtx);
26040c657d22SKonstantin Belousov 
26050c657d22SKonstantin Belousov 	/*
26060c657d22SKonstantin Belousov 	 * swapgeom_close() may be called from the biodone context,
26070c657d22SKonstantin Belousov 	 * where we cannot perform topology changes.  Delegate the
26080c657d22SKonstantin Belousov 	 * work to the events thread.
26090c657d22SKonstantin Belousov 	 */
26103398491bSAlexander Motin 	if (cp != NULL)
26113398491bSAlexander Motin 		g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
2612dee34ca4SPoul-Henning Kamp }
2613dee34ca4SPoul-Henning Kamp 
261488ad2d7bSKonstantin Belousov static int
261588ad2d7bSKonstantin Belousov swapongeom_locked(struct cdev *dev, struct vnode *vp)
2616dee34ca4SPoul-Henning Kamp {
2617dee34ca4SPoul-Henning Kamp 	struct g_provider *pp;
2618dee34ca4SPoul-Henning Kamp 	struct g_consumer *cp;
2619dee34ca4SPoul-Henning Kamp 	static struct g_geom *gp;
2620dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
2621dee34ca4SPoul-Henning Kamp 	u_long nblks;
2622dee34ca4SPoul-Henning Kamp 	int error;
2623dee34ca4SPoul-Henning Kamp 
262488ad2d7bSKonstantin Belousov 	pp = g_dev_getprovider(dev);
262588ad2d7bSKonstantin Belousov 	if (pp == NULL)
262688ad2d7bSKonstantin Belousov 		return (ENODEV);
2627dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
2628dee34ca4SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2629dee34ca4SPoul-Henning Kamp 		cp = sp->sw_id;
2630dee34ca4SPoul-Henning Kamp 		if (cp != NULL && cp->provider == pp) {
2631dee34ca4SPoul-Henning Kamp 			mtx_unlock(&sw_dev_mtx);
263288ad2d7bSKonstantin Belousov 			return (EBUSY);
2633dee34ca4SPoul-Henning Kamp 		}
2634dee34ca4SPoul-Henning Kamp 	}
2635dee34ca4SPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
26365721c9c7SPoul-Henning Kamp 	if (gp == NULL)
263702c62349SJaakko Heinonen 		gp = g_new_geomf(&g_swap_class, "swap");
2638dee34ca4SPoul-Henning Kamp 	cp = g_new_consumer(gp);
26399e3e3fe5SWarner Losh 	cp->index = 1;	/* Number of active I/Os, plus one for being active. */
26409e3e3fe5SWarner Losh 	cp->flags |=  G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2641dee34ca4SPoul-Henning Kamp 	g_attach(cp, pp);
2642afeb65e6SPoul-Henning Kamp 	/*
2643afeb65e6SPoul-Henning Kamp 	 * XXX: Every time you think you can improve the margin for
2644afeb65e6SPoul-Henning Kamp 	 * footshooting, somebody depends on the ability to do so:
2645afeb65e6SPoul-Henning Kamp 	 * savecore(8) wants to write to our swapdev so we cannot
2646afeb65e6SPoul-Henning Kamp 	 * set an exclusive count :-(
2647afeb65e6SPoul-Henning Kamp 	 */
2648d2bae332SPoul-Henning Kamp 	error = g_access(cp, 1, 1, 0);
264988ad2d7bSKonstantin Belousov 	if (error != 0) {
2650dee34ca4SPoul-Henning Kamp 		g_detach(cp);
2651dee34ca4SPoul-Henning Kamp 		g_destroy_consumer(cp);
265288ad2d7bSKonstantin Belousov 		return (error);
2653dee34ca4SPoul-Henning Kamp 	}
2654dee34ca4SPoul-Henning Kamp 	nblks = pp->mediasize / DEV_BSIZE;
265588ad2d7bSKonstantin Belousov 	swaponsomething(vp, cp, nblks, swapgeom_strategy,
265688ad2d7bSKonstantin Belousov 	    swapgeom_close, dev2udev(dev),
26572cc718a1SKonstantin Belousov 	    (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
265888ad2d7bSKonstantin Belousov 	return (0);
2659dee34ca4SPoul-Henning Kamp }
2660dee34ca4SPoul-Henning Kamp 
2661dee34ca4SPoul-Henning Kamp static int
266288ad2d7bSKonstantin Belousov swapongeom(struct vnode *vp)
2663dee34ca4SPoul-Henning Kamp {
2664dee34ca4SPoul-Henning Kamp 	int error;
2665dee34ca4SPoul-Henning Kamp 
2666cb05b60aSAttilio Rao 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
266788ad2d7bSKonstantin Belousov 	if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
266888ad2d7bSKonstantin Belousov 		error = ENOENT;
266988ad2d7bSKonstantin Belousov 	} else {
267088ad2d7bSKonstantin Belousov 		g_topology_lock();
267188ad2d7bSKonstantin Belousov 		error = swapongeom_locked(vp->v_rdev, vp);
267288ad2d7bSKonstantin Belousov 		g_topology_unlock();
267388ad2d7bSKonstantin Belousov 	}
267422db15c0SAttilio Rao 	VOP_UNLOCK(vp, 0);
2675dee34ca4SPoul-Henning Kamp 	return (error);
2676dee34ca4SPoul-Henning Kamp }
2677dee34ca4SPoul-Henning Kamp 
2678dee34ca4SPoul-Henning Kamp /*
2679dee34ca4SPoul-Henning Kamp  * VNODE backend
2680dee34ca4SPoul-Henning Kamp  *
2681dee34ca4SPoul-Henning Kamp  * This is used mainly for network filesystem (read: probably only tested
2682dee34ca4SPoul-Henning Kamp  * with NFS) swapfiles.
2683dee34ca4SPoul-Henning Kamp  *
2684dee34ca4SPoul-Henning Kamp  */
2685dee34ca4SPoul-Henning Kamp 
2686dee34ca4SPoul-Henning Kamp static void
2687dee34ca4SPoul-Henning Kamp swapdev_strategy(struct buf *bp, struct swdevt *sp)
2688dee34ca4SPoul-Henning Kamp {
2689494eb176SPoul-Henning Kamp 	struct vnode *vp2;
2690dee34ca4SPoul-Henning Kamp 
2691dee34ca4SPoul-Henning Kamp 	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2692dee34ca4SPoul-Henning Kamp 
2693dee34ca4SPoul-Henning Kamp 	vp2 = sp->sw_id;
2694dee34ca4SPoul-Henning Kamp 	vhold(vp2);
2695dee34ca4SPoul-Henning Kamp 	if (bp->b_iocmd == BIO_WRITE) {
26963cfc7651SOlivier Houchard 		if (bp->b_bufobj)
2697494eb176SPoul-Henning Kamp 			bufobj_wdrop(bp->b_bufobj);
2698a76d8f4eSPoul-Henning Kamp 		bufobj_wref(&vp2->v_bufobj);
2699dee34ca4SPoul-Henning Kamp 	}
27003cfc7651SOlivier Houchard 	if (bp->b_bufobj != &vp2->v_bufobj)
27013cfc7651SOlivier Houchard 		bp->b_bufobj = &vp2->v_bufobj;
2702dee34ca4SPoul-Henning Kamp 	bp->b_vp = vp2;
27032c18019fSPoul-Henning Kamp 	bp->b_iooffset = dbtob(bp->b_blkno);
2704b792bebeSPoul-Henning Kamp 	bstrategy(bp);
2705dee34ca4SPoul-Henning Kamp 	return;
2706dee34ca4SPoul-Henning Kamp }
2707dee34ca4SPoul-Henning Kamp 
2708dee34ca4SPoul-Henning Kamp static void
2709dee34ca4SPoul-Henning Kamp swapdev_close(struct thread *td, struct swdevt *sp)
2710dee34ca4SPoul-Henning Kamp {
2711dee34ca4SPoul-Henning Kamp 
2712dee34ca4SPoul-Henning Kamp 	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2713dee34ca4SPoul-Henning Kamp 	vrele(sp->sw_vp);
2714dee34ca4SPoul-Henning Kamp }
2715dee34ca4SPoul-Henning Kamp 
2716dee34ca4SPoul-Henning Kamp 
2717dee34ca4SPoul-Henning Kamp static int
2718dee34ca4SPoul-Henning Kamp swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2719dee34ca4SPoul-Henning Kamp {
2720dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
2721dee34ca4SPoul-Henning Kamp 	int error;
2722dee34ca4SPoul-Henning Kamp 
2723dee34ca4SPoul-Henning Kamp 	if (nblks == 0)
2724dee34ca4SPoul-Henning Kamp 		return (ENXIO);
2725dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
2726dee34ca4SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2727dee34ca4SPoul-Henning Kamp 		if (sp->sw_id == vp) {
2728dee34ca4SPoul-Henning Kamp 			mtx_unlock(&sw_dev_mtx);
2729dee34ca4SPoul-Henning Kamp 			return (EBUSY);
2730dee34ca4SPoul-Henning Kamp 		}
2731dee34ca4SPoul-Henning Kamp 	}
2732dee34ca4SPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
2733dee34ca4SPoul-Henning Kamp 
2734cb05b60aSAttilio Rao 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2735dee34ca4SPoul-Henning Kamp #ifdef MAC
273630d239bcSRobert Watson 	error = mac_system_check_swapon(td->td_ucred, vp);
2737dee34ca4SPoul-Henning Kamp 	if (error == 0)
2738dee34ca4SPoul-Henning Kamp #endif
27399e223287SKonstantin Belousov 		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
274022db15c0SAttilio Rao 	(void) VOP_UNLOCK(vp, 0);
2741dee34ca4SPoul-Henning Kamp 	if (error)
2742dee34ca4SPoul-Henning Kamp 		return (error);
2743dee34ca4SPoul-Henning Kamp 
2744dee34ca4SPoul-Henning Kamp 	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
27452cc718a1SKonstantin Belousov 	    NODEV, 0);
2746dee34ca4SPoul-Henning Kamp 	return (0);
2747dee34ca4SPoul-Henning Kamp }
274889c241d1SGleb Smirnoff 
274989c241d1SGleb Smirnoff static int
275089c241d1SGleb Smirnoff sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
275189c241d1SGleb Smirnoff {
275289c241d1SGleb Smirnoff 	int error, new, n;
275389c241d1SGleb Smirnoff 
275489c241d1SGleb Smirnoff 	new = nsw_wcount_async_max;
275589c241d1SGleb Smirnoff 	error = sysctl_handle_int(oidp, &new, 0, req);
275689c241d1SGleb Smirnoff 	if (error != 0 || req->newptr == NULL)
275789c241d1SGleb Smirnoff 		return (error);
275889c241d1SGleb Smirnoff 
275989c241d1SGleb Smirnoff 	if (new > nswbuf / 2 || new < 1)
276089c241d1SGleb Smirnoff 		return (EINVAL);
276189c241d1SGleb Smirnoff 
276289c241d1SGleb Smirnoff 	mtx_lock(&pbuf_mtx);
276389c241d1SGleb Smirnoff 	while (nsw_wcount_async_max != new) {
276489c241d1SGleb Smirnoff 		/*
276589c241d1SGleb Smirnoff 		 * Adjust the difference.  If the current async count is too
276689c241d1SGleb Smirnoff 		 * low, we will need to squeeze our update in slowly.  Sleep
276789c241d1SGleb Smirnoff 		 * with a higher priority than getpbuf() to finish faster.
276889c241d1SGleb Smirnoff 		 */
276989c241d1SGleb Smirnoff 		n = new - nsw_wcount_async_max;
277089c241d1SGleb Smirnoff 		if (nsw_wcount_async + n >= 0) {
277189c241d1SGleb Smirnoff 			nsw_wcount_async += n;
277289c241d1SGleb Smirnoff 			nsw_wcount_async_max += n;
277389c241d1SGleb Smirnoff 			wakeup(&nsw_wcount_async);
277489c241d1SGleb Smirnoff 		} else {
277589c241d1SGleb Smirnoff 			nsw_wcount_async_max -= nsw_wcount_async;
277689c241d1SGleb Smirnoff 			nsw_wcount_async = 0;
277789c241d1SGleb Smirnoff 			msleep(&nsw_wcount_async, &pbuf_mtx, PSWP,
277889c241d1SGleb Smirnoff 			    "swpsysctl", 0);
277989c241d1SGleb Smirnoff 		}
278089c241d1SGleb Smirnoff 	}
278189c241d1SGleb Smirnoff 	mtx_unlock(&pbuf_mtx);
278289c241d1SGleb Smirnoff 
278389c241d1SGleb Smirnoff 	return (0);
278489c241d1SGleb Smirnoff }
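
/*
 * A hedged usage note: assuming this handler is attached to a
 * vm.swap_async_max OID elsewhere in this file, the limit on
 * asynchronous swap writes can be read or retuned at run time:
 *
 *	sysctl vm.swap_async_max
 *	sysctl vm.swap_async_max=8
 *
 * Values outside the range [1, nswbuf / 2] are rejected with EINVAL.
 */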
2785