/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 */

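/*
 * Illustrative sketch (not part of this file): the radix bitmap ("blist")
 * API from kern/subr_blist.c referenced above.  Each swap device below
 * gets one blist at swapon time, which swp_pager_getswapspace() and
 * swp_pager_freeswapspace() then carve up and reassemble:
 *
 *	blist_t bl = blist_create(nblks, M_WAITOK);
 *	int npages = 4;
 *	daddr_t blk = blist_alloc(bl, &npages, 4);
 *	if (blk != SWAPBLK_NONE)
 *		blist_free(bl, blk, npages);
 *	blist_destroy(bl);
 */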

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/blist.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
 * The 64-page limit is due to the radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define	MAX_PAGEOUT_CLUSTER	32
#endif

#if !defined(SWB_NPAGES)
#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
#endif

#define	SWAP_META_PAGES		PCTRIE_COUNT

/*
 * A swblk structure maps each page index within a
 * SWAP_META_PAGES-aligned and sized range to the address of an
 * on-disk swap block (or SWAPBLK_NONE). The collection of these
 * mappings for an entire vm object is implemented as a pc-trie.
 */
struct swblk {
	vm_pindex_t	p;
	daddr_t		d[SWAP_META_PAGES];
};

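/*
 * Illustrative sketch (assumes the SWAP_PCTRIE_* helpers generated
 * below): translating a page index into an on-disk swap block rounds
 * the index down to its SWAP_META_PAGES-aligned trie key and then
 * indexes the d[] array:
 *
 *	struct swblk *sb;
 *	daddr_t blk;
 *
 *	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 *	    rounddown(pindex, SWAP_META_PAGES));
 *	blk = (sb != NULL) ? sb->d[pindex % SWAP_META_PAGES] : SWAPBLK_NONE;
 */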

static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;	/* Allocate from here next */
static int nswapdev;		/* Number of swap devices */
int swap_pager_avail;
static struct sx swdev_syscall_lock;	/* serialize swap(on|off) */

static u_long swap_reserved;
static u_long swap_total;
static int sysctl_page_shift(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_reserved, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
    &swap_reserved, 0, sysctl_page_shift, "A",
    "Amount of swap storage needed to back all allocated anonymous memory.");
SYSCTL_PROC(_vm, OID_AUTO, swap_total, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
    &swap_total, 0, sysctl_page_shift, "A",
    "Total amount of available swap storage.");

static int overcommit = 0;
SYSCTL_INT(_vm, VM_OVERCOMMIT, overcommit, CTLFLAG_RW, &overcommit, 0,
    "Configure virtual memory overcommit behavior. See tuning(7) "
    "for details.");
static unsigned long swzone;
SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
    "Actual size of swap metadata zone");
static unsigned long swap_maxpages;
SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
    "Maximum amount of swap supported");

/* bits from overcommit */
#define	SWAP_RESERVE_FORCE_ON		(1 << 0)
#define	SWAP_RESERVE_RLIMIT_ON		(1 << 1)
#define	SWAP_RESERVE_ALLOW_NONWIRED	(1 << 2)

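/*
 * Example: vm.overcommit is a mask of the bits above.  Setting
 * "sysctl vm.overcommit=1" (SWAP_RESERVE_FORCE_ON) makes
 * swap_reserve_by_cred() fail once reservations exceed configured swap;
 * adding bit 2 (SWAP_RESERVE_ALLOW_NONWIRED) also counts non-wired RAM
 * toward the limit, and bit 1 (SWAP_RESERVE_RLIMIT_ON) additionally
 * enforces RLIMIT_SWAP per real uid.
 */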

static int
sysctl_page_shift(SYSCTL_HANDLER_ARGS)
{
	uint64_t newval;
	u_long value = *(u_long *)arg1;

	newval = ((uint64_t)value) << PAGE_SHIFT;
	return (sysctl_handle_64(oidp, &newval, 0, req));
}

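/*
 * Worked example: with 4 KB pages (PAGE_SHIFT == 12), a swap_reserved
 * count of 1024 pages is exported by the sysctls above as
 * 1024 << 12 == 4194304 bytes.
 */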

int
swap_reserve(vm_ooffset_t incr)
{

	return (swap_reserve_by_cred(incr, curthread->td_ucred));
}

int
swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
	u_long r, s, prev, pincr;
	int res, error;
	static int curfail;
	static struct timeval lastfail;
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
	    (uintmax_t)incr));

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(curproc);
		error = racct_add(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
		if (error != 0)
			return (0);
	}
#endif

	pincr = atop(incr);
	res = 0;
	prev = atomic_fetchadd_long(&swap_reserved, pincr);
	r = prev + pincr;
	if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
		s = vm_cnt.v_page_count - vm_cnt.v_free_reserved -
		    vm_wire_count();
	} else
		s = 0;
	s += swap_total;
	if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
	    (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
		res = 1;
	} else {
		prev = atomic_fetchadd_long(&swap_reserved, -pincr);
		if (prev < pincr)
			panic("swap_reserved < incr on overcommit fail");
	}
	if (res) {
		prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
		if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
		    prev + pincr > lim_cur(curthread, RLIMIT_SWAP) &&
		    priv_check(curthread, PRIV_VM_SWAP_NORLIMIT)) {
			res = 0;
			prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
			if (prev < pincr)
				panic("uip->ui_vmsize < incr on overcommit fail");
		}
	}
	if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
		printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
		    uip->ui_uid, curproc->p_pid, incr);
	}

#ifdef RACCT
	if (racct_enable && !res) {
		PROC_LOCK(curproc);
		racct_sub(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
	}
#endif

	return (res);
}

void
swap_reserve_force(vm_ooffset_t incr)
{
	struct uidinfo *uip;
	u_long pincr;

	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
	    (uintmax_t)incr));

	PROC_LOCK(curproc);
#ifdef RACCT
	if (racct_enable)
		racct_add_force(curproc, RACCT_SWAP, incr);
#endif
	pincr = atop(incr);
	atomic_add_long(&swap_reserved, pincr);
	uip = curproc->p_ucred->cr_ruidinfo;
	atomic_add_long(&uip->ui_vmsize, pincr);
	PROC_UNLOCK(curproc);
}

void
swap_release(vm_ooffset_t decr)
{
	struct ucred *cred;

	PROC_LOCK(curproc);
	cred = curproc->p_ucred;
	swap_release_by_cred(decr, cred);
	PROC_UNLOCK(curproc);
}

void
swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
{
	u_long prev, pdecr;
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;

	KASSERT((decr & PAGE_MASK) == 0, ("%s: decr: %ju & PAGE_MASK", __func__,
	    (uintmax_t)decr));

	pdecr = atop(decr);
	prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
	if (prev < pdecr)
		panic("swap_reserved < decr");

	prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
	if (prev < pdecr)
		printf("negative vmsize for uid = %d\n", uip->ui_uid);
#ifdef RACCT
	if (racct_enable)
		racct_sub_cred(cred, RACCT_SWAP, decr);
#endif
}

#define SWM_POP		0x01	/* pop out			*/

static int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static struct mtx swbuf_mtx;	/* to sync nsw_wcount_async */
static int nsw_wcount_async;	/* limit async write buffers */
static int nsw_wcount_async_max;/* assigned maximum			*/
static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/

static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
    "Maximum running async swap ops");
static int sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_fragmentation, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_fragmentation, "A",
    "Swap Fragmentation Info");

static struct sx sw_alloc_sx;

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
static uma_zone_t swwbuf_zone;
static uma_zone_t swrbuf_zone;
static uma_zone_t swblk_zone;
static uma_zone_t swpctrie_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
		swap_pager_alloc(void *handle, vm_ooffset_t size,
		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
    int *);
static int	swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, pgo_getpages_iodone_t, void *);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t
		swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp);

struct pagerops swappagerops = {
	.pgo_init =	swap_pager_init,	/* early system initialization of pager	*/
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
	.pgo_getpages =	swap_pager_getpages,	/* pagein				*/
	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async)		*/
	.pgo_putpages =	swap_pager_putpages,	/* pageout				*/
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page	*/
	.pgo_pageunswapped = swap_pager_unswapped,	/* remove swap related to page		*/
};

/*
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
    "Maximum size of a swap block in pages");

static void	swp_sizecheck(void);
static void	swp_pager_async_iodone(struct buf *bp);
static bool	swp_pager_swblk_empty(struct swblk *sb, int start, int limit);
static int	swapongeom(struct vnode *);
static int	swaponvp(struct thread *, struct vnode *, u_long);
static int	swapoff_one(struct swdevt *sp, struct ucred *cred);

/*
 * Swap bitmap functions
 */
static void	swp_pager_freeswapspace(daddr_t blk, daddr_t npages);
static daddr_t	swp_pager_getswapspace(int *npages, int limit);

/*
 * Metadata functions
 */
static daddr_t swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);

static void
swp_pager_init_freerange(daddr_t *start, daddr_t *num)
{

	*start = SWAPBLK_NONE;
	*num = 0;
}

static void
swp_pager_update_freerange(daddr_t *start, daddr_t *num, daddr_t addr)
{

	if (*start + *num == addr) {
		(*num)++;
	} else {
		swp_pager_freeswapspace(*start, *num);
		*start = addr;
		*num = 1;
	}
}

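/*
 * Usage pattern for the two helpers above (sketch; the real loops are in
 * swap_pager_reserve() and swap_pager_copy() below): runs of adjacent
 * blocks freed in ascending order are coalesced into a single
 * swp_pager_freeswapspace() call, and the initial (SWAPBLK_NONE, 0)
 * range makes the final flush a no-op when nothing was accumulated:
 *
 *	daddr_t s_free, n_free;
 *
 *	swp_pager_init_freerange(&s_free, &n_free);
 *	... for each swap block "addr" being released:
 *		swp_pager_update_freerange(&s_free, &n_free, addr);
 *	swp_pager_freeswapspace(s_free, n_free);
 */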

static void *
swblk_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
	    M_USE_RESERVE : 0)));
}

static void
swblk_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(swpctrie_zone, node);
}

PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);

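/*
 * PCTRIE_DEFINE() generates the typed trie operations used by the
 * metadata code, e.g. (sketch):
 *
 *	error = SWAP_PCTRIE_INSERT(&object->un_pager.swp.swp_blks, sb);
 *	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
 *	SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, rdpi);
 *
 * where "rdpi" is a page index rounded down to SWAP_META_PAGES and "sb"
 * is a struct swblk allocated from swblk_zone.
 */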

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full (task killing) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 */
static void
swp_sizecheck(void)
{

	if (swap_pager_avail < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (swap_pager_avail > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
	sx_init(&sw_alloc_sx, "swspsx");
	sx_init(&swdev_syscall_lock, "swsysc");
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	unsigned long n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.   We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 */
	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);

	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_init(&swbuf_mtx, "async swbuf mutex", NULL, MTX_DEF);

	swwbuf_zone = pbuf_zsecond_create("swwbuf", nswbuf / 4);
	swrbuf_zone = pbuf_zsecond_create("swrbuf", nswbuf / 2);

	/*
	 * Initialize our zone, taking the user's requested size or
	 * estimating the number we need based on the number of pages
	 * in the system.
	 */
	n = maxswzone != 0 ? maxswzone / sizeof(struct swblk) :
	    vm_cnt.v_page_count / 2;
	swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
	    pctrie_zone_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
	if (swpctrie_zone == NULL)
		panic("failed to create swap pctrie zone.");
	swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
	    NULL, NULL, _Alignof(struct swblk) - 1, UMA_ZONE_VM);
	if (swblk_zone == NULL)
		panic("failed to create swap blk zone.");
	n2 = n;
	do {
		if (uma_zone_reserve_kva(swblk_zone, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	/*
	 * Often uma_zone_reserve_kva() cannot reserve exactly the
	 * requested size.  Account for the difference when
	 * calculating swap_maxpages.
	 */
	n = uma_zone_get_max(swblk_zone);

	if (n < n2)
		printf("Swap blk zone entries changed from %lu to %lu.\n",
		    n2, n);
	swap_maxpages = n * SWAP_META_PAGES;
	swzone = n * sizeof(struct swblk);
	if (!uma_zone_reserve_kva(swpctrie_zone, n))
		printf("Cannot reserve swap pctrie zone, "
		    "reduce kern.maxswzone.\n");
}

static vm_object_t
swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
    vm_ooffset_t offset)
{
	vm_object_t object;

	if (cred != NULL) {
		if (!swap_reserve_by_cred(size, cred))
			return (NULL);
		crhold(cred);
	}

	/*
	 * The un_pager.swp.swp_blks trie is initialized by
	 * vm_object_allocate() to ensure the correct order of
	 * visibility to other threads.
	 */
	object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
	    PAGE_MASK + size));

	object->handle = handle;
	if (cred != NULL) {
		object->cred = cred;
		object->charge = size;
	}
	return (object);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.
 *
 *	This routine must ensure that no live duplicate is created for
 *	the named object request, which is protected against by
 *	holding the sw_alloc_sx lock in case handle != NULL.
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;

	if (handle != NULL) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
		if (object == NULL) {
			object = swap_pager_alloc_init(handle, cred, size,
			    offset);
			if (object != NULL) {
				TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
				    object, pager_object_list);
			}
		}
		sx_xunlock(&sw_alloc_sx);
	} else {
		object = swap_pager_alloc_init(handle, cred, size, offset);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	The object must be locked.
 */
static void
swap_pager_dealloc(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(object);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	object->handle = NULL;
	object->type = OBJT_DEAD;
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for up to the requested number of pages, and at
 *	least a minimum number of pages.  The starting swap block number
 *	(a page index) is returned or SWAPBLK_NONE if the allocation
 *	failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	This routine may not sleep.
 *
 *	We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int *io_npages, int limit)
{
	daddr_t blk;
	struct swdevt *sp;
	int mpages, npages;

	blk = SWAPBLK_NONE;
	npages = mpages = *io_npages;
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	while (!TAILQ_EMPTY(&swtailq)) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if ((sp->sw_flags & SW_CLOSING) == 0)
			blk = blist_alloc(sp->sw_blist, &npages, mpages);
		if (blk != SWAPBLK_NONE)
			break;
		sp = TAILQ_NEXT(sp, sw_list);
		if (swdevhd == sp) {
			if (npages <= limit)
				break;
			mpages = npages - 1;
			npages >>= 1;
		}
	}
	if (blk != SWAPBLK_NONE) {
		*io_npages = npages;
		blk += sp->sw_first;
		sp->sw_used += npages;
		swap_pager_avail -= npages;
		swp_sizecheck();
		swdevhd = TAILQ_NEXT(sp, sw_list);
	} else {
		if (swap_pager_full != 2) {
			printf("swp_pager_getswapspace(%d): failed\n",
			    *io_npages);
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
		swdevhd = NULL;
	}
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}

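/*
 * Example of the fallback above (sketch): a caller requesting 32 pages
 * with limit 1 first asks every device for exactly 32 contiguous
 * blocks; after each unsuccessful round-robin pass the minimum is
 * halved (16, then 8, ...) while the maximum drops to one less than the
 * previous attempt, so the allocation fails only when no device can
 * supply even "limit" pages:
 *
 *	int n = 32;
 *	daddr_t blk = swp_pager_getswapspace(&n, 1);
 *	if (blk != SWAPBLK_NONE)
 *		... n now holds the number of pages actually allocated ...
 */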

static bool
swp_pager_isondev(daddr_t blk, struct swdevt *sp)
{

	return (blk >= sp->sw_first && blk < sp->sw_end);
}

static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (swp_pager_isondev(bp->b_blkno, sp)) {
			mtx_unlock(&sw_dev_mtx);
			if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
			    unmapped_buf_allowed) {
				bp->b_data = unmapped_buf;
				bp->b_offset = 0;
			} else {
				pmap_qenter((vm_offset_t)bp->b_data,
				    &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
			}
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	This routine may not sleep.
 */
static void
swp_pager_freeswapspace(daddr_t blk, daddr_t npages)
{
	struct swdevt *sp;

	if (npages == 0)
		return;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (swp_pager_isondev(blk, sp)) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SYSCTL_SWAP_FRAGMENTATION() -	produce raw swap space stats
 */
static int
sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct swdevt *sp;
	const char *devname;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (vn_isdisk(sp->sw_vp, NULL))
			devname = devtoname(sp->sw_vp->v_rdev);
		else
			devname = "[file]";
		sbuf_printf(&sbuf, "\nFree space on device %s:\n", devname);
		blist_stats(sp->sw_blist, &sbuf);
	}
	mtx_unlock(&sw_dev_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This is a globally accessible routine.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	The object must be locked.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
{

	swp_pager_meta_free(object, start, size);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	daddr_t addr, blk, n_free, s_free;
	int i, j, n;

	swp_pager_init_freerange(&s_free, &n_free);
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += n) {
		n = min(BLIST_MAX_ALLOC, size - i);
		blk = swp_pager_getswapspace(&n, 1);
		if (blk == SWAPBLK_NONE) {
			swp_pager_meta_free(object, start, i);
			VM_OBJECT_WUNLOCK(object);
			return (-1);
		}
		for (j = 0; j < n; ++j) {
			addr = swp_pager_meta_build(object,
			    start + i + j, blk + j);
			if (addr != SWAPBLK_NONE)
				swp_pager_update_freerange(&s_free, &n_free,
				    addr);
		}
	}
	swp_pager_freeswapspace(s_free, n_free);
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

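/*
 * Example caller (sketch; "object" and "size" are hypothetical): a
 * consumer such as a swap-backed md(4) disk can prearrange backing
 * store for its entire range and fail gracefully at configuration time
 * instead of at pageout time:
 *
 *	if (swap_pager_reserve(object, 0, size) < 0)
 *		return (ENOMEM);
 */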
9260a47b48bSJohn Dyson /*
9271c7c3c6aSMatthew Dillon  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
9281c7c3c6aSMatthew Dillon  *			and destroy the source.
9291c7c3c6aSMatthew Dillon  *
9301c7c3c6aSMatthew Dillon  *	Copy any valid swapblks from the source to the destination.  In
9311c7c3c6aSMatthew Dillon  *	cases where both the source and destination have a valid swapblk,
9321c7c3c6aSMatthew Dillon  *	we keep the destination's.
9331c7c3c6aSMatthew Dillon  *
93415523cf7SKonstantin Belousov  *	This routine is allowed to sleep.  It may sleep allocating metadata
9351c7c3c6aSMatthew Dillon  *	indirectly through swp_pager_meta_build() or if paging is still in
9361c7c3c6aSMatthew Dillon  *	progress on the source.
9371c7c3c6aSMatthew Dillon  *
9381c7c3c6aSMatthew Dillon  *	The source object contains no vm_page_t's (which is just as well)
9391c7c3c6aSMatthew Dillon  *
9401c7c3c6aSMatthew Dillon  *	The source object is of type OBJT_SWAP.
9411c7c3c6aSMatthew Dillon  *
94215523cf7SKonstantin Belousov  *	The source and destination objects must be locked.
94315523cf7SKonstantin Belousov  *	Both object locks may temporarily be released.
94426f9a767SRodney W. Grimes  */
94526f9a767SRodney W. Grimes void
9462f249180SPoul-Henning Kamp swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
9472f249180SPoul-Henning Kamp     vm_pindex_t offset, int destroysource)
94826f9a767SRodney W. Grimes {
949a316d390SJohn Dyson 	vm_pindex_t i;
95078f1deefSAlan Cox 	daddr_t dstaddr, n_free, s_free, srcaddr;
9514dcc5c2dSMatthew Dillon 
95289f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(srcobject);
95389f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(dstobject);
9540cddd8f0SMatthew Dillon 
95526f9a767SRodney W. Grimes 	/*
9561c7c3c6aSMatthew Dillon 	 * If destroysource is set, we remove the source object from the
9571c7c3c6aSMatthew Dillon 	 * swap_pager internal queue now.
95826f9a767SRodney W. Grimes 	 */
959eb4d6a1bSKonstantin Belousov 	if (destroysource && srcobject->handle != NULL) {
960eb4d6a1bSKonstantin Belousov 		vm_object_pip_add(srcobject, 1);
961eb4d6a1bSKonstantin Belousov 		VM_OBJECT_WUNLOCK(srcobject);
962eb4d6a1bSKonstantin Belousov 		vm_object_pip_add(dstobject, 1);
963eb4d6a1bSKonstantin Belousov 		VM_OBJECT_WUNLOCK(dstobject);
964eb4d6a1bSKonstantin Belousov 		sx_xlock(&sw_alloc_sx);
965eb4d6a1bSKonstantin Belousov 		TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
966eb4d6a1bSKonstantin Belousov 		    pager_object_list);
967eb4d6a1bSKonstantin Belousov 		sx_xunlock(&sw_alloc_sx);
968eb4d6a1bSKonstantin Belousov 		VM_OBJECT_WLOCK(dstobject);
969eb4d6a1bSKonstantin Belousov 		vm_object_pip_wakeup(dstobject);
970eb4d6a1bSKonstantin Belousov 		VM_OBJECT_WLOCK(srcobject);
971eb4d6a1bSKonstantin Belousov 		vm_object_pip_wakeup(srcobject);
972bd228075SAlan Cox 	}
97326f9a767SRodney W. Grimes 
9741c7c3c6aSMatthew Dillon 	/*
9754abca9bbSAlan Cox 	 * Transfer source to destination.
9761c7c3c6aSMatthew Dillon 	 */
97778f1deefSAlan Cox 	swp_pager_init_freerange(&s_free, &n_free);
9781c7c3c6aSMatthew Dillon 	for (i = 0; i < dstobject->size; ++i) {
9794abca9bbSAlan Cox 		srcaddr = swp_pager_meta_ctl(srcobject, i + offset, SWM_POP);
9804abca9bbSAlan Cox 		if (srcaddr == SWAPBLK_NONE)
9814abca9bbSAlan Cox 			continue;
9821c7c3c6aSMatthew Dillon 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
98378f1deefSAlan Cox 		if (dstaddr != SWAPBLK_NONE) {
98478f1deefSAlan Cox 			/*
98578f1deefSAlan Cox 			 * Destination has valid swapblk or it is represented
98678f1deefSAlan Cox 			 * by a resident page.  We destroy the source block.
98778f1deefSAlan Cox 			 */
98878f1deefSAlan Cox 			swp_pager_update_freerange(&s_free, &n_free, srcaddr);
98978f1deefSAlan Cox 			continue;
99078f1deefSAlan Cox 		}
99178f1deefSAlan Cox 
9921c7c3c6aSMatthew Dillon 		/*
9931c7c3c6aSMatthew Dillon 		 * Destination has no swapblk and is not resident,
9941c7c3c6aSMatthew Dillon 		 * copy source.
9954abca9bbSAlan Cox 		 *
996c7c8dd7eSAlan Cox 		 * swp_pager_meta_build() can sleep.
997c7c8dd7eSAlan Cox 		 */
998c7c8dd7eSAlan Cox 		vm_object_pip_add(srcobject, 1);
99989f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(srcobject);
1000c7c8dd7eSAlan Cox 		vm_object_pip_add(dstobject, 1);
100178f1deefSAlan Cox 		dstaddr = swp_pager_meta_build(dstobject, i, srcaddr);
100278f1deefSAlan Cox 		KASSERT(dstaddr == SWAPBLK_NONE,
100378f1deefSAlan Cox 		    ("Unexpected destination swapblk"));
1004c7c8dd7eSAlan Cox 		vm_object_pip_wakeup(dstobject);
100589f6b863SAttilio Rao 		VM_OBJECT_WLOCK(srcobject);
1006c7c8dd7eSAlan Cox 		vm_object_pip_wakeup(srcobject);
10071c7c3c6aSMatthew Dillon 	}
100878f1deefSAlan Cox 	swp_pager_freeswapspace(s_free, n_free);
100926f9a767SRodney W. Grimes 
101026f9a767SRodney W. Grimes 	/*
10111c7c3c6aSMatthew Dillon 	 * Free left over swap blocks in source.
10121c7c3c6aSMatthew Dillon 	 *
1013763df3ecSPedro F. Giffuni 	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
10141c7c3c6aSMatthew Dillon 	 * double-remove the object from the swap queues.
101526f9a767SRodney W. Grimes 	 */
1016c0877f10SJohn Dyson 	if (destroysource) {
10171c7c3c6aSMatthew Dillon 		swp_pager_meta_free_all(srcobject);
10181c7c3c6aSMatthew Dillon 		/*
10191c7c3c6aSMatthew Dillon 		 * Reverting the type is not necessary; the caller is going
10201c7c3c6aSMatthew Dillon 		 * to destroy srcobject directly, but I'm doing it here
1021956f3135SPhilippe Charnier 		 * for consistency since we've removed the object from its
10221c7c3c6aSMatthew Dillon 		 * queues.
10231c7c3c6aSMatthew Dillon 		 */
10241c7c3c6aSMatthew Dillon 		srcobject->type = OBJT_DEFAULT;
1025c0877f10SJohn Dyson 	}
102626f9a767SRodney W. Grimes }
102726f9a767SRodney W. Grimes 
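/*
 * A minimal sketch of the freerange batching used above (illustrative
 * only, using helpers defined earlier in this file): physically
 * contiguous blocks are accumulated so that swp_pager_freeswapspace()
 * runs once per contiguous run instead of once per block.
 *
 *	daddr_t s_free, n_free;
 *
 *	swp_pager_init_freerange(&s_free, &n_free);
 *	while (there are more blocks "blk" to release)
 *		swp_pager_update_freerange(&s_free, &n_free, blk);
 *	swp_pager_freeswapspace(s_free, n_free);
 */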
1028df8bae1dSRodney W. Grimes /*
10291c7c3c6aSMatthew Dillon  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
10301c7c3c6aSMatthew Dillon  *				the requested page.
10311c7c3c6aSMatthew Dillon  *
10321c7c3c6aSMatthew Dillon  *	We determine whether good backing store exists for the requested
10331c7c3c6aSMatthew Dillon  *	page and return TRUE if it does, FALSE if it doesn't.
10341c7c3c6aSMatthew Dillon  *
10351c7c3c6aSMatthew Dillon  *	If TRUE, we also try to determine how much valid, contiguous backing
1036915d1b71SMark Johnston  *	store exists before and after the requested page.
1037df8bae1dSRodney W. Grimes  */
10385ea4972cSAlan Cox static boolean_t
1039915d1b71SMark Johnston swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
1040915d1b71SMark Johnston     int *after)
104126f9a767SRodney W. Grimes {
1042915d1b71SMark Johnston 	daddr_t blk, blk0;
1043915d1b71SMark Johnston 	int i;
104426f9a767SRodney W. Grimes 
1045c25673ffSAttilio Rao 	VM_OBJECT_ASSERT_LOCKED(object);
1046915d1b71SMark Johnston 
10471c7c3c6aSMatthew Dillon 	/*
10481c7c3c6aSMatthew Dillon 	 * do we have good backing store at the requested index?
10491c7c3c6aSMatthew Dillon 	 */
10501c7c3c6aSMatthew Dillon 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
10514dcc5c2dSMatthew Dillon 	if (blk0 == SWAPBLK_NONE) {
10521c7c3c6aSMatthew Dillon 		if (before)
105324a1cce3SDavid Greenman 			*before = 0;
10541c7c3c6aSMatthew Dillon 		if (after)
105524a1cce3SDavid Greenman 			*after = 0;
105626f9a767SRodney W. Grimes 		return (FALSE);
105726f9a767SRodney W. Grimes 	}
105826f9a767SRodney W. Grimes 
105926f9a767SRodney W. Grimes 	/*
10601c7c3c6aSMatthew Dillon 	 * find backwards-looking contiguous good backing store
1061e47ed70bSJohn Dyson 	 */
10621c7c3c6aSMatthew Dillon 	if (before != NULL) {
1063915d1b71SMark Johnston 		for (i = 1; i < SWB_NPAGES; i++) {
10641c7c3c6aSMatthew Dillon 			if (i > pindex)
10651c7c3c6aSMatthew Dillon 				break;
10661c7c3c6aSMatthew Dillon 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
10671c7c3c6aSMatthew Dillon 			if (blk != blk0 - i)
10681c7c3c6aSMatthew Dillon 				break;
1069ffc82b0aSJohn Dyson 		}
1070915d1b71SMark Johnston 		*before = i - 1;
107126f9a767SRodney W. Grimes 	}
107226f9a767SRodney W. Grimes 
107326f9a767SRodney W. Grimes 	/*
10741c7c3c6aSMatthew Dillon 	 * find forward-looking contiguous good backing store
107526f9a767SRodney W. Grimes 	 */
10761c7c3c6aSMatthew Dillon 	if (after != NULL) {
1077915d1b71SMark Johnston 		for (i = 1; i < SWB_NPAGES; i++) {
10781c7c3c6aSMatthew Dillon 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
10791c7c3c6aSMatthew Dillon 			if (blk != blk0 + i)
10801c7c3c6aSMatthew Dillon 				break;
108126f9a767SRodney W. Grimes 		}
1082915d1b71SMark Johnston 		*after = i - 1;
10831c7c3c6aSMatthew Dillon 	}
10841c7c3c6aSMatthew Dillon 	return (TRUE);
10851c7c3c6aSMatthew Dillon }
10861c7c3c6aSMatthew Dillon 
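/*
 * A worked example of the clustering computed above, assuming a purely
 * hypothetical layout: if pindex 10 is backed by swap block 100,
 * pindexes 8-9 by blocks 98-99, and pindexes 11-12 by blocks 101-102,
 * then
 *
 *	int before, after;
 *	if (swap_pager_haspage(object, 10, &before, &after))
 *		(here before == 2 and after == 2)
 *
 * meaning the whole run can be read with one contiguous I/O.  Both
 * counts are capped at SWB_NPAGES - 1 by the scan loops.
 */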
10871c7c3c6aSMatthew Dillon /*
10881c7c3c6aSMatthew Dillon  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
10891c7c3c6aSMatthew Dillon  *
10901c7c3c6aSMatthew Dillon  *	This removes any associated swap backing store, whether valid or
10911c7c3c6aSMatthew Dillon  *	not, from the page.
10921c7c3c6aSMatthew Dillon  *
10931c7c3c6aSMatthew Dillon  *	This routine is typically called when a page is made dirty, at
10941c7c3c6aSMatthew Dillon  *	which point any associated swap can be freed.  MADV_FREE also
10951c7c3c6aSMatthew Dillon  *	calls us in a special-case situation.
10961c7c3c6aSMatthew Dillon  *
10971c7c3c6aSMatthew Dillon  *	NOTE!!!  If the page is clean and the swap was valid, the caller
10981c7c3c6aSMatthew Dillon  *	should make the page dirty before calling this routine.  This routine
10991c7c3c6aSMatthew Dillon  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
11001c7c3c6aSMatthew Dillon  *	depends on it.
11011c7c3c6aSMatthew Dillon  *
110215523cf7SKonstantin Belousov  *	This routine may not sleep.
1103c25673ffSAttilio Rao  *
1104c25673ffSAttilio Rao  *	The object containing the page must be locked.
11051c7c3c6aSMatthew Dillon  */
11061c7c3c6aSMatthew Dillon static void
11072f249180SPoul-Henning Kamp swap_pager_unswapped(vm_page_t m)
11081c7c3c6aSMatthew Dillon {
11094abca9bbSAlan Cox 	daddr_t srcaddr;
11102f249180SPoul-Henning Kamp 
11114abca9bbSAlan Cox 	srcaddr = swp_pager_meta_ctl(m->object, m->pindex, SWM_POP);
11124abca9bbSAlan Cox 	if (srcaddr != SWAPBLK_NONE)
11134abca9bbSAlan Cox 		swp_pager_freeswapspace(srcaddr, 1);
11141c7c3c6aSMatthew Dillon }
11151c7c3c6aSMatthew Dillon 
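/*
 * Sketch of the expected call pattern (hypothetical caller): because
 * this routine never touches m->dirty, a caller that wants to keep the
 * page contents must dirty the page before dropping the backing store:
 *
 *	vm_page_dirty(m);
 *	vm_pager_page_unswapped(m);	(reaches here for swap objects)
 */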
11161c7c3c6aSMatthew Dillon /*
1117915d1b71SMark Johnston  * swap_pager_getpages() - bring pages in from swap
11181c7c3c6aSMatthew Dillon  *
11193f060b60SMark Johnston  *	Attempt to page in the pages in array "ma" of length "count".  The
11203f060b60SMark Johnston  *	caller may optionally specify that additional pages preceding and
11213f060b60SMark Johnston  *	succeeding the specified range be paged in.  The number of such pages
11223f060b60SMark Johnston  *	is returned in the "rbehind" and "rahead" parameters, and they will
11233f060b60SMark Johnston  *	be in the inactive queue upon return.
11241c7c3c6aSMatthew Dillon  *
11253f060b60SMark Johnston  *	The pages in "ma" must be busied and will remain busied upon return.
11261c7c3c6aSMatthew Dillon  */
1127f708ef1bSPoul-Henning Kamp static int
11283f060b60SMark Johnston swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
1129b0cd2017SGleb Smirnoff     int *rahead)
1130df8bae1dSRodney W. Grimes {
11311c7c3c6aSMatthew Dillon 	struct buf *bp;
1132b7b8a096SKonstantin Belousov 	vm_page_t bm, mpred, msucc, p;
1133915d1b71SMark Johnston 	vm_pindex_t pindex;
11341c7c3c6aSMatthew Dillon 	daddr_t blk;
1135b7b8a096SKonstantin Belousov 	int i, maxahead, maxbehind, reqcount;
11360d94caffSDavid Greenman 
1137915d1b71SMark Johnston 	reqcount = count;
11381c7c3c6aSMatthew Dillon 
1139b7b8a096SKonstantin Belousov 	/*
1140b7b8a096SKonstantin Belousov 	 * Determine the final number of read-behind pages and
1141b7b8a096SKonstantin Belousov 	 * allocate them BEFORE releasing the object lock.  Otherwise,
1142b7b8a096SKonstantin Belousov 	 * there can be a problematic race with vm_object_split().
1143b7b8a096SKonstantin Belousov 	 * Specifically, vm_object_split() might first transfer pages
1144b7b8a096SKonstantin Belousov 	 * that precede ma[0] in the current object to a new object,
1145b7b8a096SKonstantin Belousov 	 * and then this function incorrectly recreates those pages as
1146b7b8a096SKonstantin Belousov 	 * read-behind pages in the current object.
1147b7b8a096SKonstantin Belousov 	 */
1148b7b8a096SKonstantin Belousov 	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead))
1149915d1b71SMark Johnston 		return (VM_PAGER_FAIL);
1150915d1b71SMark Johnston 
1151915d1b71SMark Johnston 	/*
1152915d1b71SMark Johnston 	 * Clip the readahead and readbehind ranges to exclude resident pages.
1153915d1b71SMark Johnston 	 */
1154915d1b71SMark Johnston 	if (rahead != NULL) {
1155dd9cb6daSMark Johnston 		KASSERT(reqcount - 1 <= maxahead,
1156915d1b71SMark Johnston 		    ("page count %d extends beyond swap block", reqcount));
1157dd9cb6daSMark Johnston 		*rahead = imin(*rahead, maxahead - (reqcount - 1));
11583f060b60SMark Johnston 		pindex = ma[reqcount - 1]->pindex;
11593f060b60SMark Johnston 		msucc = TAILQ_NEXT(ma[reqcount - 1], listq);
1160915d1b71SMark Johnston 		if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
1161915d1b71SMark Johnston 			*rahead = msucc->pindex - pindex - 1;
1162915d1b71SMark Johnston 	}
1163915d1b71SMark Johnston 	if (rbehind != NULL) {
1164dd9cb6daSMark Johnston 		*rbehind = imin(*rbehind, maxbehind);
11653f060b60SMark Johnston 		pindex = ma[0]->pindex;
11663f060b60SMark Johnston 		mpred = TAILQ_PREV(ma[0], pglist, listq);
1167915d1b71SMark Johnston 		if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
1168915d1b71SMark Johnston 			*rbehind = pindex - mpred->pindex - 1;
1169915d1b71SMark Johnston 	}
1170915d1b71SMark Johnston 
1171b7b8a096SKonstantin Belousov 	bm = ma[0];
1172b7b8a096SKonstantin Belousov 	for (i = 0; i < count; i++)
1173b7b8a096SKonstantin Belousov 		ma[i]->oflags |= VPO_SWAPINPROG;
1174b7b8a096SKonstantin Belousov 
1175915d1b71SMark Johnston 	/*
1176915d1b71SMark Johnston 	 * Allocate readahead and readbehind pages.
1177915d1b71SMark Johnston 	 */
1178b7b8a096SKonstantin Belousov 	if (rbehind != NULL) {
1179b7b8a096SKonstantin Belousov 		for (i = 1; i <= *rbehind; i++) {
11803f060b60SMark Johnston 			p = vm_page_alloc(object, ma[0]->pindex - i,
11817667839aSAlan Cox 			    VM_ALLOC_NORMAL);
1182b7b8a096SKonstantin Belousov 			if (p == NULL)
1183915d1b71SMark Johnston 				break;
1184b7b8a096SKonstantin Belousov 			p->oflags |= VPO_SWAPINPROG;
1185b7b8a096SKonstantin Belousov 			bm = p;
1186915d1b71SMark Johnston 		}
1187b7b8a096SKonstantin Belousov 		*rbehind = i - 1;
1188915d1b71SMark Johnston 	}
1189915d1b71SMark Johnston 	if (rahead != NULL) {
1190915d1b71SMark Johnston 		for (i = 0; i < *rahead; i++) {
1191915d1b71SMark Johnston 			p = vm_page_alloc(object,
11923f060b60SMark Johnston 			    ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
1193915d1b71SMark Johnston 			if (p == NULL)
1194915d1b71SMark Johnston 				break;
1195b7b8a096SKonstantin Belousov 			p->oflags |= VPO_SWAPINPROG;
1196915d1b71SMark Johnston 		}
1197915d1b71SMark Johnston 		*rahead = i;
1198915d1b71SMark Johnston 	}
1199915d1b71SMark Johnston 	if (rbehind != NULL)
1200915d1b71SMark Johnston 		count += *rbehind;
1201915d1b71SMark Johnston 	if (rahead != NULL)
1202915d1b71SMark Johnston 		count += *rahead;
1203915d1b71SMark Johnston 
1204915d1b71SMark Johnston 	vm_object_pip_add(object, count);
1205915d1b71SMark Johnston 
1206b7b8a096SKonstantin Belousov 	pindex = bm->pindex;
1207915d1b71SMark Johnston 	blk = swp_pager_meta_ctl(object, pindex, 0);
1208915d1b71SMark Johnston 	KASSERT(blk != SWAPBLK_NONE,
1209915d1b71SMark Johnston 	    ("no swap block containing %p(%jx)", object, (uintmax_t)pindex));
1210915d1b71SMark Johnston 
1211915d1b71SMark Johnston 	VM_OBJECT_WUNLOCK(object);
1212756a5412SGleb Smirnoff 	bp = uma_zalloc(swrbuf_zone, M_WAITOK);
1213b7b8a096SKonstantin Belousov 	/* Pages cannot leave the object while busy. */
1214b7b8a096SKonstantin Belousov 	for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
1215b7b8a096SKonstantin Belousov 		MPASS(p->pindex == bm->pindex + i);
1216b7b8a096SKonstantin Belousov 		bp->b_pages[i] = p;
1217b7b8a096SKonstantin Belousov 	}
1218915d1b71SMark Johnston 
1219915d1b71SMark Johnston 	bp->b_flags |= B_PAGING;
122021144e3bSPoul-Henning Kamp 	bp->b_iocmd = BIO_READ;
12211c7c3c6aSMatthew Dillon 	bp->b_iodone = swp_pager_async_iodone;
1222fdcc1cc0SJohn Baldwin 	bp->b_rcred = crhold(thread0.td_ucred);
1223fdcc1cc0SJohn Baldwin 	bp->b_wcred = crhold(thread0.td_ucred);
1224b0cd2017SGleb Smirnoff 	bp->b_blkno = blk;
1225b0cd2017SGleb Smirnoff 	bp->b_bcount = PAGE_SIZE * count;
1226b0cd2017SGleb Smirnoff 	bp->b_bufsize = PAGE_SIZE * count;
1227b0cd2017SGleb Smirnoff 	bp->b_npages = count;
1228915d1b71SMark Johnston 	bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
1229915d1b71SMark Johnston 	bp->b_pgafter = rahead != NULL ? *rahead : 0;
123026f9a767SRodney W. Grimes 
123183c9dea1SGleb Smirnoff 	VM_CNT_INC(v_swapin);
123283c9dea1SGleb Smirnoff 	VM_CNT_ADD(v_swappgsin, count);
12331c7c3c6aSMatthew Dillon 
12341c7c3c6aSMatthew Dillon 	/*
12351c7c3c6aSMatthew Dillon 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
12361c7c3c6aSMatthew Dillon 	 * this point because we automatically release it on completion.
12371c7c3c6aSMatthew Dillon 	 * Instead, we look at the one page we are interested in which we
12381c7c3c6aSMatthew Dillon 	 * still hold a lock on even through the I/O completion.
12391c7c3c6aSMatthew Dillon 	 *
12403f060b60SMark Johnston 	 * The other pages in our ma[] array are also released on completion,
12411c7c3c6aSMatthew Dillon 	 * so we cannot assume they are valid anymore either.
12421c7c3c6aSMatthew Dillon 	 *
1243c37a77eeSPoul-Henning Kamp 	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
12441c7c3c6aSMatthew Dillon 	 */
1245b890cb2cSPeter Wemm 	BUF_KERNPROC(bp);
12464b03903aSPoul-Henning Kamp 	swp_pager_strategy(bp);
124726f9a767SRodney W. Grimes 
124826f9a767SRodney W. Grimes 	/*
1249915d1b71SMark Johnston 	 * Wait for the pages we want to complete.  VPO_SWAPINPROG is always
12501c7c3c6aSMatthew Dillon 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1251915d1b71SMark Johnston 	 * is set in the metadata for each page in the request.
125226f9a767SRodney W. Grimes 	 */
125389f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object);
12543f060b60SMark Johnston 	while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
12553f060b60SMark Johnston 		ma[0]->oflags |= VPO_SWAPSLEEP;
125683c9dea1SGleb Smirnoff 		VM_CNT_INC(v_intrans);
1257c7aebda8SAttilio Rao 		if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
1258c7aebda8SAttilio Rao 		    "swread", hz * 20)) {
12599bd86a98SBruce M Simpson 			printf(
1260c5690651SPoul-Henning Kamp "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
1261c5690651SPoul-Henning Kamp 			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
12621c7c3c6aSMatthew Dillon 		}
12631b119d9dSDavid Greenman 	}
126426f9a767SRodney W. Grimes 
126526f9a767SRodney W. Grimes 	/*
1266b0cd2017SGleb Smirnoff 	 * If we had an unrecoverable read error, the pages will not be valid.
126726f9a767SRodney W. Grimes 	 */
1268915d1b71SMark Johnston 	for (i = 0; i < reqcount; i++)
12693f060b60SMark Johnston 		if (ma[i]->valid != VM_PAGE_BITS_ALL)
12701c7c3c6aSMatthew Dillon 			return (VM_PAGER_ERROR);
1271b0cd2017SGleb Smirnoff 
12721c7c3c6aSMatthew Dillon 	return (VM_PAGER_OK);
12731c7c3c6aSMatthew Dillon 
12741c7c3c6aSMatthew Dillon 	/*
12751c7c3c6aSMatthew Dillon 	 * A final note: in a low swap situation, we cannot deallocate swap
12761c7c3c6aSMatthew Dillon 	 * and mark a page dirty here because the caller is likely to mark
12771c7c3c6aSMatthew Dillon 	 * the page clean when we return, causing the page to possibly revert
12781c7c3c6aSMatthew Dillon 	 * to all-zero's later.
12791c7c3c6aSMatthew Dillon 	 */
1280df8bae1dSRodney W. Grimes }
1281df8bae1dSRodney W. Grimes 
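/*
 * A minimal usage sketch (hypothetical caller): the read-around counts
 * are in/out parameters, so the caller passes the most it will accept
 * and learns how many extra pages were actually brought in:
 *
 *	int rbehind = 4, rahead = 4, rv;
 *
 *	rv = swap_pager_getpages(object, &m, 1, &rbehind, &rahead);
 *	if (rv == VM_PAGER_OK)
 *		(m is now valid; rbehind/rahead extra pages sit in the
 *		 inactive queue)
 */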
12821c7c3c6aSMatthew Dillon /*
128390effb23SGleb Smirnoff  * 	swap_pager_getpages_async():
128490effb23SGleb Smirnoff  *
128590effb23SGleb Smirnoff  *	Right now this is emulation of asynchronous operation on top of
128690effb23SGleb Smirnoff  *	swap_pager_getpages().
128790effb23SGleb Smirnoff  */
128890effb23SGleb Smirnoff static int
12893f060b60SMark Johnston swap_pager_getpages_async(vm_object_t object, vm_page_t *ma, int count,
1290b0cd2017SGleb Smirnoff     int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
129190effb23SGleb Smirnoff {
129290effb23SGleb Smirnoff 	int r, error;
129390effb23SGleb Smirnoff 
12943f060b60SMark Johnston 	r = swap_pager_getpages(object, ma, count, rbehind, rahead);
129590effb23SGleb Smirnoff 	VM_OBJECT_WUNLOCK(object);
129690effb23SGleb Smirnoff 	switch (r) {
129790effb23SGleb Smirnoff 	case VM_PAGER_OK:
129890effb23SGleb Smirnoff 		error = 0;
129990effb23SGleb Smirnoff 		break;
130090effb23SGleb Smirnoff 	case VM_PAGER_ERROR:
130190effb23SGleb Smirnoff 		error = EIO;
130290effb23SGleb Smirnoff 		break;
130390effb23SGleb Smirnoff 	case VM_PAGER_FAIL:
130490effb23SGleb Smirnoff 		error = EINVAL;
130590effb23SGleb Smirnoff 		break;
130690effb23SGleb Smirnoff 	default:
1307d9328101SGleb Smirnoff 		panic("unhandled swap_pager_getpages() error %d", r);
130890effb23SGleb Smirnoff 	}
13093f060b60SMark Johnston 	(iodone)(arg, ma, count, error);
131090effb23SGleb Smirnoff 	VM_OBJECT_WLOCK(object);
131190effb23SGleb Smirnoff 
131290effb23SGleb Smirnoff 	return (r);
131390effb23SGleb Smirnoff }
131490effb23SGleb Smirnoff 
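/*
 * Because the asynchrony is emulated, the iodone callback fires before
 * this function returns.  A hypothetical consumer (names illustrative):
 *
 *	static void
 *	my_iodone(void *arg, vm_page_t *ma, int count, int error)
 *	{
 *		(error is 0, EIO or EINVAL, mirroring the switch above)
 *	}
 *
 *	swap_pager_getpages_async(obj, ma, count, &rb, &ra, my_iodone,
 *	    arg);
 */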
131590effb23SGleb Smirnoff /*
13161c7c3c6aSMatthew Dillon  *	swap_pager_putpages:
13171c7c3c6aSMatthew Dillon  *
13181c7c3c6aSMatthew Dillon  *	Assign swap (if necessary) and initiate I/O on the specified pages.
13191c7c3c6aSMatthew Dillon  *
13201c7c3c6aSMatthew Dillon  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
13211c7c3c6aSMatthew Dillon  *	are automatically converted to SWAP objects.
13221c7c3c6aSMatthew Dillon  *
1323ea3aecf5SPeter Wemm  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
13241c7c3c6aSMatthew Dillon  *	vm_page reservation system coupled with properly written VFS devices
13251c7c3c6aSMatthew Dillon  *	should ensure that no low-memory deadlock occurs.  This is an area
13261c7c3c6aSMatthew Dillon  *	which needs work.
13271c7c3c6aSMatthew Dillon  *
13281c7c3c6aSMatthew Dillon  *	The parent has N vm_object_pip_add() references prior to
13291c7c3c6aSMatthew Dillon  *	calling us and will remove references for rtvals[] that are
13301c7c3c6aSMatthew Dillon  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
13311c7c3c6aSMatthew Dillon  *	completion.
13321c7c3c6aSMatthew Dillon  *
13331c7c3c6aSMatthew Dillon  *	The parent has soft-busy'd the pages it passes us and will unbusy
13341c7c3c6aSMatthew Dillon  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
13351c7c3c6aSMatthew Dillon  *	We need to unbusy the rest on I/O completion.
13361c7c3c6aSMatthew Dillon  */
1337d635a37fSWarner Losh static void
13383f060b60SMark Johnston swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
1339e065e87cSKonstantin Belousov     int flags, int *rtvals)
1340df8bae1dSRodney W. Grimes {
1341e065e87cSKonstantin Belousov 	int i, n;
1342e065e87cSKonstantin Belousov 	boolean_t sync;
134378f1deefSAlan Cox 	daddr_t addr, n_free, s_free;
1344df8bae1dSRodney W. Grimes 
134578f1deefSAlan Cox 	swp_pager_init_freerange(&s_free, &n_free);
13463f060b60SMark Johnston 	if (count && ma[0]->object != object) {
13477036145bSMaxim Konovalov 		panic("swap_pager_putpages: object mismatch %p/%p",
13481c7c3c6aSMatthew Dillon 		    object, ma[0]->object);
13511c7c3c6aSMatthew Dillon 	}
1352ee3dc7d7SAlan Cox 
13531c7c3c6aSMatthew Dillon 	/*
13541c7c3c6aSMatthew Dillon 	 * Step 1
13551c7c3c6aSMatthew Dillon 	 *
13561c7c3c6aSMatthew Dillon 	 * Turn object into OBJT_SWAP
13571c7c3c6aSMatthew Dillon 	 * check for bogus sysops
13581c7c3c6aSMatthew Dillon 	 * force sync if not pageout process
13591c7c3c6aSMatthew Dillon 	 */
136078f1deefSAlan Cox 	if (object->type != OBJT_SWAP) {
136178f1deefSAlan Cox 		addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE);
136278f1deefSAlan Cox 		KASSERT(addr == SWAPBLK_NONE,
136378f1deefSAlan Cox 		    ("unexpected object swap block"));
136478f1deefSAlan Cox 	}
136589f6b863SAttilio Rao 	VM_OBJECT_WUNLOCK(object);
1366e47ed70bSJohn Dyson 
1367e065e87cSKonstantin Belousov 	n = 0;
1368e47ed70bSJohn Dyson 	if (curproc != pageproc)
1369e47ed70bSJohn Dyson 		sync = TRUE;
1370e065e87cSKonstantin Belousov 	else
1371e065e87cSKonstantin Belousov 		sync = (flags & VM_PAGER_PUT_SYNC) != 0;
137226f9a767SRodney W. Grimes 
13731c7c3c6aSMatthew Dillon 	/*
13741c7c3c6aSMatthew Dillon 	 * Step 2
13751c7c3c6aSMatthew Dillon 	 *
13761c7c3c6aSMatthew Dillon 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
13771c7c3c6aSMatthew Dillon 	 * The page is left dirty until the pageout operation completes
13781c7c3c6aSMatthew Dillon 	 * successfully.
13791c7c3c6aSMatthew Dillon 	 */
13801c7c3c6aSMatthew Dillon 	for (i = 0; i < count; i += n) {
13811c7c3c6aSMatthew Dillon 		int j;
13821c7c3c6aSMatthew Dillon 		struct buf *bp;
1383a316d390SJohn Dyson 		daddr_t blk;
138426f9a767SRodney W. Grimes 
1385df8bae1dSRodney W. Grimes 		/*
13861c7c3c6aSMatthew Dillon 		 * Maximum I/O size is limited by a number of factors.
1387df8bae1dSRodney W. Grimes 		 */
13881c7c3c6aSMatthew Dillon 		n = min(BLIST_MAX_ALLOC, count - i);
1389327f4e83SMatthew Dillon 		n = min(n, nsw_cluster_max);
13901c7c3c6aSMatthew Dillon 
139148e98a2aSDoug Moore 		/* Get a block of swap of size up to n pages. */
139248e98a2aSDoug Moore 		blk = swp_pager_getswapspace(&n, 4);
13931c7c3c6aSMatthew Dillon 		if (blk == SWAPBLK_NONE) {
13944dcc5c2dSMatthew Dillon 			for (j = 0; j < n; ++j)
13951c7c3c6aSMatthew Dillon 				rtvals[i+j] = VM_PAGER_FAIL;
13961c7c3c6aSMatthew Dillon 			continue;
139726f9a767SRodney W. Grimes 		}
139826f9a767SRodney W. Grimes 
139926f9a767SRodney W. Grimes 		/*
14001c7c3c6aSMatthew Dillon 		 * All I/O parameters have been satisfied, build the I/O
14011c7c3c6aSMatthew Dillon 		 * request and assign the swap space.
140226f9a767SRodney W. Grimes 		 */
1403756a5412SGleb Smirnoff 		if (sync != TRUE) {
1404756a5412SGleb Smirnoff 			mtx_lock(&swbuf_mtx);
1405756a5412SGleb Smirnoff 			while (nsw_wcount_async == 0)
1406756a5412SGleb Smirnoff 				msleep(&nsw_wcount_async, &swbuf_mtx, PVM,
1407756a5412SGleb Smirnoff 				    "swbufa", 0);
1408756a5412SGleb Smirnoff 			nsw_wcount_async--;
1409756a5412SGleb Smirnoff 			mtx_unlock(&swbuf_mtx);
1410327f4e83SMatthew Dillon 		}
1411756a5412SGleb Smirnoff 		bp = uma_zalloc(swwbuf_zone, M_WAITOK);
1412756a5412SGleb Smirnoff 		if (sync != TRUE)
1413756a5412SGleb Smirnoff 			bp->b_flags = B_ASYNC;
14145e04322aSPoul-Henning Kamp 		bp->b_flags |= B_PAGING;
1415912e4ae9SPoul-Henning Kamp 		bp->b_iocmd = BIO_WRITE;
141626f9a767SRodney W. Grimes 
1417fdcc1cc0SJohn Baldwin 		bp->b_rcred = crhold(thread0.td_ucred);
1418fdcc1cc0SJohn Baldwin 		bp->b_wcred = crhold(thread0.td_ucred);
14191c7c3c6aSMatthew Dillon 		bp->b_bcount = PAGE_SIZE * n;
14201c7c3c6aSMatthew Dillon 		bp->b_bufsize = PAGE_SIZE * n;
14211c7c3c6aSMatthew Dillon 		bp->b_blkno = blk;
1422e47ed70bSJohn Dyson 
142389f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
14241c7c3c6aSMatthew Dillon 		for (j = 0; j < n; ++j) {
14253f060b60SMark Johnston 			vm_page_t mreq = ma[i+j];
14261c7c3c6aSMatthew Dillon 
142778f1deefSAlan Cox 			addr = swp_pager_meta_build(mreq->object, mreq->pindex,
142878f1deefSAlan Cox 			    blk + j);
142978f1deefSAlan Cox 			if (addr != SWAPBLK_NONE)
143078f1deefSAlan Cox 				swp_pager_update_freerange(&s_free, &n_free,
143178f1deefSAlan Cox 				    addr);
143287b0ab69SAlan Cox 			MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
14335786be7cSAlan Cox 			mreq->oflags |= VPO_SWAPINPROG;
14341c7c3c6aSMatthew Dillon 			bp->b_pages[j] = mreq;
14351c7c3c6aSMatthew Dillon 		}
143689f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
14371c7c3c6aSMatthew Dillon 		bp->b_npages = n;
1438a5296b05SJulian Elischer 		/*
1439a5296b05SJulian Elischer 		 * Must set dirty range for NFS to work.
1440a5296b05SJulian Elischer 		 */
1441a5296b05SJulian Elischer 		bp->b_dirtyoff = 0;
1442a5296b05SJulian Elischer 		bp->b_dirtyend = bp->b_bcount;
14431c7c3c6aSMatthew Dillon 
144483c9dea1SGleb Smirnoff 		VM_CNT_INC(v_swapout);
144583c9dea1SGleb Smirnoff 		VM_CNT_ADD(v_swappgsout, bp->b_npages);
144626f9a767SRodney W. Grimes 
144726f9a767SRodney W. Grimes 		/*
144877923df2SAlan Cox 		 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
144977923df2SAlan Cox 		 * can call the async completion routine at the end of a
145077923df2SAlan Cox 		 * synchronous I/O operation.  Otherwise, our caller would
145177923df2SAlan Cox 		 * perform duplicate unbusy and wakeup operations on the page
145277923df2SAlan Cox 		 * and object, respectively.
145377923df2SAlan Cox 		 */
145477923df2SAlan Cox 		for (j = 0; j < n; j++)
145577923df2SAlan Cox 			rtvals[i + j] = VM_PAGER_PEND;
145677923df2SAlan Cox 
145777923df2SAlan Cox 		/*
14581c7c3c6aSMatthew Dillon 		 * asynchronous
14591c7c3c6aSMatthew Dillon 		 *
1460c37a77eeSPoul-Henning Kamp 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
146126f9a767SRodney W. Grimes 		 */
14621c7c3c6aSMatthew Dillon 		if (sync == FALSE) {
14631c7c3c6aSMatthew Dillon 			bp->b_iodone = swp_pager_async_iodone;
146467812eacSKirk McKusick 			BUF_KERNPROC(bp);
14654b03903aSPoul-Henning Kamp 			swp_pager_strategy(bp);
14661c7c3c6aSMatthew Dillon 			continue;
146726f9a767SRodney W. Grimes 		}
1468e47ed70bSJohn Dyson 
146926f9a767SRodney W. Grimes 		/*
14701c7c3c6aSMatthew Dillon 		 * synchronous
14711c7c3c6aSMatthew Dillon 		 *
1472c37a77eeSPoul-Henning Kamp 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
14731c7c3c6aSMatthew Dillon 		 */
14742c840b1fSAlan Cox 		bp->b_iodone = bdone;
14754b03903aSPoul-Henning Kamp 		swp_pager_strategy(bp);
14761c7c3c6aSMatthew Dillon 
14771c7c3c6aSMatthew Dillon 		/*
147877923df2SAlan Cox 		 * Wait for the sync I/O to complete.
147926f9a767SRodney W. Grimes 		 */
14802c840b1fSAlan Cox 		bwait(bp, PVM, "swwrt");
148177923df2SAlan Cox 
14821c7c3c6aSMatthew Dillon 		/*
14831c7c3c6aSMatthew Dillon 		 * Now that we are through with the bp, we can call the
14841c7c3c6aSMatthew Dillon 		 * normal async completion, which frees everything up.
14851c7c3c6aSMatthew Dillon 		 */
14861c7c3c6aSMatthew Dillon 		swp_pager_async_iodone(bp);
14871c7c3c6aSMatthew Dillon 	}
148889f6b863SAttilio Rao 	VM_OBJECT_WLOCK(object);
148978f1deefSAlan Cox 	swp_pager_freeswapspace(s_free, n_free);
14901c7c3c6aSMatthew Dillon }
14911c7c3c6aSMatthew Dillon 
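/*
 * Caller-side sketch of the rtvals[] contract described above
 * (hypothetical caller): entries left at VM_PAGER_PEND are finished by
 * swp_pager_async_iodone(); the others remain the caller's to clean up:
 *
 *	swap_pager_putpages(object, ma, count, flags, rtvals);
 *	for (i = 0; i < count; i++)
 *		if (rtvals[i] != VM_PAGER_PEND)
 *			(unbusy ma[i] and drop its pip reference)
 */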
14921c7c3c6aSMatthew Dillon /*
14931c7c3c6aSMatthew Dillon  *	swp_pager_async_iodone:
14941c7c3c6aSMatthew Dillon  *
14951c7c3c6aSMatthew Dillon  *	Completion routine for asynchronous reads and writes from/to swap.
14961c7c3c6aSMatthew Dillon  *	Also called manually by synchronous code to finish up a bp.
14971c7c3c6aSMatthew Dillon  *
149815523cf7SKonstantin Belousov  *	This routine may not sleep.
14991c7c3c6aSMatthew Dillon  */
15001c7c3c6aSMatthew Dillon static void
15012f249180SPoul-Henning Kamp swp_pager_async_iodone(struct buf *bp)
15021c7c3c6aSMatthew Dillon {
15031c7c3c6aSMatthew Dillon 	int i;
15041c7c3c6aSMatthew Dillon 	vm_object_t object = NULL;
15051c7c3c6aSMatthew Dillon 
15061c7c3c6aSMatthew Dillon 	/*
15070b208315SEdward Tomasz Napierala 	 * Report error - unless we ran out of memory, in which case
15080b208315SEdward Tomasz Napierala 	 * we've already logged it in swapgeom_strategy().
15091c7c3c6aSMatthew Dillon 	 */
15100b208315SEdward Tomasz Napierala 	if (bp->b_ioflags & BIO_ERROR && bp->b_error != ENOMEM) {
15111c7c3c6aSMatthew Dillon 		printf("swap_pager: I/O error - %s failed; blkno %ld, "
15121c7c3c6aSMatthew Dillon 		    "size %ld, error %d\n",
151421144e3bSPoul-Henning Kamp 		    (bp->b_iocmd == BIO_READ) ? "pagein" : "pageout",
15151c7c3c6aSMatthew Dillon 		    (long)bp->b_blkno, (long)bp->b_bcount, bp->b_error);
15191c7c3c6aSMatthew Dillon 	}
15201c7c3c6aSMatthew Dillon 
15211c7c3c6aSMatthew Dillon 	/*
152226f9a767SRodney W. Grimes 	 * remove the mapping for kernel virtual
152326f9a767SRodney W. Grimes 	 */
1524fade8dd7SJeff Roberson 	if (buf_mapped(bp))
15251c7c3c6aSMatthew Dillon 		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1526fade8dd7SJeff Roberson 	else
1527fade8dd7SJeff Roberson 		bp->b_data = bp->b_kvabase;
152826f9a767SRodney W. Grimes 
152933a609ecSAlan Cox 	if (bp->b_npages) {
153033a609ecSAlan Cox 		object = bp->b_pages[0]->object;
153189f6b863SAttilio Rao 		VM_OBJECT_WLOCK(object);
153233a609ecSAlan Cox 	}
15332965a453SKip Macy 
153426f9a767SRodney W. Grimes 	/*
15351c7c3c6aSMatthew Dillon 	 * cleanup pages.  If an error occurs writing to swap, we are in
15361c7c3c6aSMatthew Dillon 	 * very serious trouble.  If it happens to be a disk error, though,
15371c7c3c6aSMatthew Dillon 	 * we may be able to recover by reassigning the swap later on.  So
15381c7c3c6aSMatthew Dillon 	 * in this case we remove the m->swapblk assignment for the page
15391c7c3c6aSMatthew Dillon 	 * but do not free it in the rlist.  The errornous block(s) are thus
15401c7c3c6aSMatthew Dillon 	 * but do not free it in the rlist.  The erroneous block(s) are thus
154126f9a767SRodney W. Grimes 	 */
15421c7c3c6aSMatthew Dillon 	for (i = 0; i < bp->b_npages; ++i) {
15431c7c3c6aSMatthew Dillon 		vm_page_t m = bp->b_pages[i];
1544e47ed70bSJohn Dyson 
15455786be7cSAlan Cox 		m->oflags &= ~VPO_SWAPINPROG;
1546c7aebda8SAttilio Rao 		if (m->oflags & VPO_SWAPSLEEP) {
1547c7aebda8SAttilio Rao 			m->oflags &= ~VPO_SWAPSLEEP;
1548c7aebda8SAttilio Rao 			wakeup(&object->paging_in_progress);
1549c7aebda8SAttilio Rao 		}
1550e47ed70bSJohn Dyson 
1551c244d2deSPoul-Henning Kamp 		if (bp->b_ioflags & BIO_ERROR) {
1552ffc82b0aSJohn Dyson 			/*
15531c7c3c6aSMatthew Dillon 			 * If an error occurs I'd love to throw the swapblk
15541c7c3c6aSMatthew Dillon 			 * away without freeing it back to swapspace, so it
15551c7c3c6aSMatthew Dillon 			 * can never be used again.  But I can't from an
15561c7c3c6aSMatthew Dillon 			 * interrupt.
1557ffc82b0aSJohn Dyson 			 */
155821144e3bSPoul-Henning Kamp 			if (bp->b_iocmd == BIO_READ) {
15591c7c3c6aSMatthew Dillon 				/*
15601c7c3c6aSMatthew Dillon 				 * NOTE: for reads, m->dirty will probably
1561956f3135SPhilippe Charnier 				 * be overridden by the original caller of
15621c7c3c6aSMatthew Dillon 				 * getpages so don't play cute tricks here.
15631c7c3c6aSMatthew Dillon 				 */
15641c7c3c6aSMatthew Dillon 				m->valid = 0;
15651c7c3c6aSMatthew Dillon 			} else {
15661c7c3c6aSMatthew Dillon 				/*
15671c7c3c6aSMatthew Dillon 				 * If a write error occurs, reactivate page
15681c7c3c6aSMatthew Dillon 				 * so it doesn't clog the inactive list,
15691c7c3c6aSMatthew Dillon 				 * then finish the I/O.
15701c7c3c6aSMatthew Dillon 				 */
157141e5a226SAlan Cox 				MPASS(m->dirty == VM_PAGE_BITS_ALL);
15723c4a2440SAlan Cox 				vm_page_lock(m);
15731c7c3c6aSMatthew Dillon 				vm_page_activate(m);
15743c4a2440SAlan Cox 				vm_page_unlock(m);
1575c7aebda8SAttilio Rao 				vm_page_sunbusy(m);
15761c7c3c6aSMatthew Dillon 			}
157721144e3bSPoul-Henning Kamp 		} else if (bp->b_iocmd == BIO_READ) {
15781c7c3c6aSMatthew Dillon 			/*
15791c7c3c6aSMatthew Dillon 			 * NOTE: for reads, m->dirty will probably be
1580956f3135SPhilippe Charnier 			 * overridden by the original caller of getpages so
15811c7c3c6aSMatthew Dillon 			 * we cannot set them in order to free the underlying
15821c7c3c6aSMatthew Dillon 			 * swap in a low-swap situation.  I don't think we'd
15831c7c3c6aSMatthew Dillon 			 * want to do that anyway, but it was an optimization
15841c7c3c6aSMatthew Dillon 			 * that existed in the old swapper for a time before
15851c7c3c6aSMatthew Dillon 			 * it got ripped out due to precisely this problem.
15861c7c3c6aSMatthew Dillon 			 */
1587016a3c93SAlan Cox 			KASSERT(!pmap_page_is_mapped(m),
1588016a3c93SAlan Cox 			    ("swp_pager_async_iodone: page %p is mapped", m));
1589016a3c93SAlan Cox 			KASSERT(m->dirty == 0,
1590016a3c93SAlan Cox 			    ("swp_pager_async_iodone: page %p is dirty", m));
1591915d1b71SMark Johnston 
1592b0cd2017SGleb Smirnoff 			m->valid = VM_PAGE_BITS_ALL;
1593915d1b71SMark Johnston 			if (i < bp->b_pgbefore ||
1594915d1b71SMark Johnston 			    i >= bp->b_npages - bp->b_pgafter)
1595915d1b71SMark Johnston 				vm_page_readahead_finish(m);
15961c7c3c6aSMatthew Dillon 		} else {
15971c7c3c6aSMatthew Dillon 			/*
1598016a3c93SAlan Cox 			 * For write success, clear the dirty
15991c7c3c6aSMatthew Dillon 			 * status, then finish the I/O ( which decrements the
16001c7c3c6aSMatthew Dillon 			 * busy count and possibly wakes waiter's up ).
1601ebcddc72SAlan Cox 			 * A page is only written to swap after a period of
1602ebcddc72SAlan Cox 			 * inactivity.  Therefore, we do not expect it to be
1603ebcddc72SAlan Cox 			 * reused.
16041c7c3c6aSMatthew Dillon 			 */
16056031c68dSAlan Cox 			KASSERT(!pmap_page_is_write_mapped(m),
1606016a3c93SAlan Cox 			    ("swp_pager_async_iodone: page %p is not write"
1607016a3c93SAlan Cox 			    " protected", m));
1608c52e7044SAlan Cox 			vm_page_undirty(m);
16093c4a2440SAlan Cox 			vm_page_lock(m);
1610ebcddc72SAlan Cox 			vm_page_deactivate_noreuse(m);
16112965a453SKip Macy 			vm_page_unlock(m);
1612ebcddc72SAlan Cox 			vm_page_sunbusy(m);
16133c4a2440SAlan Cox 		}
16143c4a2440SAlan Cox 	}
161526f9a767SRodney W. Grimes 
16161c7c3c6aSMatthew Dillon 	/*
16171c7c3c6aSMatthew Dillon 	 * adjust pip.  NOTE: the original parent may still have its own
16181c7c3c6aSMatthew Dillon 	 * pip refs on the object.
16191c7c3c6aSMatthew Dillon 	 */
16200d420ad3SAlan Cox 	if (object != NULL) {
16211c7c3c6aSMatthew Dillon 		vm_object_pip_wakeupn(object, bp->b_npages);
162289f6b863SAttilio Rao 		VM_OBJECT_WUNLOCK(object);
16230d420ad3SAlan Cox 	}
162426f9a767SRodney W. Grimes 
16251c7c3c6aSMatthew Dillon 	/*
1626100650deSOlivier Houchard 	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1627100650deSOlivier Houchard 	 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1628100650deSOlivier Houchard 	 * trigger a KASSERT in relpbuf().
1629100650deSOlivier Houchard 	 */
1630100650deSOlivier Houchard 	if (bp->b_vp) {
1631100650deSOlivier Houchard 		bp->b_vp = NULL;
1632100650deSOlivier Houchard 		bp->b_bufobj = NULL;
1633100650deSOlivier Houchard 	}
1634100650deSOlivier Houchard 	/*
16351c7c3c6aSMatthew Dillon 	 * release the physical I/O buffer
16361c7c3c6aSMatthew Dillon 	 */
1637756a5412SGleb Smirnoff 	if (bp->b_flags & B_ASYNC) {
1638756a5412SGleb Smirnoff 		mtx_lock(&swbuf_mtx);
1639756a5412SGleb Smirnoff 		if (++nsw_wcount_async == 1)
1640756a5412SGleb Smirnoff 			wakeup(&nsw_wcount_async);
1641756a5412SGleb Smirnoff 		mtx_unlock(&swbuf_mtx);
1642756a5412SGleb Smirnoff 	}
1643756a5412SGleb Smirnoff 	uma_zfree((bp->b_iocmd == BIO_READ) ? swrbuf_zone : swwbuf_zone, bp);
164426f9a767SRodney W. Grimes }
16451c7c3c6aSMatthew Dillon 
1646b1fd102eSMark Johnston int
1647b1fd102eSMark Johnston swap_pager_nswapdev(void)
1648b1fd102eSMark Johnston {
1649b1fd102eSMark Johnston 
1650b1fd102eSMark Johnston 	return (nswapdev);
1651b1fd102eSMark Johnston }
1652b1fd102eSMark Johnston 
1653*7c022327SDoug Moore static void
1654*7c022327SDoug Moore swp_pager_force_dirty(vm_page_t m)
1655*7c022327SDoug Moore {
1656*7c022327SDoug Moore 
1657*7c022327SDoug Moore 	vm_page_dirty(m);
1658*7c022327SDoug Moore #ifdef INVARIANTS
1659*7c022327SDoug Moore 	vm_page_lock(m);
1660*7c022327SDoug Moore 	if (!vm_page_wired(m) && m->queue == PQ_NONE)
1661*7c022327SDoug Moore 		panic("page %p is neither wired nor queued", m);
1662*7c022327SDoug Moore 	vm_page_unlock(m);
1663*7c022327SDoug Moore #endif
1664*7c022327SDoug Moore 	vm_page_xunbusy(m);
1665*7c022327SDoug Moore }
1666*7c022327SDoug Moore 
1667*7c022327SDoug Moore static void
1668*7c022327SDoug Moore swp_pager_force_launder(vm_page_t m)
1669*7c022327SDoug Moore {
1670*7c022327SDoug Moore 
1671*7c022327SDoug Moore 	vm_page_dirty(m);
1672*7c022327SDoug Moore 	vm_page_lock(m);
1673*7c022327SDoug Moore 	vm_page_launder(m);
1674*7c022327SDoug Moore 	vm_page_unlock(m);
1675*7c022327SDoug Moore 	vm_page_xunbusy(m);
1676*7c022327SDoug Moore }
1677*7c022327SDoug Moore 
167892da00bbSMatthew Dillon /*
167992da00bbSMatthew Dillon  * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
168092da00bbSMatthew Dillon  *
1681ebcddc72SAlan Cox  *	This routine dissociates the page at the given index within an object
1682ebcddc72SAlan Cox  *	from its backing store, paging it in if it does not reside in memory.
1683ebcddc72SAlan Cox  *	If the page is paged in, it is marked dirty and placed in the laundry
1684ebcddc72SAlan Cox  *	queue.  The page is marked dirty because it no longer has backing
1685ebcddc72SAlan Cox  *	store.  It is placed in the laundry queue because it has not been
1686ebcddc72SAlan Cox  *	accessed recently.  Otherwise, it would already reside in memory.
1687ebcddc72SAlan Cox  *
1688ebcddc72SAlan Cox  *	We also attempt to swap in all other pages in the swap block.
1689ebcddc72SAlan Cox  *	However, we only guarantee that the one at the specified index is
169092da00bbSMatthew Dillon  *	paged in.
169192da00bbSMatthew Dillon  *
169292da00bbSMatthew Dillon  *	XXX - The code to page the whole block in doesn't work, so we
169392da00bbSMatthew Dillon  *	      revert to the one-by-one behavior for now.  Sigh.
169492da00bbSMatthew Dillon  */
1695*7c022327SDoug Moore static void
1696b3fed13eSDavid Schultz swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex)
169792da00bbSMatthew Dillon {
169892da00bbSMatthew Dillon 	vm_page_t m;
169992da00bbSMatthew Dillon 
170092da00bbSMatthew Dillon 	vm_object_pip_add(object, 1);
17015944de8eSKonstantin Belousov 	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
170292da00bbSMatthew Dillon 	if (m->valid == VM_PAGE_BITS_ALL) {
17030d8243ccSAttilio Rao 		vm_object_pip_wakeup(object);
1704*7c022327SDoug Moore 		swp_pager_force_dirty(m);
170592da00bbSMatthew Dillon 		vm_pager_page_unswapped(m);
170692da00bbSMatthew Dillon 		return;
170792da00bbSMatthew Dillon 	}
170892da00bbSMatthew Dillon 
1709b0cd2017SGleb Smirnoff 	if (swap_pager_getpages(object, &m, 1, NULL, NULL) != VM_PAGER_OK)
171092da00bbSMatthew Dillon 		panic("swap_pager_force_pagein: read from swap failed"); /* XXX */
17110d8243ccSAttilio Rao 	vm_object_pip_wakeup(object);
1712*7c022327SDoug Moore 	swp_pager_force_launder(m);
171392da00bbSMatthew Dillon 	vm_pager_page_unswapped(m);
171492da00bbSMatthew Dillon }
171592da00bbSMatthew Dillon 
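/*
 * The helpers above encode the forced page-in policy: a page that was
 * already resident is only dirtied, since it may be wired or on any
 * queue, while a page just read back from a closing device is dirtied
 * and laundered, because its lack of recent use is what got it swapped
 * out in the first place.
 */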
171692da00bbSMatthew Dillon /*
1717*7c022327SDoug Moore  *	swap_pager_swapoff_object:
1718*7c022327SDoug Moore  *
1719*7c022327SDoug Moore  *	Page in all of the pages that have been paged out for an object
1720*7c022327SDoug Moore  *	from a given swap device.
1721*7c022327SDoug Moore  */
1722*7c022327SDoug Moore static void
1723*7c022327SDoug Moore swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
1724*7c022327SDoug Moore {
1725*7c022327SDoug Moore 	struct swblk *sb;
1726*7c022327SDoug Moore 	vm_pindex_t pi;
1727*7c022327SDoug Moore 	int i;
1728*7c022327SDoug Moore 
1729*7c022327SDoug Moore 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1730*7c022327SDoug Moore 	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
1731*7c022327SDoug Moore 		pi = sb->p + SWAP_META_PAGES;
1732*7c022327SDoug Moore 		for (i = 0; i < SWAP_META_PAGES; i++) {
1733*7c022327SDoug Moore 			if (sb->d[i] == SWAPBLK_NONE)
1734*7c022327SDoug Moore 				continue;
1735*7c022327SDoug Moore 			if (swp_pager_isondev(sb->d[i], sp))
1736*7c022327SDoug Moore 				swp_pager_force_pagein(object, sb->p + i);
1737*7c022327SDoug Moore 		}
1738*7c022327SDoug Moore 	}
1739*7c022327SDoug Moore }
1740*7c022327SDoug Moore 
1741*7c022327SDoug Moore /*
174292da00bbSMatthew Dillon  *	swap_pager_swapoff:
174392da00bbSMatthew Dillon  *
174492da00bbSMatthew Dillon  *	Page in all of the pages that have been paged out to the
174592da00bbSMatthew Dillon  *	given device.  The corresponding blocks in the bitmap must be
174692da00bbSMatthew Dillon  *	marked as allocated and the device must be flagged SW_CLOSING.
174792da00bbSMatthew Dillon  *	There may be no processes swapped out to the device.
174892da00bbSMatthew Dillon  *
174992da00bbSMatthew Dillon  *	This routine may block.
175092da00bbSMatthew Dillon  */
1751e9c0cc15SPoul-Henning Kamp static void
1752b3fed13eSDavid Schultz swap_pager_swapoff(struct swdevt *sp)
175392da00bbSMatthew Dillon {
1754f425ab8eSKonstantin Belousov 	vm_object_t object;
1755*7c022327SDoug Moore 	int retries;
175692da00bbSMatthew Dillon 
175704533e1eSKonstantin Belousov 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
175892da00bbSMatthew Dillon 
17598bc61209SDavid Schultz 	retries = 0;
176092da00bbSMatthew Dillon full_rescan:
1761f425ab8eSKonstantin Belousov 	mtx_lock(&vm_object_list_mtx);
1762f425ab8eSKonstantin Belousov 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
1763f425ab8eSKonstantin Belousov 		if (object->type != OBJT_SWAP)
176498150664SKonstantin Belousov 			continue;
1765f425ab8eSKonstantin Belousov 		mtx_unlock(&vm_object_list_mtx);
176698150664SKonstantin Belousov 		/* Depends on type-stability. */
176798150664SKonstantin Belousov 		VM_OBJECT_WLOCK(object);
1768f425ab8eSKonstantin Belousov 
1769f425ab8eSKonstantin Belousov 		/*
1770f425ab8eSKonstantin Belousov 		 * Dead objects are eventually terminated on their own.
1771f425ab8eSKonstantin Belousov 		 */
1772f425ab8eSKonstantin Belousov 		if ((object->flags & OBJ_DEAD) != 0)
1773f425ab8eSKonstantin Belousov 			goto next_obj;
1774f425ab8eSKonstantin Belousov 
1775f425ab8eSKonstantin Belousov 		/*
1776f425ab8eSKonstantin Belousov 		 * Sync with fences placed after pctrie
1777f425ab8eSKonstantin Belousov 		 * initialization.  We must not access pctrie below
1778f425ab8eSKonstantin Belousov 		 * unless we checked that our object is swap and not
1779f425ab8eSKonstantin Belousov 		 * dead.
1780f425ab8eSKonstantin Belousov 		 */
1781f425ab8eSKonstantin Belousov 		atomic_thread_fence_acq();
1782f425ab8eSKonstantin Belousov 		if (object->type != OBJT_SWAP)
1783f425ab8eSKonstantin Belousov 			goto next_obj;
1784f425ab8eSKonstantin Belousov 
1785*7c022327SDoug Moore 		swap_pager_swapoff_object(sp, object);
1786f425ab8eSKonstantin Belousov next_obj:
1787f425ab8eSKonstantin Belousov 		VM_OBJECT_WUNLOCK(object);
1788f425ab8eSKonstantin Belousov 		mtx_lock(&vm_object_list_mtx);
178992da00bbSMatthew Dillon 	}
1790f425ab8eSKonstantin Belousov 	mtx_unlock(&vm_object_list_mtx);
1791f425ab8eSKonstantin Belousov 
17928bc61209SDavid Schultz 	if (sp->sw_used) {
179392da00bbSMatthew Dillon 		/*
17948bc61209SDavid Schultz 		 * Objects may be locked or paging to the device being
17958bc61209SDavid Schultz 		 * removed, so we will miss their pages and need to
17968bc61209SDavid Schultz 		 * make another pass.  We have marked this device as
17978bc61209SDavid Schultz 		 * SW_CLOSING, so the activity should finish soon.
179892da00bbSMatthew Dillon 		 */
17998bc61209SDavid Schultz 		retries++;
18008bc61209SDavid Schultz 		if (retries > 100) {
18018bc61209SDavid Schultz 			panic("swapoff: failed to locate %d swap blocks",
18028bc61209SDavid Schultz 			    sp->sw_used);
18038bc61209SDavid Schultz 		}
18044d70511aSJohn Baldwin 		pause("swpoff", hz / 20);
180592da00bbSMatthew Dillon 		goto full_rescan;
180692da00bbSMatthew Dillon 	}
1807b1fd102eSMark Johnston 	EVENTHANDLER_INVOKE(swapoff, sp);
180892da00bbSMatthew Dillon }
180992da00bbSMatthew Dillon 
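/*
 * A rough bound on the loop above (simple arithmetic, not a guarantee):
 * each failed pass sleeps hz / 20 ticks (50 ms), so the 100-retry limit
 * panics after roughly 5 seconds of sleeping plus the cost of the
 * object-list rescans themselves.
 */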
18101c7c3c6aSMatthew Dillon /************************************************************************
18111c7c3c6aSMatthew Dillon  *				SWAP META DATA 				*
18121c7c3c6aSMatthew Dillon  ************************************************************************
18131c7c3c6aSMatthew Dillon  *
18141c7c3c6aSMatthew Dillon  *	These routines manipulate the swap metadata stored in the
1815cec9f109SDavid E. O'Brien  *	OBJT_SWAP object.
18161c7c3c6aSMatthew Dillon  *
18174dcc5c2dSMatthew Dillon  *	Swap metadata is kept in a per-object radix tree (pctrie) of
18184dcc5c2dSMatthew Dillon  *	struct swblk entries, rooted at un_pager.swp.swp_blks, rather
18194dcc5c2dSMatthew Dillon  *	than being linked into the pages themselves.
18201c7c3c6aSMatthew Dillon  */
18211c7c3c6aSMatthew Dillon 
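/*
 * Layout sketch, inferred from the accessors below: each struct swblk
 * covers SWAP_META_PAGES consecutive page indices and is keyed in the
 * pctrie by the rounded-down pindex,
 *
 *	struct swblk {
 *		vm_pindex_t	p;			(first pindex covered)
 *		daddr_t		d[SWAP_META_PAGES];	(or SWAPBLK_NONE)
 *	};
 *
 * so the block backing "pindex", if any, is found with
 *
 *	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 *	    rounddown(pindex, SWAP_META_PAGES));
 *	blk = (sb != NULL) ? sb->d[pindex % SWAP_META_PAGES] : SWAPBLK_NONE;
 */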
18221c7c3c6aSMatthew Dillon /*
1823230869e0SAlan Cox  * SWP_PAGER_SWBLK_EMPTY() - is a range of blocks free?
1824230869e0SAlan Cox  */
1825230869e0SAlan Cox static bool
1826230869e0SAlan Cox swp_pager_swblk_empty(struct swblk *sb, int start, int limit)
1827230869e0SAlan Cox {
1828230869e0SAlan Cox 	int i;
1829230869e0SAlan Cox 
1830230869e0SAlan Cox 	MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES);
1831230869e0SAlan Cox 	for (i = start; i < limit; i++) {
1832230869e0SAlan Cox 		if (sb->d[i] != SWAPBLK_NONE)
1833230869e0SAlan Cox 			return (false);
1834230869e0SAlan Cox 	}
1835230869e0SAlan Cox 	return (true);
1836230869e0SAlan Cox }
1837230869e0SAlan Cox 
1838230869e0SAlan Cox /*
18391c7c3c6aSMatthew Dillon  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
18401c7c3c6aSMatthew Dillon  *
18411c7c3c6aSMatthew Dillon  *	We first convert the object to a swap object if it is a default
18421c7c3c6aSMatthew Dillon  *	object.
18431c7c3c6aSMatthew Dillon  *
18441c7c3c6aSMatthew Dillon  *	The specified swapblk is added to the object's swap metadata.  If
18451c7c3c6aSMatthew Dillon  *	the swapblk is not valid, it is freed instead.  Any previously
184678f1deefSAlan Cox  *	assigned swapblk is returned.
18471c7c3c6aSMatthew Dillon  */
184878f1deefSAlan Cox static daddr_t
18492f249180SPoul-Henning Kamp swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
18502f249180SPoul-Henning Kamp {
1851f425ab8eSKonstantin Belousov 	static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
1852eed99cb8SKonstantin Belousov 	struct swblk *sb, *sb1;
1853f425ab8eSKonstantin Belousov 	vm_pindex_t modpi, rdpi;
185478f1deefSAlan Cox 	daddr_t prev_swapblk;
1855f425ab8eSKonstantin Belousov 	int error, i;
18561c7c3c6aSMatthew Dillon 
185789f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
1858f425ab8eSKonstantin Belousov 
18591c7c3c6aSMatthew Dillon 	/*
18601c7c3c6aSMatthew Dillon 	 * Convert default object to swap object if necessary
18611c7c3c6aSMatthew Dillon 	 */
18621c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP) {
1863f425ab8eSKonstantin Belousov 		pctrie_init(&object->un_pager.swp.swp_blks);
1864f425ab8eSKonstantin Belousov 
1865f425ab8eSKonstantin Belousov 		/*
1866f425ab8eSKonstantin Belousov 		 * Ensure that swap_pager_swapoff()'s iteration over
1867f425ab8eSKonstantin Belousov 		 * object_list does not see a garbage pctrie.
1868f425ab8eSKonstantin Belousov 		 */
1869f425ab8eSKonstantin Belousov 		atomic_thread_fence_rel();
1870f425ab8eSKonstantin Belousov 
18711c7c3c6aSMatthew Dillon 		object->type = OBJT_SWAP;
1872eb4d6a1bSKonstantin Belousov 		KASSERT(object->handle == NULL, ("default pager with handle"));
1873bd228075SAlan Cox 	}
18741c7c3c6aSMatthew Dillon 
1875f425ab8eSKonstantin Belousov 	rdpi = rounddown(pindex, SWAP_META_PAGES);
1876f425ab8eSKonstantin Belousov 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
1877f425ab8eSKonstantin Belousov 	if (sb == NULL) {
18781c7c3c6aSMatthew Dillon 		if (swapblk == SWAPBLK_NONE)
187978f1deefSAlan Cox 			return (SWAPBLK_NONE);
1880f425ab8eSKonstantin Belousov 		for (;;) {
1881f425ab8eSKonstantin Belousov 			sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
1882f425ab8eSKonstantin Belousov 			    pageproc ? M_USE_RESERVE : 0));
1883f425ab8eSKonstantin Belousov 			if (sb != NULL) {
1884f425ab8eSKonstantin Belousov 				sb->p = rdpi;
1885f425ab8eSKonstantin Belousov 				for (i = 0; i < SWAP_META_PAGES; i++)
1886f425ab8eSKonstantin Belousov 					sb->d[i] = SWAPBLK_NONE;
1887f425ab8eSKonstantin Belousov 				if (atomic_cmpset_int(&swblk_zone_exhausted,
1888f425ab8eSKonstantin Belousov 				    1, 0))
1889f425ab8eSKonstantin Belousov 					printf("swblk zone ok\n");
1890f425ab8eSKonstantin Belousov 				break;
1891f425ab8eSKonstantin Belousov 			}
189289f6b863SAttilio Rao 			VM_OBJECT_WUNLOCK(object);
1893f425ab8eSKonstantin Belousov 			if (uma_zone_exhausted(swblk_zone)) {
1894f425ab8eSKonstantin Belousov 				if (atomic_cmpset_int(&swblk_zone_exhausted,
1895f425ab8eSKonstantin Belousov 				    0, 1))
1896f425ab8eSKonstantin Belousov 					printf("swap blk zone exhausted, "
18973ff863f1SDag-Erling Smørgrav 					    "increase kern.maxswzone\n");
18982025d69bSKonstantin Belousov 				vm_pageout_oom(VM_OOM_SWAPZ);
1899f425ab8eSKonstantin Belousov 				pause("swzonxb", 10);
19002025d69bSKonstantin Belousov 			} else
19018d6fbbb8SJeff Roberson 				uma_zwait(swblk_zone);
190289f6b863SAttilio Rao 			VM_OBJECT_WLOCK(object);
1903eed99cb8SKonstantin Belousov 			sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1904eed99cb8SKonstantin Belousov 			    rdpi);
1905eed99cb8SKonstantin Belousov 			if (sb != NULL)
1906eed99cb8SKonstantin Belousov 				/*
1907eed99cb8SKonstantin Belousov 				 * Somebody swapped out a nearby page,
1908eed99cb8SKonstantin Belousov 				 * allocating swblk at the rdpi index,
1909eed99cb8SKonstantin Belousov 				 * while we dropped the object lock.
1910eed99cb8SKonstantin Belousov 				 */
1911eed99cb8SKonstantin Belousov 				goto allocated;
19124dcc5c2dSMatthew Dillon 		}
1913f425ab8eSKonstantin Belousov 		for (;;) {
1914f425ab8eSKonstantin Belousov 			error = SWAP_PCTRIE_INSERT(
1915f425ab8eSKonstantin Belousov 			    &object->un_pager.swp.swp_blks, sb);
1916f425ab8eSKonstantin Belousov 			if (error == 0) {
1917f425ab8eSKonstantin Belousov 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
1918f425ab8eSKonstantin Belousov 				    1, 0))
1919f425ab8eSKonstantin Belousov 					printf("swpctrie zone ok\n");
1920f425ab8eSKonstantin Belousov 				break;
19211c7c3c6aSMatthew Dillon 			}
1922f425ab8eSKonstantin Belousov 			VM_OBJECT_WUNLOCK(object);
1923f425ab8eSKonstantin Belousov 			if (uma_zone_exhausted(swpctrie_zone)) {
1924f425ab8eSKonstantin Belousov 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
1925f425ab8eSKonstantin Belousov 				    0, 1))
1926f425ab8eSKonstantin Belousov 					printf("swap pctrie zone exhausted, "
1927f425ab8eSKonstantin Belousov 					    "increase kern.maxswzone\n");
1928f425ab8eSKonstantin Belousov 				vm_pageout_oom(VM_OOM_SWAPZ);
1929f425ab8eSKonstantin Belousov 				pause("swzonxp", 10);
1930f425ab8eSKonstantin Belousov 			} else
19318d6fbbb8SJeff Roberson 				uma_zwait(swpctrie_zone);
1932f425ab8eSKonstantin Belousov 			VM_OBJECT_WLOCK(object);
1933eed99cb8SKonstantin Belousov 			sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1934eed99cb8SKonstantin Belousov 			    rdpi);
1935eed99cb8SKonstantin Belousov 			if (sb1 != NULL) {
1936eed99cb8SKonstantin Belousov 				uma_zfree(swblk_zone, sb);
1937eed99cb8SKonstantin Belousov 				sb = sb1;
1938eed99cb8SKonstantin Belousov 				goto allocated;
19391c7c3c6aSMatthew Dillon 			}
1940f425ab8eSKonstantin Belousov 		}
1941eed99cb8SKonstantin Belousov 	}
1942eed99cb8SKonstantin Belousov allocated:
1943f425ab8eSKonstantin Belousov 	MPASS(sb->p == rdpi);
19441c7c3c6aSMatthew Dillon 
1945f425ab8eSKonstantin Belousov 	modpi = pindex % SWAP_META_PAGES;
194678f1deefSAlan Cox 	/* Return prior contents of metadata. */
194778f1deefSAlan Cox 	prev_swapblk = sb->d[modpi];
1948f425ab8eSKonstantin Belousov 	/* Enter block into metadata. */
1949f425ab8eSKonstantin Belousov 	sb->d[modpi] = swapblk;
195085d88d87SKonstantin Belousov 
195185d88d87SKonstantin Belousov 	/*
195285d88d87SKonstantin Belousov 	 * Free the swblk if we end up with an empty page run.
195385d88d87SKonstantin Belousov 	 */
1954230869e0SAlan Cox 	if (swapblk == SWAPBLK_NONE &&
1955230869e0SAlan Cox 	    swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
1956230869e0SAlan Cox 		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, rdpi);
195785d88d87SKonstantin Belousov 		uma_zfree(swblk_zone, sb);
195885d88d87SKonstantin Belousov 	}
195978f1deefSAlan Cox 	return (prev_swapblk);
196085d88d87SKonstantin Belousov }
19611c7c3c6aSMatthew Dillon 
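/*
 * Usage sketch, as in swap_pager_putpages() above: building with
 * SWAPBLK_NONE merely forces the OBJT_DEFAULT to OBJT_SWAP conversion,
 * while building with a real block records the assignment and hands
 * back whatever block it displaced so the caller can free it:
 *
 *	addr = swp_pager_meta_build(mreq->object, mreq->pindex, blk + j);
 *	if (addr != SWAPBLK_NONE)
 *		swp_pager_update_freerange(&s_free, &n_free, addr);
 */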
19621c7c3c6aSMatthew Dillon /*
19631c7c3c6aSMatthew Dillon  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
19641c7c3c6aSMatthew Dillon  *
19651c7c3c6aSMatthew Dillon  *	The requested range of blocks is freed, with any associated swap
19661c7c3c6aSMatthew Dillon  *	returned to the swap bitmap.
19671c7c3c6aSMatthew Dillon  *
19681c7c3c6aSMatthew Dillon  *	This routine will free swap metadata structures as they are cleaned
19691c7c3c6aSMatthew Dillon  *	out.  This routine does *NOT* operate on swap metadata associated
19701c7c3c6aSMatthew Dillon  *	with resident pages.
19711c7c3c6aSMatthew Dillon  */
19721c7c3c6aSMatthew Dillon static void
1973f425ab8eSKonstantin Belousov swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count)
19741c7c3c6aSMatthew Dillon {
1975f425ab8eSKonstantin Belousov 	struct swblk *sb;
197678f1deefSAlan Cox 	daddr_t n_free, s_free;
1977f425ab8eSKonstantin Belousov 	vm_pindex_t last;
1978230869e0SAlan Cox 	int i, limit, start;
19792928cef7SAlan Cox 
1980ee620ea4SAlan Cox 	VM_OBJECT_ASSERT_WLOCKED(object);
19812e56b64fSKonstantin Belousov 	if (object->type != OBJT_SWAP || count == 0)
19821c7c3c6aSMatthew Dillon 		return;
19831c7c3c6aSMatthew Dillon 
198478f1deefSAlan Cox 	swp_pager_init_freerange(&s_free, &n_free);
1985230869e0SAlan Cox 	last = pindex + count;
1986f425ab8eSKonstantin Belousov 	for (;;) {
1987f425ab8eSKonstantin Belousov 		sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
1988f425ab8eSKonstantin Belousov 		    rounddown(pindex, SWAP_META_PAGES));
1989230869e0SAlan Cox 		if (sb == NULL || sb->p >= last)
19902e56b64fSKonstantin Belousov 			break;
1991230869e0SAlan Cox 		start = pindex > sb->p ? pindex - sb->p : 0;
1992230869e0SAlan Cox 		limit = last - sb->p < SWAP_META_PAGES ? last - sb->p :
1993230869e0SAlan Cox 		    SWAP_META_PAGES;
1994230869e0SAlan Cox 		for (i = start; i < limit; i++) {
1995f425ab8eSKonstantin Belousov 			if (sb->d[i] == SWAPBLK_NONE)
1996f425ab8eSKonstantin Belousov 				continue;
199778f1deefSAlan Cox 			swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
1998230869e0SAlan Cox 			sb->d[i] = SWAPBLK_NONE;
1999230869e0SAlan Cox 		}
2000150d384eSMark Johnston 		pindex = sb->p + SWAP_META_PAGES;
2001230869e0SAlan Cox 		if (swp_pager_swblk_empty(sb, 0, start) &&
2002230869e0SAlan Cox 		    swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) {
2003f425ab8eSKonstantin Belousov 			SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
2004f425ab8eSKonstantin Belousov 			    sb->p);
2005f425ab8eSKonstantin Belousov 			uma_zfree(swblk_zone, sb);
20061c7c3c6aSMatthew Dillon 		}
20071c7c3c6aSMatthew Dillon 	}
200878f1deefSAlan Cox 	swp_pager_freeswapspace(s_free, n_free);
20091c7c3c6aSMatthew Dillon }
20101c7c3c6aSMatthew Dillon 
20111c7c3c6aSMatthew Dillon /*
20121c7c3c6aSMatthew Dillon  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
20131c7c3c6aSMatthew Dillon  *
20141c7c3c6aSMatthew Dillon  *	This routine locates and destroys all swap metadata associated with
20151c7c3c6aSMatthew Dillon  *	an object.
20161c7c3c6aSMatthew Dillon  */
20171c7c3c6aSMatthew Dillon static void
20181c7c3c6aSMatthew Dillon swp_pager_meta_free_all(vm_object_t object)
20191c7c3c6aSMatthew Dillon {
2020f425ab8eSKonstantin Belousov 	struct swblk *sb;
202178f1deefSAlan Cox 	daddr_t n_free, s_free;
2022f425ab8eSKonstantin Belousov 	vm_pindex_t pindex;
202371057cd2SKonstantin Belousov 	int i;
20241c7c3c6aSMatthew Dillon 
202589f6b863SAttilio Rao 	VM_OBJECT_ASSERT_WLOCKED(object);
20261c7c3c6aSMatthew Dillon 	if (object->type != OBJT_SWAP)
20271c7c3c6aSMatthew Dillon 		return;
20281c7c3c6aSMatthew Dillon 
202978f1deefSAlan Cox 	swp_pager_init_freerange(&s_free, &n_free);
2030f425ab8eSKonstantin Belousov 	for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
2031f425ab8eSKonstantin Belousov 	    &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
2032f425ab8eSKonstantin Belousov 		pindex = sb->p + SWAP_META_PAGES;
2033f425ab8eSKonstantin Belousov 		for (i = 0; i < SWAP_META_PAGES; i++) {
2034230869e0SAlan Cox 			if (sb->d[i] == SWAPBLK_NONE)
2035230869e0SAlan Cox 				continue;
203678f1deefSAlan Cox 			swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
20371c7c3c6aSMatthew Dillon 		}
2038f425ab8eSKonstantin Belousov 		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
2039f425ab8eSKonstantin Belousov 		uma_zfree(swblk_zone, sb);
20401c7c3c6aSMatthew Dillon 	}
204178f1deefSAlan Cox 	swp_pager_freeswapspace(s_free, n_free);
20421c7c3c6aSMatthew Dillon }
20431c7c3c6aSMatthew Dillon 
20441c7c3c6aSMatthew Dillon /*
20454abca9bbSAlan Cox  * SWP_PAGER_META_CTL() -	misc control of swap meta data.
20461c7c3c6aSMatthew Dillon  *
20474abca9bbSAlan Cox  *	This routine is capable of looking up or removing swapblk
20484abca9bbSAlan Cox  *	assignments in the swap meta data.  It returns the swapblk being
20494abca9bbSAlan Cox  *	looked up, popped, or SWAPBLK_NONE if the block was invalid.
20501c7c3c6aSMatthew Dillon  *
20511c7c3c6aSMatthew Dillon  *	When acting on a busy resident page and paging is in progress, we
20521c7c3c6aSMatthew Dillon  *	have to wait until paging is complete but otherwise can act on the
20531c7c3c6aSMatthew Dillon  *	busy page.
20541c7c3c6aSMatthew Dillon  *
20554abca9bbSAlan Cox  *	SWM_POP		remove swapblk from metadata without freeing the swap space
20561c7c3c6aSMatthew Dillon  */
20571c7c3c6aSMatthew Dillon static daddr_t
20582f249180SPoul-Henning Kamp swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
20592f249180SPoul-Henning Kamp {
2060f425ab8eSKonstantin Belousov 	struct swblk *sb;
20614dcc5c2dSMatthew Dillon 	daddr_t r1;
20624dcc5c2dSMatthew Dillon 
20634abca9bbSAlan Cox 	if ((flags & SWM_POP) != 0)
2064ee620ea4SAlan Cox 		VM_OBJECT_ASSERT_WLOCKED(object);
2065ee620ea4SAlan Cox 	else
2066c25673ffSAttilio Rao 		VM_OBJECT_ASSERT_LOCKED(object);
2067ee620ea4SAlan Cox 
20681c7c3c6aSMatthew Dillon 	/*
2069ee620ea4SAlan Cox 	 * The metadata only exists if the object is OBJT_SWAP
20701c7c3c6aSMatthew Dillon 	 * and even then might not be allocated yet.
20711c7c3c6aSMatthew Dillon 	 */
20724dcc5c2dSMatthew Dillon 	if (object->type != OBJT_SWAP)
20731c7c3c6aSMatthew Dillon 		return (SWAPBLK_NONE);
20741c7c3c6aSMatthew Dillon 
2075f425ab8eSKonstantin Belousov 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
2076f425ab8eSKonstantin Belousov 	    rounddown(pindex, SWAP_META_PAGES));
2077f425ab8eSKonstantin Belousov 	if (sb == NULL)
2078f425ab8eSKonstantin Belousov 		return (SWAPBLK_NONE);
2079f425ab8eSKonstantin Belousov 	r1 = sb->d[pindex % SWAP_META_PAGES];
2080f425ab8eSKonstantin Belousov 	if (r1 == SWAPBLK_NONE)
2081f425ab8eSKonstantin Belousov 		return (SWAPBLK_NONE);
20824abca9bbSAlan Cox 	if ((flags & SWM_POP) != 0) {
2083f425ab8eSKonstantin Belousov 		sb->d[pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
2084230869e0SAlan Cox 		if (swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
2085f425ab8eSKonstantin Belousov 			SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks,
2086f425ab8eSKonstantin Belousov 			    rounddown(pindex, SWAP_META_PAGES));
2087f425ab8eSKonstantin Belousov 			uma_zfree(swblk_zone, sb);
2088f425ab8eSKonstantin Belousov 		}
2089f425ab8eSKonstantin Belousov 	}
20901c7c3c6aSMatthew Dillon 	return (r1);
20911c7c3c6aSMatthew Dillon }
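/*
 * Usage sketch (hypothetical caller, not part of the original
 * source): a plain lookup needs only a shared object lock, while
 * SWM_POP, which clears the assignment, requires the exclusive
 * lock, matching the assertions above.
 */
#if 0
	daddr_t blk;

	VM_OBJECT_RLOCK(object);
	blk = swp_pager_meta_ctl(object, pindex, 0);	/* look up only */
	VM_OBJECT_RUNLOCK(object);

	VM_OBJECT_WLOCK(object);
	blk = swp_pager_meta_ctl(object, pindex, SWM_POP); /* look up and remove */
	VM_OBJECT_WUNLOCK(object);
#endif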
20921c7c3c6aSMatthew Dillon 
2093e9c0cc15SPoul-Henning Kamp /*
209477d6fd97SKonstantin Belousov  * Returns the least page index which is greater than or equal to the
209577d6fd97SKonstantin Belousov  * parameter pindex and for which there is a swap block allocated.
209677d6fd97SKonstantin Belousov  * Returns object's size if the object's type is not swap or if there
209777d6fd97SKonstantin Belousov  * are no allocated swap blocks for the object after the requested
209877d6fd97SKonstantin Belousov  * pindex.
209977d6fd97SKonstantin Belousov  */
210077d6fd97SKonstantin Belousov vm_pindex_t
210177d6fd97SKonstantin Belousov swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
210277d6fd97SKonstantin Belousov {
2103f425ab8eSKonstantin Belousov 	struct swblk *sb;
2104f425ab8eSKonstantin Belousov 	int i;
210577d6fd97SKonstantin Belousov 
210677d6fd97SKonstantin Belousov 	VM_OBJECT_ASSERT_LOCKED(object);
2107f425ab8eSKonstantin Belousov 	if (object->type != OBJT_SWAP)
210877d6fd97SKonstantin Belousov 		return (object->size);
210977d6fd97SKonstantin Belousov 
2110f425ab8eSKonstantin Belousov 	sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2111f425ab8eSKonstantin Belousov 	    rounddown(pindex, SWAP_META_PAGES));
2112f425ab8eSKonstantin Belousov 	if (sb == NULL)
2113f425ab8eSKonstantin Belousov 		return (object->size);
2114f425ab8eSKonstantin Belousov 	if (sb->p < pindex) {
2115f425ab8eSKonstantin Belousov 		for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
2116f425ab8eSKonstantin Belousov 			if (sb->d[i] != SWAPBLK_NONE)
2117f425ab8eSKonstantin Belousov 				return (sb->p + i);
211877d6fd97SKonstantin Belousov 		}
2119f425ab8eSKonstantin Belousov 		sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2120f425ab8eSKonstantin Belousov 		    roundup(pindex, SWAP_META_PAGES));
2121f425ab8eSKonstantin Belousov 		if (sb == NULL)
2122f425ab8eSKonstantin Belousov 			return (object->size);
212377d6fd97SKonstantin Belousov 	}
2124f425ab8eSKonstantin Belousov 	for (i = 0; i < SWAP_META_PAGES; i++) {
2125f425ab8eSKonstantin Belousov 		if (sb->d[i] != SWAPBLK_NONE)
2126f425ab8eSKonstantin Belousov 			return (sb->p + i);
212777d6fd97SKonstantin Belousov 	}
2128f425ab8eSKonstantin Belousov 
2129f425ab8eSKonstantin Belousov 	/*
2130f425ab8eSKonstantin Belousov 	 * We get here if a swblk is present in the trie but it
2131f425ab8eSKonstantin Belousov 	 * doesn't map any blocks.
2132f425ab8eSKonstantin Belousov 	 */
2133f425ab8eSKonstantin Belousov 	MPASS(0);
2134f425ab8eSKonstantin Belousov 	return (object->size);
213577d6fd97SKonstantin Belousov }
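/*
 * Usage sketch (hypothetical, not part of the original source): walk
 * every page index of an object that has swap space assigned.  The
 * object->size return value terminates the loop.
 */
#if 0
	vm_pindex_t pi;

	VM_OBJECT_RLOCK(object);
	for (pi = swap_pager_find_least(object, 0); pi < object->size;
	    pi = swap_pager_find_least(object, pi + 1)) {
		/* Page index pi has a swap block assigned. */
	}
	VM_OBJECT_RUNLOCK(object);
#endif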
213677d6fd97SKonstantin Belousov 
213777d6fd97SKonstantin Belousov /*
2138e9c0cc15SPoul-Henning Kamp  * System call swapon(name) enables swapping on device name,
2139e9c0cc15SPoul-Henning Kamp  * which must be in the swdevsw.  Return EBUSY
2140e9c0cc15SPoul-Henning Kamp  * if already swapping on this device.
2141e9c0cc15SPoul-Henning Kamp  */
2142e9c0cc15SPoul-Henning Kamp #ifndef _SYS_SYSPROTO_H_
2143e9c0cc15SPoul-Henning Kamp struct swapon_args {
2144e9c0cc15SPoul-Henning Kamp 	char *name;
2145e9c0cc15SPoul-Henning Kamp };
2146e9c0cc15SPoul-Henning Kamp #endif
2147e9c0cc15SPoul-Henning Kamp 
2148e9c0cc15SPoul-Henning Kamp /*
2149e9c0cc15SPoul-Henning Kamp  * MPSAFE
2150e9c0cc15SPoul-Henning Kamp  */
2151e9c0cc15SPoul-Henning Kamp /* ARGSUSED */
2152e9c0cc15SPoul-Henning Kamp int
21538451d0ddSKip Macy sys_swapon(struct thread *td, struct swapon_args *uap)
2154e9c0cc15SPoul-Henning Kamp {
2155e9c0cc15SPoul-Henning Kamp 	struct vattr attr;
2156e9c0cc15SPoul-Henning Kamp 	struct vnode *vp;
2157e9c0cc15SPoul-Henning Kamp 	struct nameidata nd;
2158e9c0cc15SPoul-Henning Kamp 	int error;
2159e9c0cc15SPoul-Henning Kamp 
2160acd3428bSRobert Watson 	error = priv_check(td, PRIV_SWAPON);
2161e9c0cc15SPoul-Henning Kamp 	if (error)
2162acd3428bSRobert Watson 		return (error);
2163e9c0cc15SPoul-Henning Kamp 
216404533e1eSKonstantin Belousov 	sx_xlock(&swdev_syscall_lock);
2165e9c0cc15SPoul-Henning Kamp 
2166e9c0cc15SPoul-Henning Kamp 	/*
2167e9c0cc15SPoul-Henning Kamp 	 * Swap metadata may not fit in the KVM if we have physical
2168e9c0cc15SPoul-Henning Kamp 	 * memory of >1GB.
2169e9c0cc15SPoul-Henning Kamp 	 */
2170f425ab8eSKonstantin Belousov 	if (swblk_zone == NULL) {
2171e9c0cc15SPoul-Henning Kamp 		error = ENOMEM;
2172e9c0cc15SPoul-Henning Kamp 		goto done;
2173e9c0cc15SPoul-Henning Kamp 	}
2174e9c0cc15SPoul-Henning Kamp 
2175d9135e72SRobert Watson 	NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
2176d9135e72SRobert Watson 	    uap->name, td);
2177e9c0cc15SPoul-Henning Kamp 	error = namei(&nd);
2178e9c0cc15SPoul-Henning Kamp 	if (error)
2179e9c0cc15SPoul-Henning Kamp 		goto done;
2180e9c0cc15SPoul-Henning Kamp 
2181e9c0cc15SPoul-Henning Kamp 	NDFREE(&nd, NDF_ONLY_PNBUF);
2182e9c0cc15SPoul-Henning Kamp 	vp = nd.ni_vp;
2183e9c0cc15SPoul-Henning Kamp 
218420da9c2eSPoul-Henning Kamp 	if (vn_isdisk(vp, &error)) {
218588ad2d7bSKonstantin Belousov 		error = swapongeom(vp);
218620da9c2eSPoul-Henning Kamp 	} else if (vp->v_type == VREG &&
2187e9c0cc15SPoul-Henning Kamp 	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
21880359a12eSAttilio Rao 	    (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
2189e9c0cc15SPoul-Henning Kamp 		/*
2190e9c0cc15SPoul-Henning Kamp 		 * Allow direct swapping to NFS regular files in the same
2191e9c0cc15SPoul-Henning Kamp 		 * way that nfs_mountroot() sets up diskless swapping.
2192e9c0cc15SPoul-Henning Kamp 		 */
219359efee01SPoul-Henning Kamp 		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2194e9c0cc15SPoul-Henning Kamp 	}
2195e9c0cc15SPoul-Henning Kamp 
2196e9c0cc15SPoul-Henning Kamp 	if (error)
2197e9c0cc15SPoul-Henning Kamp 		vrele(vp);
2198e9c0cc15SPoul-Henning Kamp done:
219904533e1eSKonstantin Belousov 	sx_xunlock(&swdev_syscall_lock);
2200e9c0cc15SPoul-Henning Kamp 	return (error);
2201e9c0cc15SPoul-Henning Kamp }
2202e9c0cc15SPoul-Henning Kamp 
22033ff863f1SDag-Erling Smørgrav /*
22043ff863f1SDag-Erling Smørgrav  * Check that the total amount of swap currently configured does not
22053ff863f1SDag-Erling Smørgrav  * exceed half the theoretical maximum.  If it does, print a warning
220635872e79SKonstantin Belousov  * message.
22073ff863f1SDag-Erling Smørgrav  */
220835872e79SKonstantin Belousov static void
220935872e79SKonstantin Belousov swapon_check_swzone(void)
22103ff863f1SDag-Erling Smørgrav {
221135872e79SKonstantin Belousov 	unsigned long maxpages, npages;
22123ff863f1SDag-Erling Smørgrav 
2213e8bb589dSMatt Macy 	npages = swap_total;
22143ff863f1SDag-Erling Smørgrav 	/* absolute maximum we can handle assuming 100% efficiency */
2215f425ab8eSKonstantin Belousov 	maxpages = uma_zone_get_max(swblk_zone) * SWAP_META_PAGES;
22163ff863f1SDag-Erling Smørgrav 
22173ff863f1SDag-Erling Smørgrav 	/* recommend using no more than half that amount */
22183ff863f1SDag-Erling Smørgrav 	if (npages > maxpages / 2) {
22193ff863f1SDag-Erling Smørgrav 		printf("warning: total configured swap (%lu pages) "
22203ff863f1SDag-Erling Smørgrav 		    "exceeds maximum recommended amount (%lu pages).\n",
22219462305cSSergey Kandaurov 		    npages, maxpages / 2);
22223ff863f1SDag-Erling Smørgrav 		printf("warning: increase kern.maxswzone "
22233ff863f1SDag-Erling Smørgrav 		    "or reduce amount of swap.\n");
22243ff863f1SDag-Erling Smørgrav 	}
22253ff863f1SDag-Erling Smørgrav }
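/*
 * Worked example (illustrative figures, assuming 4 KB pages and
 * SWAP_META_PAGES == 32): with the swblk zone capped at 2^20 items,
 * maxpages is 2^25 pages (128 GB of swap), so the warning above
 * fires once more than 2^24 pages (64 GB) of swap are configured.
 */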
22263ff863f1SDag-Erling Smørgrav 
222759efee01SPoul-Henning Kamp static void
22282cc718a1SKonstantin Belousov swaponsomething(struct vnode *vp, void *id, u_long nblks,
22292cc718a1SKonstantin Belousov     sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
2230e9c0cc15SPoul-Henning Kamp {
22312d9974c1SAlan Cox 	struct swdevt *sp, *tsp;
2232e9c0cc15SPoul-Henning Kamp 	swblk_t dvbase;
22338f60c087SPoul-Henning Kamp 	u_long mblocks;
2234e9c0cc15SPoul-Henning Kamp 
2235e9c0cc15SPoul-Henning Kamp 	/*
2236e9c0cc15SPoul-Henning Kamp 	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2237e9c0cc15SPoul-Henning Kamp 	 * nblks is in DEV_BSIZE'd chunks; convert to PAGE_SIZE'd chunks.
2238e9c0cc15SPoul-Henning Kamp 	 * First chop off the excess to page-align nblks, then convert.
2239e9c0cc15SPoul-Henning Kamp 	 * sw->sw_nblks is in page-sized chunks now too.
2240e9c0cc15SPoul-Henning Kamp 	 */
2241e9c0cc15SPoul-Henning Kamp 	nblks &= ~(ctodb(1) - 1);
2242e9c0cc15SPoul-Henning Kamp 	nblks = dbtoc(nblks);
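	/*
	 * Example (illustrative): with 512-byte device blocks and 4 KB
	 * pages, ctodb(1) == 8, so the mask above truncates nblks down
	 * to a multiple of 8 and dbtoc() then divides by 8, leaving
	 * nblks in pages.
	 */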
2243e9c0cc15SPoul-Henning Kamp 
22446e903bd0SKonstantin Belousov 	/*
22456e903bd0SKonstantin Belousov 	 * If we go beyond this, we get overflows in the radix
22466e903bd0SKonstantin Belousov 	 * tree bitmap code.
22476e903bd0SKonstantin Belousov 	 */
22486e903bd0SKonstantin Belousov 	mblocks = 0x40000000 / BLIST_META_RADIX;
22496e903bd0SKonstantin Belousov 	if (nblks > mblocks) {
22506e903bd0SKonstantin Belousov 		printf(
22516e903bd0SKonstantin Belousov     "WARNING: reducing swap size to maximum of %luMB per unit\n",
22526e903bd0SKonstantin Belousov 		    mblocks / 1024 / 1024 * PAGE_SIZE);
22536e903bd0SKonstantin Belousov 		nblks = mblocks;
22546e903bd0SKonstantin Belousov 	}
22556e903bd0SKonstantin Belousov 
22568f60c087SPoul-Henning Kamp 	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2257dee34ca4SPoul-Henning Kamp 	sp->sw_vp = vp;
2258dee34ca4SPoul-Henning Kamp 	sp->sw_id = id;
2259f3732fd1SPoul-Henning Kamp 	sp->sw_dev = dev;
2260e9c0cc15SPoul-Henning Kamp 	sp->sw_nblks = nblks;
2261e9c0cc15SPoul-Henning Kamp 	sp->sw_used = 0;
226259efee01SPoul-Henning Kamp 	sp->sw_strategy = strategy;
2263dee34ca4SPoul-Henning Kamp 	sp->sw_close = close;
22642cc718a1SKonstantin Belousov 	sp->sw_flags = flags;
2265e9c0cc15SPoul-Henning Kamp 
2266c8c7ad92SKip Macy 	sp->sw_blist = blist_create(nblks, M_WAITOK);
2267e9c0cc15SPoul-Henning Kamp 	/*
2268ef3c5abdSPoul-Henning Kamp 	 * Do not free the first two blocks in order to avoid overwriting
22698f60c087SPoul-Henning Kamp 	 * any BSD label at the front of the partition.
2270e9c0cc15SPoul-Henning Kamp 	 */
2271ef3c5abdSPoul-Henning Kamp 	blist_free(sp->sw_blist, 2, nblks - 2);
2272e9c0cc15SPoul-Henning Kamp 
22732d9974c1SAlan Cox 	dvbase = 0;
227420da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
22752d9974c1SAlan Cox 	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
22762d9974c1SAlan Cox 		if (tsp->sw_end >= dvbase) {
22772d9974c1SAlan Cox 			/*
22782d9974c1SAlan Cox 			 * We put one uncovered page between the devices
22792d9974c1SAlan Cox 			 * in order to definitively prevent any cross-device
22802d9974c1SAlan Cox 			 * I/O requests
22812d9974c1SAlan Cox 			 */
22822d9974c1SAlan Cox 			dvbase = tsp->sw_end + 1;
22832d9974c1SAlan Cox 		}
22842d9974c1SAlan Cox 	}
22852d9974c1SAlan Cox 	sp->sw_first = dvbase;
22862d9974c1SAlan Cox 	sp->sw_end = dvbase + nblks;
22878f60c087SPoul-Henning Kamp 	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
22888f60c087SPoul-Henning Kamp 	nswapdev++;
2289761097c8SAlan Cox 	swap_pager_avail += nblks - 2;
2290e8bb589dSMatt Macy 	swap_total += nblks;
229135872e79SKonstantin Belousov 	swapon_check_swzone();
2292d05bc129SAlan Cox 	swp_sizecheck();
2293d05bc129SAlan Cox 	mtx_unlock(&sw_dev_mtx);
2294b1fd102eSMark Johnston 	EVENTHANDLER_INVOKE(swapon, sp);
229559efee01SPoul-Henning Kamp }
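/*
 * Layout sketch (illustrative figures): if an existing device ends at
 * sw_end == 1024, the loop above assigns the new device dvbase 1025,
 * so global block 1024 stays permanently unmapped as the guard page
 * between the two devices.
 */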
2296e9c0cc15SPoul-Henning Kamp 
2297e9c0cc15SPoul-Henning Kamp /*
2298e9c0cc15SPoul-Henning Kamp  * SYSCALL: swapoff(devname)
2299e9c0cc15SPoul-Henning Kamp  *
2300e9c0cc15SPoul-Henning Kamp  * Disable swapping on the given device.
2301dee34ca4SPoul-Henning Kamp  *
2302dee34ca4SPoul-Henning Kamp  * XXX: Badly designed system call: it should use a device index
2303dee34ca4SPoul-Henning Kamp  * rather than filename as specification.  We keep sw_vp around
2304dee34ca4SPoul-Henning Kamp  * only to make this work.
2305e9c0cc15SPoul-Henning Kamp  */
2306e9c0cc15SPoul-Henning Kamp #ifndef _SYS_SYSPROTO_H_
2307e9c0cc15SPoul-Henning Kamp struct swapoff_args {
2308e9c0cc15SPoul-Henning Kamp 	char *name;
2309e9c0cc15SPoul-Henning Kamp };
2310e9c0cc15SPoul-Henning Kamp #endif
2311e9c0cc15SPoul-Henning Kamp 
2312e9c0cc15SPoul-Henning Kamp /*
2313e9c0cc15SPoul-Henning Kamp  * MPSAFE
2314e9c0cc15SPoul-Henning Kamp  */
2315e9c0cc15SPoul-Henning Kamp /* ARGSUSED */
2316e9c0cc15SPoul-Henning Kamp int
23178451d0ddSKip Macy sys_swapoff(struct thread *td, struct swapoff_args *uap)
2318e9c0cc15SPoul-Henning Kamp {
2319e9c0cc15SPoul-Henning Kamp 	struct vnode *vp;
2320e9c0cc15SPoul-Henning Kamp 	struct nameidata nd;
2321e9c0cc15SPoul-Henning Kamp 	struct swdevt *sp;
23228f60c087SPoul-Henning Kamp 	int error;
2323e9c0cc15SPoul-Henning Kamp 
2324acd3428bSRobert Watson 	error = priv_check(td, PRIV_SWAPOFF);
2325e9c0cc15SPoul-Henning Kamp 	if (error)
23260909f38aSPawel Jakub Dawidek 		return (error);
2327e9c0cc15SPoul-Henning Kamp 
232804533e1eSKonstantin Belousov 	sx_xlock(&swdev_syscall_lock);
2329e9c0cc15SPoul-Henning Kamp 
2330d9135e72SRobert Watson 	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
2331d9135e72SRobert Watson 	    td);
2332e9c0cc15SPoul-Henning Kamp 	error = namei(&nd);
2333e9c0cc15SPoul-Henning Kamp 	if (error)
2334e9c0cc15SPoul-Henning Kamp 		goto done;
2335e9c0cc15SPoul-Henning Kamp 	NDFREE(&nd, NDF_ONLY_PNBUF);
2336e9c0cc15SPoul-Henning Kamp 	vp = nd.ni_vp;
2337e9c0cc15SPoul-Henning Kamp 
233820da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
23398f60c087SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2340dee34ca4SPoul-Henning Kamp 		if (sp->sw_vp == vp)
23410909f38aSPawel Jakub Dawidek 			break;
2342e9c0cc15SPoul-Henning Kamp 	}
234320da9c2eSPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
23440909f38aSPawel Jakub Dawidek 	if (sp == NULL) {
2345e9c0cc15SPoul-Henning Kamp 		error = EINVAL;
2346e9c0cc15SPoul-Henning Kamp 		goto done;
23470909f38aSPawel Jakub Dawidek 	}
234835918c55SChristian S.J. Peron 	error = swapoff_one(sp, td->td_ucred);
23490909f38aSPawel Jakub Dawidek done:
235004533e1eSKonstantin Belousov 	sx_xunlock(&swdev_syscall_lock);
23510909f38aSPawel Jakub Dawidek 	return (error);
23520909f38aSPawel Jakub Dawidek }
23530909f38aSPawel Jakub Dawidek 
23540909f38aSPawel Jakub Dawidek static int
235535918c55SChristian S.J. Peron swapoff_one(struct swdevt *sp, struct ucred *cred)
23560909f38aSPawel Jakub Dawidek {
235703bdd65fSAlan Cox 	u_long nblks;
2358e9c0cc15SPoul-Henning Kamp #ifdef MAC
23590909f38aSPawel Jakub Dawidek 	int error;
2360e9c0cc15SPoul-Henning Kamp #endif
2361e9c0cc15SPoul-Henning Kamp 
236204533e1eSKonstantin Belousov 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
23630909f38aSPawel Jakub Dawidek #ifdef MAC
2364cb05b60aSAttilio Rao 	(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
236535918c55SChristian S.J. Peron 	error = mac_system_check_swapoff(cred, sp->sw_vp);
236622db15c0SAttilio Rao 	(void) VOP_UNLOCK(sp->sw_vp, 0);
23670909f38aSPawel Jakub Dawidek 	if (error != 0)
23680909f38aSPawel Jakub Dawidek 		return (error);
23690909f38aSPawel Jakub Dawidek #endif
2370e9c0cc15SPoul-Henning Kamp 	nblks = sp->sw_nblks;
2371e9c0cc15SPoul-Henning Kamp 
2372e9c0cc15SPoul-Henning Kamp 	/*
2373e9c0cc15SPoul-Henning Kamp 	 * We can turn off this swap device safely only if the
2374e9c0cc15SPoul-Henning Kamp 	 * available virtual memory in the system will fit the amount
2375e9c0cc15SPoul-Henning Kamp 	 * of data we will have to page back in, plus an epsilon so
2376e9c0cc15SPoul-Henning Kamp 	 * the system doesn't become critically low on swap space.
2377e9c0cc15SPoul-Henning Kamp 	 */
2378e2068d0bSJeff Roberson 	if (vm_free_count() + swap_pager_avail < nblks + nswap_lowat)
23790909f38aSPawel Jakub Dawidek 		return (ENOMEM);
2380e9c0cc15SPoul-Henning Kamp 
2381e9c0cc15SPoul-Henning Kamp 	/*
2382e9c0cc15SPoul-Henning Kamp 	 * Prevent further allocations on this device.
2383e9c0cc15SPoul-Henning Kamp 	 */
23842928cef7SAlan Cox 	mtx_lock(&sw_dev_mtx);
2385e9c0cc15SPoul-Henning Kamp 	sp->sw_flags |= SW_CLOSING;
238603bdd65fSAlan Cox 	swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
2387e8bb589dSMatt Macy 	swap_total -= nblks;
23882928cef7SAlan Cox 	mtx_unlock(&sw_dev_mtx);
2389e9c0cc15SPoul-Henning Kamp 
2390e9c0cc15SPoul-Henning Kamp 	/*
2391e9c0cc15SPoul-Henning Kamp 	 * Page in the contents of the device and close it.
2392e9c0cc15SPoul-Henning Kamp 	 */
2393b3fed13eSDavid Schultz 	swap_pager_swapoff(sp);
2394e9c0cc15SPoul-Henning Kamp 
239535918c55SChristian S.J. Peron 	sp->sw_close(curthread, sp);
239620da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
23979e3e3fe5SWarner Losh 	sp->sw_id = NULL;
23988f60c087SPoul-Henning Kamp 	TAILQ_REMOVE(&swtailq, sp, sw_list);
23990676a140SAlan Cox 	nswapdev--;
24007dea2c2eSAlan Cox 	if (nswapdev == 0) {
24017dea2c2eSAlan Cox 		swap_pager_full = 2;
24027dea2c2eSAlan Cox 		swap_pager_almost_full = 1;
24037dea2c2eSAlan Cox 	}
24048f60c087SPoul-Henning Kamp 	if (swdevhd == sp)
24058f60c087SPoul-Henning Kamp 		swdevhd = NULL;
2406d05bc129SAlan Cox 	mtx_unlock(&sw_dev_mtx);
24078f60c087SPoul-Henning Kamp 	blist_destroy(sp->sw_blist);
24088f60c087SPoul-Henning Kamp 	free(sp, M_VMPGDATA);
24090909f38aSPawel Jakub Dawidek 	return (0);
24100909f38aSPawel Jakub Dawidek }
2411e9c0cc15SPoul-Henning Kamp 
24120909f38aSPawel Jakub Dawidek void
24130909f38aSPawel Jakub Dawidek swapoff_all(void)
24140909f38aSPawel Jakub Dawidek {
24150909f38aSPawel Jakub Dawidek 	struct swdevt *sp, *spt;
24160909f38aSPawel Jakub Dawidek 	const char *devname;
24170909f38aSPawel Jakub Dawidek 	int error;
24180909f38aSPawel Jakub Dawidek 
241904533e1eSKonstantin Belousov 	sx_xlock(&swdev_syscall_lock);
24200909f38aSPawel Jakub Dawidek 
24210909f38aSPawel Jakub Dawidek 	mtx_lock(&sw_dev_mtx);
24220909f38aSPawel Jakub Dawidek 	TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
24230909f38aSPawel Jakub Dawidek 		mtx_unlock(&sw_dev_mtx);
24240909f38aSPawel Jakub Dawidek 		if (vn_isdisk(sp->sw_vp, NULL))
24257870adb6SEd Schouten 			devname = devtoname(sp->sw_vp->v_rdev);
24260909f38aSPawel Jakub Dawidek 		else
24270909f38aSPawel Jakub Dawidek 			devname = "[file]";
242835918c55SChristian S.J. Peron 		error = swapoff_one(sp, thread0.td_ucred);
24290909f38aSPawel Jakub Dawidek 		if (error != 0) {
24300909f38aSPawel Jakub Dawidek 			printf("Cannot remove swap device %s (error=%d), "
24310909f38aSPawel Jakub Dawidek 			    "skipping.\n", devname, error);
24320909f38aSPawel Jakub Dawidek 		} else if (bootverbose) {
24330909f38aSPawel Jakub Dawidek 			printf("Swap device %s removed.\n", devname);
24340909f38aSPawel Jakub Dawidek 		}
24350909f38aSPawel Jakub Dawidek 		mtx_lock(&sw_dev_mtx);
24360909f38aSPawel Jakub Dawidek 	}
24370909f38aSPawel Jakub Dawidek 	mtx_unlock(&sw_dev_mtx);
24380909f38aSPawel Jakub Dawidek 
243904533e1eSKonstantin Belousov 	sx_xunlock(&swdev_syscall_lock);
2440e9c0cc15SPoul-Henning Kamp }
2441e9c0cc15SPoul-Henning Kamp 
2442567104a1SPoul-Henning Kamp void
2443567104a1SPoul-Henning Kamp swap_pager_status(int *total, int *used)
2444567104a1SPoul-Henning Kamp {
2445567104a1SPoul-Henning Kamp 	struct swdevt *sp;
2446567104a1SPoul-Henning Kamp 
2447567104a1SPoul-Henning Kamp 	*total = 0;
2448567104a1SPoul-Henning Kamp 	*used = 0;
244920da9c2eSPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
24508f60c087SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2451567104a1SPoul-Henning Kamp 		*total += sp->sw_nblks;
2452567104a1SPoul-Henning Kamp 		*used += sp->sw_used;
2453567104a1SPoul-Henning Kamp 	}
245420da9c2eSPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
2455567104a1SPoul-Henning Kamp }
2456567104a1SPoul-Henning Kamp 
2457dda4f960SKonstantin Belousov int
2458dda4f960SKonstantin Belousov swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
2459dda4f960SKonstantin Belousov {
2460dda4f960SKonstantin Belousov 	struct swdevt *sp;
24617870adb6SEd Schouten 	const char *tmp_devname;
2462dda4f960SKonstantin Belousov 	int error, n;
2463dda4f960SKonstantin Belousov 
2464dda4f960SKonstantin Belousov 	n = 0;
2465dda4f960SKonstantin Belousov 	error = ENOENT;
2466dda4f960SKonstantin Belousov 	mtx_lock(&sw_dev_mtx);
2467dda4f960SKonstantin Belousov 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2468dda4f960SKonstantin Belousov 		if (n != name) {
2469dda4f960SKonstantin Belousov 			n++;
2470dda4f960SKonstantin Belousov 			continue;
2471dda4f960SKonstantin Belousov 		}
2472dda4f960SKonstantin Belousov 		xs->xsw_version = XSWDEV_VERSION;
2473dda4f960SKonstantin Belousov 		xs->xsw_dev = sp->sw_dev;
2474dda4f960SKonstantin Belousov 		xs->xsw_flags = sp->sw_flags;
2475dda4f960SKonstantin Belousov 		xs->xsw_nblks = sp->sw_nblks;
2476dda4f960SKonstantin Belousov 		xs->xsw_used = sp->sw_used;
2477dda4f960SKonstantin Belousov 		if (devname != NULL) {
2478dda4f960SKonstantin Belousov 			if (vn_isdisk(sp->sw_vp, NULL))
24797870adb6SEd Schouten 				tmp_devname = devtoname(sp->sw_vp->v_rdev);
2480dda4f960SKonstantin Belousov 			else
2481dda4f960SKonstantin Belousov 				tmp_devname = "[file]";
2482dda4f960SKonstantin Belousov 			strncpy(devname, tmp_devname, len);
2483dda4f960SKonstantin Belousov 		}
2484dda4f960SKonstantin Belousov 		error = 0;
2485dda4f960SKonstantin Belousov 		break;
2486dda4f960SKonstantin Belousov 	}
2487dda4f960SKonstantin Belousov 	mtx_unlock(&sw_dev_mtx);
2488dda4f960SKonstantin Belousov 	return (error);
2489dda4f960SKonstantin Belousov }
2490dda4f960SKonstantin Belousov 
249169921123SKonstantin Belousov #if defined(COMPAT_FREEBSD11)
249269921123SKonstantin Belousov #define XSWDEV_VERSION_11	1
249369921123SKonstantin Belousov struct xswdev11 {
249469921123SKonstantin Belousov 	u_int	xsw_version;
249569921123SKonstantin Belousov 	uint32_t xsw_dev;
249669921123SKonstantin Belousov 	int	xsw_flags;
249769921123SKonstantin Belousov 	int	xsw_nblks;
249869921123SKonstantin Belousov 	int     xsw_used;
249969921123SKonstantin Belousov };
250069921123SKonstantin Belousov #endif
250169921123SKonstantin Belousov 
2502f6d281e8SKonstantin Belousov #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2503f6d281e8SKonstantin Belousov struct xswdev32 {
2504f6d281e8SKonstantin Belousov 	u_int	xsw_version;
2505f6d281e8SKonstantin Belousov 	u_int	xsw_dev1, xsw_dev2;
2506f6d281e8SKonstantin Belousov 	int	xsw_flags;
2507f6d281e8SKonstantin Belousov 	int	xsw_nblks;
2508f6d281e8SKonstantin Belousov 	int     xsw_used;
2509f6d281e8SKonstantin Belousov };
2510f6d281e8SKonstantin Belousov #endif
2511f6d281e8SKonstantin Belousov 
2512e9c0cc15SPoul-Henning Kamp static int
2513e9c0cc15SPoul-Henning Kamp sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2514e9c0cc15SPoul-Henning Kamp {
2515e9c0cc15SPoul-Henning Kamp 	struct xswdev xs;
2516f6d281e8SKonstantin Belousov #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2517f6d281e8SKonstantin Belousov 	struct xswdev32 xs32;
2518f6d281e8SKonstantin Belousov #endif
251969921123SKonstantin Belousov #if defined(COMPAT_FREEBSD11)
252069921123SKonstantin Belousov 	struct xswdev11 xs11;
252169921123SKonstantin Belousov #endif
2522dda4f960SKonstantin Belousov 	int error;
2523e9c0cc15SPoul-Henning Kamp 
2524e9c0cc15SPoul-Henning Kamp 	if (arg2 != 1)			/* name length */
2525e9c0cc15SPoul-Henning Kamp 		return (EINVAL);
2526dda4f960SKonstantin Belousov 	error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
2527dda4f960SKonstantin Belousov 	if (error != 0)
2528dda4f960SKonstantin Belousov 		return (error);
2529f6d281e8SKonstantin Belousov #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2530f6d281e8SKonstantin Belousov 	if (req->oldlen == sizeof(xs32)) {
2531f6d281e8SKonstantin Belousov 		xs32.xsw_version = XSWDEV_VERSION;
2532f6d281e8SKonstantin Belousov 		xs32.xsw_dev1 = xs.xsw_dev;
2533f6d281e8SKonstantin Belousov 		xs32.xsw_dev2 = xs.xsw_dev >> 32;
2534f6d281e8SKonstantin Belousov 		xs32.xsw_flags = xs.xsw_flags;
2535f6d281e8SKonstantin Belousov 		xs32.xsw_nblks = xs.xsw_nblks;
2536f6d281e8SKonstantin Belousov 		xs32.xsw_used = xs.xsw_used;
2537f6d281e8SKonstantin Belousov 		error = SYSCTL_OUT(req, &xs32, sizeof(xs32));
2538f6d281e8SKonstantin Belousov 		return (error);
2539f6d281e8SKonstantin Belousov 	}
2540f6d281e8SKonstantin Belousov #endif
254169921123SKonstantin Belousov #if defined(COMPAT_FREEBSD11)
254269921123SKonstantin Belousov 	if (req->oldlen == sizeof(xs11)) {
254369921123SKonstantin Belousov 		xs11.xsw_version = XSWDEV_VERSION_11;
254469921123SKonstantin Belousov 		xs11.xsw_dev = xs.xsw_dev; /* truncation */
254569921123SKonstantin Belousov 		xs11.xsw_flags = xs.xsw_flags;
254669921123SKonstantin Belousov 		xs11.xsw_nblks = xs.xsw_nblks;
254769921123SKonstantin Belousov 		xs11.xsw_used = xs.xsw_used;
254869921123SKonstantin Belousov 		error = SYSCTL_OUT(req, &xs11, sizeof(xs11));
2549f6d281e8SKonstantin Belousov 		return (error);
2550f6d281e8SKonstantin Belousov 	}
255169921123SKonstantin Belousov #endif
2552e9c0cc15SPoul-Henning Kamp 	error = SYSCTL_OUT(req, &xs, sizeof(xs));
2553e9c0cc15SPoul-Henning Kamp 	return (error);
2554e9c0cc15SPoul-Henning Kamp }
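/*
 * Userland sketch (hypothetical, not part of the original source):
 * fetch the record for swap device 0.  The handler above picks the
 * ABI variant purely from the size of the buffer supplied, so passing
 * a current struct xswdev (from <vm/vm_param.h>) selects the native
 * layout.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <vm/vm_param.h>

static int
get_swap_dev0(struct xswdev *xs)
{
	int mib[CTL_MAXNAME];
	size_t mibsz, sz;

	mibsz = nitems(mib) - 1;
	if (sysctlnametomib("vm.swap_info", mib, &mibsz) != 0)
		return (-1);
	mib[mibsz] = 0;			/* device index */
	sz = sizeof(*xs);
	return (sysctl(mib, mibsz + 1, xs, &sz, NULL, 0));
}
#endif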
2555e9c0cc15SPoul-Henning Kamp 
25568f60c087SPoul-Henning Kamp SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2557e9c0cc15SPoul-Henning Kamp     "Number of swap devices");
25584c36e917SKonstantin Belousov SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
25594c36e917SKonstantin Belousov     sysctl_vm_swap_info,
2560e9c0cc15SPoul-Henning Kamp     "Swap statistics by device");
2561ec38b344SPoul-Henning Kamp 
2562ec38b344SPoul-Henning Kamp /*
2563f425ab8eSKonstantin Belousov  * Count the approximate swap usage in pages for a vmspace.  Swap
2564f425ab8eSKonstantin Belousov  * blocks that are shadowed or not yet copied on write are not counted.
2565ec38b344SPoul-Henning Kamp  * The map must be locked.
2566ec38b344SPoul-Henning Kamp  */
25672860553aSRebecca Cran long
2568ec38b344SPoul-Henning Kamp vmspace_swap_count(struct vmspace *vmspace)
2569ec38b344SPoul-Henning Kamp {
257065d8409cSRebecca Cran 	vm_map_t map;
2571ec38b344SPoul-Henning Kamp 	vm_map_entry_t cur;
257265d8409cSRebecca Cran 	vm_object_t object;
2573f425ab8eSKonstantin Belousov 	struct swblk *sb;
2574f425ab8eSKonstantin Belousov 	vm_pindex_t e, pi;
2575f425ab8eSKonstantin Belousov 	long count;
2576f425ab8eSKonstantin Belousov 	int i;
257765d8409cSRebecca Cran 
257865d8409cSRebecca Cran 	map = &vmspace->vm_map;
257965d8409cSRebecca Cran 	count = 0;
2580ec38b344SPoul-Henning Kamp 
2581ec38b344SPoul-Henning Kamp 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
2582f425ab8eSKonstantin Belousov 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2583f425ab8eSKonstantin Belousov 			continue;
2584f425ab8eSKonstantin Belousov 		object = cur->object.vm_object;
2585f425ab8eSKonstantin Belousov 		if (object == NULL || object->type != OBJT_SWAP)
2586f425ab8eSKonstantin Belousov 			continue;
2587f425ab8eSKonstantin Belousov 		VM_OBJECT_RLOCK(object);
2588f425ab8eSKonstantin Belousov 		if (object->type != OBJT_SWAP)
2589f425ab8eSKonstantin Belousov 			goto unlock;
2590f425ab8eSKonstantin Belousov 		pi = OFF_TO_IDX(cur->offset);
2591f425ab8eSKonstantin Belousov 		e = pi + OFF_TO_IDX(cur->end - cur->start);
2592f425ab8eSKonstantin Belousov 		for (;; pi = sb->p + SWAP_META_PAGES) {
2593f425ab8eSKonstantin Belousov 			sb = SWAP_PCTRIE_LOOKUP_GE(
2594f425ab8eSKonstantin Belousov 			    &object->un_pager.swp.swp_blks, pi);
2595f425ab8eSKonstantin Belousov 			if (sb == NULL || sb->p >= e)
2596f425ab8eSKonstantin Belousov 				break;
2597f425ab8eSKonstantin Belousov 			for (i = 0; i < SWAP_META_PAGES; i++) {
2598f425ab8eSKonstantin Belousov 				if (sb->p + i < e &&
2599f425ab8eSKonstantin Belousov 				    sb->d[i] != SWAPBLK_NONE)
2600f425ab8eSKonstantin Belousov 					count++;
2601ec38b344SPoul-Henning Kamp 			}
2602ec38b344SPoul-Henning Kamp 		}
2603f425ab8eSKonstantin Belousov unlock:
2604f425ab8eSKonstantin Belousov 		VM_OBJECT_RUNLOCK(object);
2605ec38b344SPoul-Henning Kamp 	}
2606ec38b344SPoul-Henning Kamp 	return (count);
2607ec38b344SPoul-Henning Kamp }
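/*
 * Usage sketch (hypothetical caller, not part of the original
 * source): per the comment above the map must be locked; a read
 * lock suffices for the traversal.
 */
#if 0
	long swpages;

	vm_map_lock_read(&vmspace->vm_map);
	swpages = vmspace_swap_count(vmspace);
	vm_map_unlock_read(&vmspace->vm_map);
#endif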
2608dee34ca4SPoul-Henning Kamp 
2609dee34ca4SPoul-Henning Kamp /*
2610dee34ca4SPoul-Henning Kamp  * GEOM backend
2611dee34ca4SPoul-Henning Kamp  *
2612dee34ca4SPoul-Henning Kamp  * Swapping onto disk devices.
2613dee34ca4SPoul-Henning Kamp  *
2614dee34ca4SPoul-Henning Kamp  */
2615dee34ca4SPoul-Henning Kamp 
26165721c9c7SPoul-Henning Kamp static g_orphan_t swapgeom_orphan;
26175721c9c7SPoul-Henning Kamp 
2618dee34ca4SPoul-Henning Kamp static struct g_class g_swap_class = {
2619dee34ca4SPoul-Henning Kamp 	.name = "SWAP",
26205721c9c7SPoul-Henning Kamp 	.version = G_VERSION,
26215721c9c7SPoul-Henning Kamp 	.orphan = swapgeom_orphan,
2622dee34ca4SPoul-Henning Kamp };
2623dee34ca4SPoul-Henning Kamp 
2624dee34ca4SPoul-Henning Kamp DECLARE_GEOM_CLASS(g_swap_class, g_class);
2625dee34ca4SPoul-Henning Kamp 
2626dee34ca4SPoul-Henning Kamp 
2627dee34ca4SPoul-Henning Kamp static void
26283398491bSAlexander Motin swapgeom_close_ev(void *arg, int flags)
26293398491bSAlexander Motin {
26303398491bSAlexander Motin 	struct g_consumer *cp;
26313398491bSAlexander Motin 
26323398491bSAlexander Motin 	cp = arg;
26333398491bSAlexander Motin 	g_access(cp, -1, -1, 0);
26343398491bSAlexander Motin 	g_detach(cp);
26353398491bSAlexander Motin 	g_destroy_consumer(cp);
26363398491bSAlexander Motin }
26373398491bSAlexander Motin 
26389e3e3fe5SWarner Losh /*
26399e3e3fe5SWarner Losh  * Add a reference to the g_consumer for an inflight transaction.
26409e3e3fe5SWarner Losh  */
26419e3e3fe5SWarner Losh static void
26429e3e3fe5SWarner Losh swapgeom_acquire(struct g_consumer *cp)
26439e3e3fe5SWarner Losh {
26449e3e3fe5SWarner Losh 
26459e3e3fe5SWarner Losh 	mtx_assert(&sw_dev_mtx, MA_OWNED);
26469e3e3fe5SWarner Losh 	cp->index++;
26479e3e3fe5SWarner Losh }
26489e3e3fe5SWarner Losh 
26499e3e3fe5SWarner Losh /*
26500c657d22SKonstantin Belousov  * Remove a reference from the g_consumer.  Post a close event if all
26510c657d22SKonstantin Belousov  * references go away, since the function might be called from the
26520c657d22SKonstantin Belousov  * biodone context.
26539e3e3fe5SWarner Losh  */
26549e3e3fe5SWarner Losh static void
26559e3e3fe5SWarner Losh swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
26569e3e3fe5SWarner Losh {
26579e3e3fe5SWarner Losh 
26589e3e3fe5SWarner Losh 	mtx_assert(&sw_dev_mtx, MA_OWNED);
26599e3e3fe5SWarner Losh 	cp->index--;
26609e3e3fe5SWarner Losh 	if (cp->index == 0) {
26619e3e3fe5SWarner Losh 		if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
26629e3e3fe5SWarner Losh 			sp->sw_id = NULL;
26639e3e3fe5SWarner Losh 	}
26649e3e3fe5SWarner Losh }
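/*
 * Reference lifecycle sketch (summary, not part of the original
 * source): cp->index is 1 while the consumer is merely open (set at
 * swapon time), each in-flight bio adds one via swapgeom_acquire(),
 * and once the count drops to zero the consumer is torn down through
 * swapgeom_close_ev().
 */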
26659e3e3fe5SWarner Losh 
26663398491bSAlexander Motin static void
2667dee34ca4SPoul-Henning Kamp swapgeom_done(struct bio *bp2)
2668dee34ca4SPoul-Henning Kamp {
26693398491bSAlexander Motin 	struct swdevt *sp;
2670dee34ca4SPoul-Henning Kamp 	struct buf *bp;
26713398491bSAlexander Motin 	struct g_consumer *cp;
2672dee34ca4SPoul-Henning Kamp 
2673dee34ca4SPoul-Henning Kamp 	bp = bp2->bio_caller2;
26743398491bSAlexander Motin 	cp = bp2->bio_from;
2675c5d3d25eSPoul-Henning Kamp 	bp->b_ioflags = bp2->bio_flags;
2676dee34ca4SPoul-Henning Kamp 	if (bp2->bio_error)
2677dee34ca4SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
2678c5d3d25eSPoul-Henning Kamp 	bp->b_resid = bp->b_bcount - bp2->bio_completed;
2679c5d3d25eSPoul-Henning Kamp 	bp->b_error = bp2->bio_error;
2680756a5412SGleb Smirnoff 	bp->b_caller1 = NULL;
2681dee34ca4SPoul-Henning Kamp 	bufdone(bp);
26823398491bSAlexander Motin 	sp = bp2->bio_caller1;
26839e3e3fe5SWarner Losh 	mtx_lock(&sw_dev_mtx);
26849e3e3fe5SWarner Losh 	swapgeom_release(cp, sp);
26853398491bSAlexander Motin 	mtx_unlock(&sw_dev_mtx);
2686dee34ca4SPoul-Henning Kamp 	g_destroy_bio(bp2);
2687dee34ca4SPoul-Henning Kamp }
2688dee34ca4SPoul-Henning Kamp 
2689dee34ca4SPoul-Henning Kamp static void
2690dee34ca4SPoul-Henning Kamp swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2691dee34ca4SPoul-Henning Kamp {
2692dee34ca4SPoul-Henning Kamp 	struct bio *bio;
2693dee34ca4SPoul-Henning Kamp 	struct g_consumer *cp;
2694dee34ca4SPoul-Henning Kamp 
26953398491bSAlexander Motin 	mtx_lock(&sw_dev_mtx);
2696dee34ca4SPoul-Henning Kamp 	cp = sp->sw_id;
2697dee34ca4SPoul-Henning Kamp 	if (cp == NULL) {
26983398491bSAlexander Motin 		mtx_unlock(&sw_dev_mtx);
2699dee34ca4SPoul-Henning Kamp 		bp->b_error = ENXIO;
2700dee34ca4SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
2701dee34ca4SPoul-Henning Kamp 		bufdone(bp);
2702dee34ca4SPoul-Henning Kamp 		return;
2703dee34ca4SPoul-Henning Kamp 	}
27049e3e3fe5SWarner Losh 	swapgeom_acquire(cp);
27053398491bSAlexander Motin 	mtx_unlock(&sw_dev_mtx);
270611041003SKonstantin Belousov 	if (bp->b_iocmd == BIO_WRITE)
270711041003SKonstantin Belousov 		bio = g_new_bio();
270811041003SKonstantin Belousov 	else
27094f8205e5SPoul-Henning Kamp 		bio = g_alloc_bio();
27104f8205e5SPoul-Henning Kamp 	if (bio == NULL) {
27119e3e3fe5SWarner Losh 		mtx_lock(&sw_dev_mtx);
27129e3e3fe5SWarner Losh 		swapgeom_release(cp, sp);
27139e3e3fe5SWarner Losh 		mtx_unlock(&sw_dev_mtx);
27143e5b6861SPoul-Henning Kamp 		bp->b_error = ENOMEM;
27153e5b6861SPoul-Henning Kamp 		bp->b_ioflags |= BIO_ERROR;
27160b208315SEdward Tomasz Napierala 		printf("swap_pager: cannot allocate bio\n");
27173e5b6861SPoul-Henning Kamp 		bufdone(bp);
27183e5b6861SPoul-Henning Kamp 		return;
27193e5b6861SPoul-Henning Kamp 	}
272011041003SKonstantin Belousov 
2721756a5412SGleb Smirnoff 	bp->b_caller1 = bio;
27223398491bSAlexander Motin 	bio->bio_caller1 = sp;
2723dee34ca4SPoul-Henning Kamp 	bio->bio_caller2 = bp;
2724c5d3d25eSPoul-Henning Kamp 	bio->bio_cmd = bp->b_iocmd;
2725dee34ca4SPoul-Henning Kamp 	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2726dee34ca4SPoul-Henning Kamp 	bio->bio_length = bp->b_bcount;
2727dee34ca4SPoul-Henning Kamp 	bio->bio_done = swapgeom_done;
2728fade8dd7SJeff Roberson 	if (!buf_mapped(bp)) {
27292cc718a1SKonstantin Belousov 		bio->bio_ma = bp->b_pages;
27302cc718a1SKonstantin Belousov 		bio->bio_data = unmapped_buf;
27312cc718a1SKonstantin Belousov 		bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
27322cc718a1SKonstantin Belousov 		bio->bio_ma_n = bp->b_npages;
27332cc718a1SKonstantin Belousov 		bio->bio_flags |= BIO_UNMAPPED;
27342cc718a1SKonstantin Belousov 	} else {
27352cc718a1SKonstantin Belousov 		bio->bio_data = bp->b_data;
27362cc718a1SKonstantin Belousov 		bio->bio_ma = NULL;
27372cc718a1SKonstantin Belousov 	}
2738dee34ca4SPoul-Henning Kamp 	g_io_request(bio, cp);
2739dee34ca4SPoul-Henning Kamp 	return;
2740dee34ca4SPoul-Henning Kamp }
2741dee34ca4SPoul-Henning Kamp 
2742dee34ca4SPoul-Henning Kamp static void
2743dee34ca4SPoul-Henning Kamp swapgeom_orphan(struct g_consumer *cp)
2744dee34ca4SPoul-Henning Kamp {
2745dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
27463398491bSAlexander Motin 	int destroy;
2747dee34ca4SPoul-Henning Kamp 
2748dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
27493398491bSAlexander Motin 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
27503398491bSAlexander Motin 		if (sp->sw_id == cp) {
27518f12d83aSAlexander Motin 			sp->sw_flags |= SW_CLOSING;
27523398491bSAlexander Motin 			break;
2753dee34ca4SPoul-Henning Kamp 		}
27543398491bSAlexander Motin 	}
27559e3e3fe5SWarner Losh 	/*
27569e3e3fe5SWarner Losh 	 * Drop the reference we were created with.  Do it directly since
27579e3e3fe5SWarner Losh 	 * we are in a special context where we do not have to queue the
27589e3e3fe5SWarner Losh 	 * call to swapgeom_close_ev().
27599e3e3fe5SWarner Losh 	 */
27609e3e3fe5SWarner Losh 	cp->index--;
27613398491bSAlexander Motin 	destroy = ((sp != NULL) && (cp->index == 0));
27623398491bSAlexander Motin 	if (destroy)
27633398491bSAlexander Motin 		sp->sw_id = NULL;
27643398491bSAlexander Motin 	mtx_unlock(&sw_dev_mtx);
27653398491bSAlexander Motin 	if (destroy)
27663398491bSAlexander Motin 		swapgeom_close_ev(cp, 0);
2767dee34ca4SPoul-Henning Kamp }
2768dee34ca4SPoul-Henning Kamp 
2769dee34ca4SPoul-Henning Kamp static void
2770dee34ca4SPoul-Henning Kamp swapgeom_close(struct thread *td, struct swdevt *sw)
2771dee34ca4SPoul-Henning Kamp {
27723398491bSAlexander Motin 	struct g_consumer *cp;
2773dee34ca4SPoul-Henning Kamp 
27743398491bSAlexander Motin 	mtx_lock(&sw_dev_mtx);
27753398491bSAlexander Motin 	cp = sw->sw_id;
27763398491bSAlexander Motin 	sw->sw_id = NULL;
27773398491bSAlexander Motin 	mtx_unlock(&sw_dev_mtx);
27780c657d22SKonstantin Belousov 
27790c657d22SKonstantin Belousov 	/*
27800c657d22SKonstantin Belousov 	 * swapgeom_close() may be called from the biodone context,
27810c657d22SKonstantin Belousov 	 * where we cannot perform topology changes.  Delegate the
27820c657d22SKonstantin Belousov 	 * work to the events thread.
27830c657d22SKonstantin Belousov 	 */
27843398491bSAlexander Motin 	if (cp != NULL)
27853398491bSAlexander Motin 		g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
2786dee34ca4SPoul-Henning Kamp }
2787dee34ca4SPoul-Henning Kamp 
278888ad2d7bSKonstantin Belousov static int
278988ad2d7bSKonstantin Belousov swapongeom_locked(struct cdev *dev, struct vnode *vp)
2790dee34ca4SPoul-Henning Kamp {
2791dee34ca4SPoul-Henning Kamp 	struct g_provider *pp;
2792dee34ca4SPoul-Henning Kamp 	struct g_consumer *cp;
2793dee34ca4SPoul-Henning Kamp 	static struct g_geom *gp;
2794dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
2795dee34ca4SPoul-Henning Kamp 	u_long nblks;
2796dee34ca4SPoul-Henning Kamp 	int error;
2797dee34ca4SPoul-Henning Kamp 
279888ad2d7bSKonstantin Belousov 	pp = g_dev_getprovider(dev);
279988ad2d7bSKonstantin Belousov 	if (pp == NULL)
280088ad2d7bSKonstantin Belousov 		return (ENODEV);
2801dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
2802dee34ca4SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2803dee34ca4SPoul-Henning Kamp 		cp = sp->sw_id;
2804dee34ca4SPoul-Henning Kamp 		if (cp != NULL && cp->provider == pp) {
2805dee34ca4SPoul-Henning Kamp 			mtx_unlock(&sw_dev_mtx);
280688ad2d7bSKonstantin Belousov 			return (EBUSY);
2807dee34ca4SPoul-Henning Kamp 		}
2808dee34ca4SPoul-Henning Kamp 	}
2809dee34ca4SPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
28105721c9c7SPoul-Henning Kamp 	if (gp == NULL)
281102c62349SJaakko Heinonen 		gp = g_new_geomf(&g_swap_class, "swap");
2812dee34ca4SPoul-Henning Kamp 	cp = g_new_consumer(gp);
28139e3e3fe5SWarner Losh 	cp->index = 1;	/* Number of active I/Os, plus one for being active. */
28149e3e3fe5SWarner Losh 	cp->flags |=  G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2815dee34ca4SPoul-Henning Kamp 	g_attach(cp, pp);
2816afeb65e6SPoul-Henning Kamp 	/*
2817afeb65e6SPoul-Henning Kamp 	 * XXX: Every time you think you can improve the margin for
2818afeb65e6SPoul-Henning Kamp 	 * footshooting, somebody depends on the ability to do so:
2819afeb65e6SPoul-Henning Kamp 	 * savecore(8) wants to write to our swapdev so we cannot
2820afeb65e6SPoul-Henning Kamp 	 * set an exclusive count :-(
2821afeb65e6SPoul-Henning Kamp 	 */
2822d2bae332SPoul-Henning Kamp 	error = g_access(cp, 1, 1, 0);
282388ad2d7bSKonstantin Belousov 	if (error != 0) {
2824dee34ca4SPoul-Henning Kamp 		g_detach(cp);
2825dee34ca4SPoul-Henning Kamp 		g_destroy_consumer(cp);
282688ad2d7bSKonstantin Belousov 		return (error);
2827dee34ca4SPoul-Henning Kamp 	}
2828dee34ca4SPoul-Henning Kamp 	nblks = pp->mediasize / DEV_BSIZE;
282988ad2d7bSKonstantin Belousov 	swaponsomething(vp, cp, nblks, swapgeom_strategy,
283088ad2d7bSKonstantin Belousov 	    swapgeom_close, dev2udev(dev),
28312cc718a1SKonstantin Belousov 	    (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
283288ad2d7bSKonstantin Belousov 	return (0);
2833dee34ca4SPoul-Henning Kamp }
2834dee34ca4SPoul-Henning Kamp 
2835dee34ca4SPoul-Henning Kamp static int
283688ad2d7bSKonstantin Belousov swapongeom(struct vnode *vp)
2837dee34ca4SPoul-Henning Kamp {
2838dee34ca4SPoul-Henning Kamp 	int error;
2839dee34ca4SPoul-Henning Kamp 
2840cb05b60aSAttilio Rao 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
284188ad2d7bSKonstantin Belousov 	if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
284288ad2d7bSKonstantin Belousov 		error = ENOENT;
284388ad2d7bSKonstantin Belousov 	} else {
284488ad2d7bSKonstantin Belousov 		g_topology_lock();
284588ad2d7bSKonstantin Belousov 		error = swapongeom_locked(vp->v_rdev, vp);
284688ad2d7bSKonstantin Belousov 		g_topology_unlock();
284788ad2d7bSKonstantin Belousov 	}
284822db15c0SAttilio Rao 	VOP_UNLOCK(vp, 0);
2849dee34ca4SPoul-Henning Kamp 	return (error);
2850dee34ca4SPoul-Henning Kamp }
2851dee34ca4SPoul-Henning Kamp 
2852dee34ca4SPoul-Henning Kamp /*
2853dee34ca4SPoul-Henning Kamp  * VNODE backend
2854dee34ca4SPoul-Henning Kamp  *
2855dee34ca4SPoul-Henning Kamp  * This is used mainly for network filesystem (read: probably only tested
2856dee34ca4SPoul-Henning Kamp  * with NFS) swapfiles.
2857dee34ca4SPoul-Henning Kamp  *
2858dee34ca4SPoul-Henning Kamp  */
2859dee34ca4SPoul-Henning Kamp 
2860dee34ca4SPoul-Henning Kamp static void
2861dee34ca4SPoul-Henning Kamp swapdev_strategy(struct buf *bp, struct swdevt *sp)
2862dee34ca4SPoul-Henning Kamp {
2863494eb176SPoul-Henning Kamp 	struct vnode *vp2;
2864dee34ca4SPoul-Henning Kamp 
2865dee34ca4SPoul-Henning Kamp 	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2866dee34ca4SPoul-Henning Kamp 
2867dee34ca4SPoul-Henning Kamp 	vp2 = sp->sw_id;
2868dee34ca4SPoul-Henning Kamp 	vhold(vp2);
2869dee34ca4SPoul-Henning Kamp 	if (bp->b_iocmd == BIO_WRITE) {
28703cfc7651SOlivier Houchard 		if (bp->b_bufobj)
2871494eb176SPoul-Henning Kamp 			bufobj_wdrop(bp->b_bufobj);
2872a76d8f4eSPoul-Henning Kamp 		bufobj_wref(&vp2->v_bufobj);
2873dee34ca4SPoul-Henning Kamp 	}
28743cfc7651SOlivier Houchard 	if (bp->b_bufobj != &vp2->v_bufobj)
28753cfc7651SOlivier Houchard 		bp->b_bufobj = &vp2->v_bufobj;
2876dee34ca4SPoul-Henning Kamp 	bp->b_vp = vp2;
28772c18019fSPoul-Henning Kamp 	bp->b_iooffset = dbtob(bp->b_blkno);
2878b792bebeSPoul-Henning Kamp 	bstrategy(bp);
2879dee34ca4SPoul-Henning Kamp 	return;
2880dee34ca4SPoul-Henning Kamp }
2881dee34ca4SPoul-Henning Kamp 
2882dee34ca4SPoul-Henning Kamp static void
2883dee34ca4SPoul-Henning Kamp swapdev_close(struct thread *td, struct swdevt *sp)
2884dee34ca4SPoul-Henning Kamp {
2885dee34ca4SPoul-Henning Kamp 
2886dee34ca4SPoul-Henning Kamp 	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2887dee34ca4SPoul-Henning Kamp 	vrele(sp->sw_vp);
2888dee34ca4SPoul-Henning Kamp }
2889dee34ca4SPoul-Henning Kamp 
2890dee34ca4SPoul-Henning Kamp 
2891dee34ca4SPoul-Henning Kamp static int
2892dee34ca4SPoul-Henning Kamp swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2893dee34ca4SPoul-Henning Kamp {
2894dee34ca4SPoul-Henning Kamp 	struct swdevt *sp;
2895dee34ca4SPoul-Henning Kamp 	int error;
2896dee34ca4SPoul-Henning Kamp 
2897dee34ca4SPoul-Henning Kamp 	if (nblks == 0)
2898dee34ca4SPoul-Henning Kamp 		return (ENXIO);
2899dee34ca4SPoul-Henning Kamp 	mtx_lock(&sw_dev_mtx);
2900dee34ca4SPoul-Henning Kamp 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2901dee34ca4SPoul-Henning Kamp 		if (sp->sw_id == vp) {
2902dee34ca4SPoul-Henning Kamp 			mtx_unlock(&sw_dev_mtx);
2903dee34ca4SPoul-Henning Kamp 			return (EBUSY);
2904dee34ca4SPoul-Henning Kamp 		}
2905dee34ca4SPoul-Henning Kamp 	}
2906dee34ca4SPoul-Henning Kamp 	mtx_unlock(&sw_dev_mtx);
2907dee34ca4SPoul-Henning Kamp 
2908cb05b60aSAttilio Rao 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2909dee34ca4SPoul-Henning Kamp #ifdef MAC
291030d239bcSRobert Watson 	error = mac_system_check_swapon(td->td_ucred, vp);
2911dee34ca4SPoul-Henning Kamp 	if (error == 0)
2912dee34ca4SPoul-Henning Kamp #endif
29139e223287SKonstantin Belousov 		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
291422db15c0SAttilio Rao 	(void) VOP_UNLOCK(vp, 0);
2915dee34ca4SPoul-Henning Kamp 	if (error)
2916dee34ca4SPoul-Henning Kamp 		return (error);
2917dee34ca4SPoul-Henning Kamp 
2918dee34ca4SPoul-Henning Kamp 	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
29192cc718a1SKonstantin Belousov 	    NODEV, 0);
2920dee34ca4SPoul-Henning Kamp 	return (0);
2921dee34ca4SPoul-Henning Kamp }
292289c241d1SGleb Smirnoff 
292389c241d1SGleb Smirnoff static int
292489c241d1SGleb Smirnoff sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
292589c241d1SGleb Smirnoff {
292689c241d1SGleb Smirnoff 	int error, new, n;
292789c241d1SGleb Smirnoff 
292889c241d1SGleb Smirnoff 	new = nsw_wcount_async_max;
292989c241d1SGleb Smirnoff 	error = sysctl_handle_int(oidp, &new, 0, req);
293089c241d1SGleb Smirnoff 	if (error != 0 || req->newptr == NULL)
293189c241d1SGleb Smirnoff 		return (error);
293289c241d1SGleb Smirnoff 
293389c241d1SGleb Smirnoff 	if (new > nswbuf / 2 || new < 1)
293489c241d1SGleb Smirnoff 		return (EINVAL);
293589c241d1SGleb Smirnoff 
2936756a5412SGleb Smirnoff 	mtx_lock(&swbuf_mtx);
293789c241d1SGleb Smirnoff 	while (nsw_wcount_async_max != new) {
293889c241d1SGleb Smirnoff 		/*
293989c241d1SGleb Smirnoff 		 * Adjust difference.  If the current async count is too low,
294089c241d1SGleb Smirnoff 		 * we will need to squeeze our update in slowly.  Sleep with a
294189c241d1SGleb Smirnoff 		 * higher priority than getpbuf() to finish faster.
294289c241d1SGleb Smirnoff 		 */
294389c241d1SGleb Smirnoff 		n = new - nsw_wcount_async_max;
294489c241d1SGleb Smirnoff 		if (nsw_wcount_async + n >= 0) {
294589c241d1SGleb Smirnoff 			nsw_wcount_async += n;
294689c241d1SGleb Smirnoff 			nsw_wcount_async_max += n;
294789c241d1SGleb Smirnoff 			wakeup(&nsw_wcount_async);
294889c241d1SGleb Smirnoff 		} else {
294989c241d1SGleb Smirnoff 			nsw_wcount_async_max -= nsw_wcount_async;
295089c241d1SGleb Smirnoff 			nsw_wcount_async = 0;
2951756a5412SGleb Smirnoff 			msleep(&nsw_wcount_async, &swbuf_mtx, PSWP,
295289c241d1SGleb Smirnoff 			    "swpsysctl", 0);
295389c241d1SGleb Smirnoff 		}
295489c241d1SGleb Smirnoff 	}
2955756a5412SGleb Smirnoff 	mtx_unlock(&swbuf_mtx);
295689c241d1SGleb Smirnoff 
295789c241d1SGleb Smirnoff 	return (0);
295889c241d1SGleb Smirnoff }
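/*
 * Usage sketch (hypothetical, not part of the original source): the
 * SYSCTL_PROC glue elsewhere in this file exposes the handler above
 * as vm.swap_async_max, so the cap can be tuned from userland:
 */
#if 0
#include <sys/sysctl.h>

static int
set_swap_async_max(int new_max)
{
	return (sysctlbyname("vm.swap_async_max", NULL, NULL, &new_max,
	    sizeof(new_max)));
}
#endif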
2959