xref: /freebsd/sys/vm/swap_pager.c (revision 324cdd9320f58837c2fbaa7f6ceb9ea5c33d5b2a)
1 /*-
2  * SPDX-License-Identifier: BSD-4-Clause
3  *
4  * Copyright (c) 1998 Matthew Dillon,
5  * Copyright (c) 1994 John S. Dyson
6  * Copyright (c) 1990 University of Utah.
7  * Copyright (c) 1982, 1986, 1989, 1993
8  *	The Regents of the University of California.  All rights reserved.
9  *
10  * This code is derived from software contributed to Berkeley by
11  * the Systems Programming Group of the University of Utah Computer
12  * Science Department.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  * 3. All advertising materials mentioning features or use of this software
23  *    must display the following acknowledgement:
24  *	This product includes software developed by the University of
25  *	California, Berkeley and its contributors.
26  * 4. Neither the name of the University nor the names of its contributors
27  *    may be used to endorse or promote products derived from this software
28  *    without specific prior written permission.
29  *
30  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40  * SUCH DAMAGE.
41  *
42  *				New Swap System
43  *				Matthew Dillon
44  *
45  * Radix Bitmap 'blists'.
46  *
47  *	- The new swapper uses the new radix bitmap code.  This should scale
48  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
49  *	  arbitrary degree of fragmentation.
50  *
51  * Features:
52  *
53  *	- on the fly reallocation of swap during putpages.  The new system
54  *	  does not try to keep previously allocated swap blocks for dirty
55  *	  pages.
56  *
57  *	- on the fly deallocation of swap
58  *
59  *	- No more garbage collection required.  Unnecessarily allocated swap
60  *	  blocks only exist for dirty vm_page_t's now and these are already
61  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
62  *	  removal of invalidated swap blocks when a page is destroyed
63  *	  or renamed.
64  *
65  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
66  *
67  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
68  *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
69  */
70 
71 #include <sys/cdefs.h>
72 __FBSDID("$FreeBSD$");
73 
74 #include "opt_vm.h"
75 
76 #include <sys/param.h>
77 #include <sys/bio.h>
78 #include <sys/blist.h>
79 #include <sys/buf.h>
80 #include <sys/conf.h>
81 #include <sys/disk.h>
82 #include <sys/disklabel.h>
83 #include <sys/eventhandler.h>
84 #include <sys/fcntl.h>
85 #include <sys/lock.h>
86 #include <sys/kernel.h>
87 #include <sys/mount.h>
88 #include <sys/namei.h>
89 #include <sys/malloc.h>
90 #include <sys/pctrie.h>
91 #include <sys/priv.h>
92 #include <sys/proc.h>
93 #include <sys/racct.h>
94 #include <sys/resource.h>
95 #include <sys/resourcevar.h>
96 #include <sys/rwlock.h>
97 #include <sys/sbuf.h>
98 #include <sys/sysctl.h>
99 #include <sys/sysproto.h>
100 #include <sys/systm.h>
101 #include <sys/sx.h>
102 #include <sys/vmmeter.h>
103 #include <sys/vnode.h>
104 
105 #include <security/mac/mac_framework.h>
106 
107 #include <vm/vm.h>
108 #include <vm/pmap.h>
109 #include <vm/vm_map.h>
110 #include <vm/vm_kern.h>
111 #include <vm/vm_object.h>
112 #include <vm/vm_page.h>
113 #include <vm/vm_pager.h>
114 #include <vm/vm_pageout.h>
115 #include <vm/vm_param.h>
116 #include <vm/swap_pager.h>
117 #include <vm/vm_extern.h>
118 #include <vm/uma.h>
119 
120 #include <geom/geom.h>
121 
122 /*
123  * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
124  * The 64-page limit is due to the radix code (kern/subr_blist.c).
125  */
126 #ifndef MAX_PAGEOUT_CLUSTER
127 #define	MAX_PAGEOUT_CLUSTER	32
128 #endif
129 
130 #if !defined(SWB_NPAGES)
131 #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
132 #endif
133 
134 #define	SWAP_META_PAGES		PCTRIE_COUNT
135 
136 /*
137  * A swblk structure maps each page index within a
138  * SWAP_META_PAGES-aligned and sized range to the address of an
139  * on-disk swap block (or SWAPBLK_NONE). The collection of these
140  * mappings for an entire vm object is implemented as a pc-trie.
141  */
142 struct swblk {
143 	vm_pindex_t	p;
144 	daddr_t		d[SWAP_META_PAGES];
145 };
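/*
 * Illustrative lookup sketch (not compiled here): given an object and a
 * page index, the on-disk block is found by rounding the index down to
 * a SWAP_META_PAGES boundary for the pctrie lookup and using the
 * remainder to index d[]:
 *
 *	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 *	    rounddown(pindex, SWAP_META_PAGES));
 *	blk = (sb != NULL) ? sb->d[pindex % SWAP_META_PAGES] : SWAPBLK_NONE;
 *
 * This is the pattern followed by swap_pager_unswapped() and the
 * swp_pager_meta_*() helpers below.
 */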
146 
147 static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
148 static struct mtx sw_dev_mtx;
149 static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
150 static struct swdevt *swdevhd;	/* Allocate from here next */
151 static int nswapdev;		/* Number of swap devices */
152 int swap_pager_avail;
153 static struct sx swdev_syscall_lock;	/* serialize swap(on|off) */
154 
155 static u_long swap_reserved;
156 static u_long swap_total;
157 static int sysctl_page_shift(SYSCTL_HANDLER_ARGS);
158 SYSCTL_PROC(_vm, OID_AUTO, swap_reserved, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
159     &swap_reserved, 0, sysctl_page_shift, "A",
160     "Amount of swap storage needed to back all allocated anonymous memory.");
161 SYSCTL_PROC(_vm, OID_AUTO, swap_total, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
162     &swap_total, 0, sysctl_page_shift, "A",
163     "Total amount of available swap storage.");
164 
165 static int overcommit = 0;
166 SYSCTL_INT(_vm, VM_OVERCOMMIT, overcommit, CTLFLAG_RW, &overcommit, 0,
167     "Configure virtual memory overcommit behavior. See tuning(7) "
168     "for details.");
169 static unsigned long swzone;
170 SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
171     "Actual size of swap metadata zone");
172 static unsigned long swap_maxpages;
173 SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
174     "Maximum amount of swap supported");
175 
176 /* bits from overcommit */
177 #define	SWAP_RESERVE_FORCE_ON		(1 << 0)
178 #define	SWAP_RESERVE_RLIMIT_ON		(1 << 1)
179 #define	SWAP_RESERVE_ALLOW_NONWIRED	(1 << 2)
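/*
 * Tuning example (an assumption here; tuning(7) is the authoritative
 * reference): vm.overcommit=1 (SWAP_RESERVE_FORCE_ON) makes
 * swap_reserve() fail once reservations would exceed the configured
 * backing store, vm.overcommit=3 additionally enforces RLIMIT_SWAP per
 * real uid, and adding SWAP_RESERVE_ALLOW_NONWIRED (bit 2) lets
 * non-wired physical memory count toward the limit:
 *
 *	# sysctl vm.overcommit=3
 */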
180 
181 static int
182 sysctl_page_shift(SYSCTL_HANDLER_ARGS)
183 {
184 	uint64_t newval;
185 	u_long value = *(u_long *)arg1;
186 
187 	newval = ((uint64_t)value) << PAGE_SHIFT;
188 	return (sysctl_handle_64(oidp, &newval, 0, req));
189 }
190 
191 int
192 swap_reserve(vm_ooffset_t incr)
193 {
194 
195 	return (swap_reserve_by_cred(incr, curthread->td_ucred));
196 }
197 
198 int
199 swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
200 {
201 	u_long r, s, prev, pincr;
202 	int res, error;
203 	static int curfail;
204 	static struct timeval lastfail;
205 	struct uidinfo *uip;
206 
207 	uip = cred->cr_ruidinfo;
208 
209 	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
210 	    (uintmax_t)incr));
211 
212 #ifdef RACCT
213 	if (racct_enable) {
214 		PROC_LOCK(curproc);
215 		error = racct_add(curproc, RACCT_SWAP, incr);
216 		PROC_UNLOCK(curproc);
217 		if (error != 0)
218 			return (0);
219 	}
220 #endif
221 
222 	pincr = atop(incr);
223 	res = 0;
224 	prev = atomic_fetchadd_long(&swap_reserved, pincr);
225 	r = prev + pincr;
226 	if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) {
227 		s = vm_cnt.v_page_count - vm_cnt.v_free_reserved -
228 		    vm_wire_count();
229 	} else
230 		s = 0;
231 	s += swap_total;
232 	if ((overcommit & SWAP_RESERVE_FORCE_ON) == 0 || r <= s ||
233 	    (error = priv_check(curthread, PRIV_VM_SWAP_NOQUOTA)) == 0) {
234 		res = 1;
235 	} else {
236 		prev = atomic_fetchadd_long(&swap_reserved, -pincr);
237 		if (prev < pincr)
238 			panic("swap_reserved < incr on overcommit fail");
239 	}
240 	if (res) {
241 		prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
242 		if ((overcommit & SWAP_RESERVE_RLIMIT_ON) != 0 &&
243 		    prev + pincr > lim_cur(curthread, RLIMIT_SWAP) &&
244 		    priv_check(curthread, PRIV_VM_SWAP_NORLIMIT)) {
245 			res = 0;
246 			prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
247 			if (prev < pincr)
248 				panic("uip->ui_vmsize < incr on overcommit fail");
249 		}
250 	}
251 	if (!res && ppsratecheck(&lastfail, &curfail, 1)) {
252 		printf("uid %d, pid %d: swap reservation for %jd bytes failed\n",
253 		    uip->ui_uid, curproc->p_pid, incr);
254 	}
255 
256 #ifdef RACCT
257 	if (racct_enable && !res) {
258 		PROC_LOCK(curproc);
259 		racct_sub(curproc, RACCT_SWAP, incr);
260 		PROC_UNLOCK(curproc);
261 	}
262 #endif
263 
264 	return (res);
265 }
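/*
 * Usage sketch (illustrative): a successful reservation must later be
 * paired with a release against the same cred.  swap_pager_alloc_init()
 * below shows the allocation side of this contract; the matching
 * release is issued when the object's charge is dropped:
 *
 *	if (!swap_reserve_by_cred(size, cred))
 *		return (NULL);
 *	crhold(cred);
 *	...
 *	swap_release_by_cred(size, cred);
 */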
266 
267 void
268 swap_reserve_force(vm_ooffset_t incr)
269 {
270 	struct uidinfo *uip;
271 	u_long pincr;
272 
273 	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK", __func__,
274 	    (uintmax_t)incr));
275 
276 	PROC_LOCK(curproc);
277 #ifdef RACCT
278 	if (racct_enable)
279 		racct_add_force(curproc, RACCT_SWAP, incr);
280 #endif
281 	pincr = atop(incr);
282 	atomic_add_long(&swap_reserved, pincr);
283 	uip = curproc->p_ucred->cr_ruidinfo;
284 	atomic_add_long(&uip->ui_vmsize, pincr);
285 	PROC_UNLOCK(curproc);
286 }
287 
288 void
289 swap_release(vm_ooffset_t decr)
290 {
291 	struct ucred *cred;
292 
293 	PROC_LOCK(curproc);
294 	cred = curproc->p_ucred;
295 	swap_release_by_cred(decr, cred);
296 	PROC_UNLOCK(curproc);
297 }
298 
299 void
300 swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
301 {
302 	u_long prev, pdecr;
303 	struct uidinfo *uip;
304 
305 	uip = cred->cr_ruidinfo;
306 
307 	KASSERT((decr & PAGE_MASK) == 0, ("%s: decr: %ju & PAGE_MASK", __func__,
308 	    (uintmax_t)decr));
309 
310 	pdecr = atop(decr);
311 	prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
312 	if (prev < pdecr)
313 		panic("swap_reserved < decr");
314 
315 	prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
316 	if (prev < pdecr)
317 		printf("negative vmsize for uid = %d\n", uip->ui_uid);
318 #ifdef RACCT
319 	if (racct_enable)
320 		racct_sub_cred(cred, RACCT_SWAP, decr);
321 #endif
322 }
323 
324 static int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
325 static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis) */
326 static struct mtx swbuf_mtx;	/* to sync nsw_wcount_async */
327 static int nsw_wcount_async;	/* limit async write buffers */
328 static int nsw_wcount_async_max;	/* assigned maximum			*/
329 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
330 
331 static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
332 SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
333     CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
334     "Maximum running async swap ops");
335 static int sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS);
336 SYSCTL_PROC(_vm, OID_AUTO, swap_fragmentation, CTLTYPE_STRING | CTLFLAG_RD |
337     CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_fragmentation, "A",
338     "Swap Fragmentation Info");
339 
340 static struct sx sw_alloc_sx;
341 
342 /*
343  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
344  * of searching a named list by hashing it just a little.
345  */
346 
347 #define NOBJLISTS		8
348 
349 #define NOBJLIST(handle)	\
350 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
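/*
 * The >> 4 is a small hash heuristic (an assumption about callers):
 * handles are typically pointer-sized and aligned, so their low bits
 * are zero; shifting before masking spreads consecutive handles across
 * the NOBJLISTS buckets instead of clustering them in bucket 0.
 */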
351 
352 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
353 static uma_zone_t swwbuf_zone;
354 static uma_zone_t swrbuf_zone;
355 static uma_zone_t swblk_zone;
356 static uma_zone_t swpctrie_zone;
357 
358 /*
359  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
360  * calls hooked from other parts of the VM system and do not appear here
361  * (see vm/swap_pager.h).
362  */
363 static vm_object_t
364 		swap_pager_alloc(void *handle, vm_ooffset_t size,
365 		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
366 static void	swap_pager_dealloc(vm_object_t object);
367 static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
368     int *);
369 static int	swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
370     int *, pgo_getpages_iodone_t, void *);
371 static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
372 static boolean_t
373 		swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
374 static void	swap_pager_init(void);
375 static void	swap_pager_unswapped(vm_page_t);
376 static void	swap_pager_swapoff(struct swdevt *sp);
377 static void	swap_pager_update_writecount(vm_object_t object,
378     vm_offset_t start, vm_offset_t end);
379 static void	swap_pager_release_writecount(vm_object_t object,
380     vm_offset_t start, vm_offset_t end);
381 
382 struct pagerops swappagerops = {
383 	.pgo_init =	swap_pager_init,	/* early system initialization of pager	*/
384 	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
385 	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
386 	.pgo_getpages =	swap_pager_getpages,	/* pagein				*/
387 	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async)		*/
388 	.pgo_putpages =	swap_pager_putpages,	/* pageout				*/
389 	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page	*/
390 	.pgo_pageunswapped = swap_pager_unswapped,	/* remove swap related to page		*/
391 	.pgo_update_writecount = swap_pager_update_writecount,
392 	.pgo_release_writecount = swap_pager_release_writecount,
393 };
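/*
 * Dispatch sketch (an assumption about the generic pager layer): the VM
 * system reaches these methods indirectly; e.g., a page-in request for
 * an OBJT_SWAP object goes through vm_pager_get_pages(), which invokes
 * swappagerops.pgo_getpages, i.e., swap_pager_getpages() below.
 */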
394 
395 /*
396  * swap_*() routines are externally accessible.  swp_*() routines are
397  * internal.
398  */
399 static int nswap_lowat = 128;	/* in pages, set swap_pager_almost_full */
400 static int nswap_hiwat = 512;	/* in pages, clear swap_pager_almost_full */
401 
402 SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
403     "Maximum size of a swap block in pages");
404 
405 static void	swp_sizecheck(void);
406 static void	swp_pager_async_iodone(struct buf *bp);
407 static bool	swp_pager_swblk_empty(struct swblk *sb, int start, int limit);
408 static void	swp_pager_free_empty_swblk(vm_object_t, struct swblk *sb);
409 static int	swapongeom(struct vnode *);
410 static int	swaponvp(struct thread *, struct vnode *, u_long);
411 static int	swapoff_one(struct swdevt *sp, struct ucred *cred);
412 
413 /*
414  * Swap bitmap functions
415  */
416 static void	swp_pager_freeswapspace(daddr_t blk, daddr_t npages);
417 static daddr_t	swp_pager_getswapspace(int *npages, int limit);
418 
419 /*
420  * Metadata functions
421  */
422 static daddr_t swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
423 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t);
424 static void swp_pager_meta_transfer(vm_object_t src, vm_object_t dst,
425     vm_pindex_t pindex, vm_pindex_t count);
426 static void swp_pager_meta_free_all(vm_object_t);
427 static daddr_t swp_pager_meta_lookup(vm_object_t, vm_pindex_t);
428 
429 static void
430 swp_pager_init_freerange(daddr_t *start, daddr_t *num)
431 {
432 
433 	*start = SWAPBLK_NONE;
434 	*num = 0;
435 }
436 
437 static void
438 swp_pager_update_freerange(daddr_t *start, daddr_t *num, daddr_t addr)
439 {
440 
441 	if (*start + *num == addr) {
442 		(*num)++;
443 	} else {
444 		swp_pager_freeswapspace(*start, *num);
445 		*start = addr;
446 		*num = 1;
447 	}
448 }
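/*
 * Typical use (sketch, mirroring swap_pager_reserve() and
 * swap_pager_putpages() below): accumulate runs of contiguous freed
 * blocks and hand each completed run to swp_pager_freeswapspace() in a
 * single call:
 *
 *	daddr_t s_free, n_free;
 *
 *	swp_pager_init_freerange(&s_free, &n_free);
 *	(for each swap block addr being released)
 *		swp_pager_update_freerange(&s_free, &n_free, addr);
 *	swp_pager_freeswapspace(s_free, n_free);
 *
 * The initial (SWAPBLK_NONE, 0) pair is safe to pass to
 * swp_pager_freeswapspace(), which ignores zero-length frees, so an
 * empty run needs no special casing.
 */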
449 
450 static void *
451 swblk_trie_alloc(struct pctrie *ptree)
452 {
453 
454 	return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
455 	    M_USE_RESERVE : 0)));
456 }
457 
458 static void
459 swblk_trie_free(struct pctrie *ptree, void *node)
460 {
461 
462 	uma_zfree(swpctrie_zone, node);
463 }
464 
465 PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);
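/*
 * PCTRIE_DEFINE() generates the SWAP_PCTRIE_* accessors used in this
 * file (e.g., SWAP_PCTRIE_LOOKUP and SWAP_PCTRIE_LOOKUP_GE), keyed on
 * the swblk 'p' field and backed by the allocator callbacks above,
 * which draw nodes from swpctrie_zone.
 */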
466 
467 /*
468  * SWP_SIZECHECK() -	update swap_pager_full indication
469  *
470  *	update the swap_pager_almost_full indication and warn when we are
471  *	about to run out of swap space, using lowat/hiwat hysteresis.
472  *
473  *	Clear swap_pager_full (task killing) indication when lowat is met.
474  *
475  *	No restrictions on call
476  *	This routine may not block.
477  */
478 static void
479 swp_sizecheck(void)
480 {
481 
482 	if (swap_pager_avail < nswap_lowat) {
483 		if (swap_pager_almost_full == 0) {
484 			printf("swap_pager: out of swap space\n");
485 			swap_pager_almost_full = 1;
486 		}
487 	} else {
488 		swap_pager_full = 0;
489 		if (swap_pager_avail > nswap_hiwat)
490 			swap_pager_almost_full = 0;
491 	}
492 }
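/*
 * Worked example of the hysteresis: with the defaults above
 * (nswap_lowat = 128, nswap_hiwat = 512), the first drop of
 * swap_pager_avail below 128 pages prints the warning and sets
 * swap_pager_almost_full; the flag is only cleared once availability
 * climbs past 512 pages, so the warning cannot flap while availability
 * hovers near the low-water mark.
 */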
493 
494 /*
495  * SWAP_PAGER_INIT() -	initialize the swap pager!
496  *
497  *	Expected to be started from system init.  NOTE:  This code is run
498  *	before much else so be careful what you depend on.  Most of the VM
499  *	system has yet to be initialized at this point.
500  */
501 static void
502 swap_pager_init(void)
503 {
504 	/*
505 	 * Initialize object lists
506 	 */
507 	int i;
508 
509 	for (i = 0; i < NOBJLISTS; ++i)
510 		TAILQ_INIT(&swap_pager_object_list[i]);
511 	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
512 	sx_init(&sw_alloc_sx, "swspsx");
513 	sx_init(&swdev_syscall_lock, "swsysc");
514 }
515 
516 /*
517  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
518  *
519  *	Expected to be started from pageout process once, prior to entering
520  *	its main loop.
521  */
522 void
523 swap_pager_swap_init(void)
524 {
525 	unsigned long n, n2;
526 
527 	/*
528 	 * Number of in-transit swap bp operations.  Don't
529 	 * exhaust the pbufs completely.  Make sure we
530 	 * initialize workable values (0 will work for hysteresis
531 	 * but it isn't very efficient).
532 	 *
533 	 * The nsw_cluster_max is constrained by the bp->b_pages[]
534 	 * array, which has MAXPHYS / PAGE_SIZE entries, and our locally
535 	 * defined MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
536 	 * constrained by the swap device interleave stripe size.
537 	 *
538 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
539 	 * designed to prevent other I/O from having high latencies due to
540 	 * our pageout I/O.  The value 4 works well for one or two active swap
541 	 * devices but is probably a little low if you have more.  Even so,
542 	 * a higher value would probably generate only a limited improvement
543 	 * with three or four active swap devices since the system does not
544 	 * typically have to pageout at extreme bandwidths.   We will want
545 	 * at least 2 per swap device, and 4 is a pretty good value if you
546 	 * have one NFS swap device due to the command/ack latency over NFS.
547 	 * So it all works out pretty well.
548 	 */
549 	nsw_cluster_max = min(MAXPHYS / PAGE_SIZE, MAX_PAGEOUT_CLUSTER);
550 
551 	nsw_wcount_async = 4;
552 	nsw_wcount_async_max = nsw_wcount_async;
553 	mtx_init(&swbuf_mtx, "async swbuf mutex", NULL, MTX_DEF);
554 
555 	swwbuf_zone = pbuf_zsecond_create("swwbuf", nswbuf / 4);
556 	swrbuf_zone = pbuf_zsecond_create("swrbuf", nswbuf / 2);
557 
558 	/*
559 	 * Initialize our zone, taking the user's requested size or
560 	 * estimating the number we need based on the number of pages
561 	 * in the system.
562 	 */
563 	n = maxswzone != 0 ? maxswzone / sizeof(struct swblk) :
564 	    vm_cnt.v_page_count / 2;
565 	swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
566 	    pctrie_zone_init, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
567 	if (swpctrie_zone == NULL)
568 		panic("failed to create swap pctrie zone.");
569 	swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
570 	    NULL, NULL, _Alignof(struct swblk) - 1, UMA_ZONE_VM);
571 	if (swblk_zone == NULL)
572 		panic("failed to create swap blk zone.");
573 	n2 = n;
574 	do {
575 		if (uma_zone_reserve_kva(swblk_zone, n))
576 			break;
577 		/*
578 		 * if the allocation failed, try a zone two thirds the
579 		 * size of the previous attempt.
580 		 */
581 		n -= ((n + 2) / 3);
582 	} while (n > 0);
583 
584 	/*
585 	 * Often uma_zone_reserve_kva() cannot reserve exactly the
586 	 * requested size.  Account for the difference when
587 	 * calculating swap_maxpages.
588 	 */
589 	n = uma_zone_get_max(swblk_zone);
590 
591 	if (n < n2)
592 		printf("Swap blk zone entries changed from %lu to %lu.\n",
593 		    n2, n);
594 	/* absolute maximum we can handle assuming 100% efficiency */
595 	swap_maxpages = n * SWAP_META_PAGES;
596 	swzone = n * sizeof(struct swblk);
597 	if (!uma_zone_reserve_kva(swpctrie_zone, n))
598 		printf("Cannot reserve swap pctrie zone, "
599 		    "reduce kern.maxswzone.\n");
600 }
601 
602 static vm_object_t
603 swap_pager_alloc_init(void *handle, struct ucred *cred, vm_ooffset_t size,
604     vm_ooffset_t offset)
605 {
606 	vm_object_t object;
607 
608 	if (cred != NULL) {
609 		if (!swap_reserve_by_cred(size, cred))
610 			return (NULL);
611 		crhold(cred);
612 	}
613 
614 	/*
615 	 * The un_pager.swp.swp_blks trie is initialized by
616 	 * vm_object_allocate() to ensure the correct order of
617 	 * visibility to other threads.
618 	 */
619 	object = vm_object_allocate(OBJT_SWAP, OFF_TO_IDX(offset +
620 	    PAGE_MASK + size));
621 
622 	object->un_pager.swp.writemappings = 0;
623 	object->handle = handle;
624 	if (cred != NULL) {
625 		object->cred = cred;
626 		object->charge = size;
627 	}
628 	return (object);
629 }
630 
631 /*
632  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
633  *			its metadata structures.
634  *
635  *	This routine is called from the mmap and fork code to create a new
636  *	OBJT_SWAP object.
637  *
638  *	This routine must ensure that no live duplicate is created for
639  *	the named object request, which is protected against by
640  *	holding the sw_alloc_sx lock in case handle != NULL.
641  */
642 static vm_object_t
643 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
644     vm_ooffset_t offset, struct ucred *cred)
645 {
646 	vm_object_t object;
647 
648 	if (handle != NULL) {
649 		/*
650 		 * Reference existing named region or allocate new one.  There
651 		 * should not be a race here against swp_pager_meta_build()
652 		 * as called from vm_page_remove() with regard to the lookup
653 		 * of the handle.
654 		 */
655 		sx_xlock(&sw_alloc_sx);
656 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
657 		if (object == NULL) {
658 			object = swap_pager_alloc_init(handle, cred, size,
659 			    offset);
660 			if (object != NULL) {
661 				TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
662 				    object, pager_object_list);
663 			}
664 		}
665 		sx_xunlock(&sw_alloc_sx);
666 	} else {
667 		object = swap_pager_alloc_init(handle, cred, size, offset);
668 	}
669 	return (object);
670 }
671 
672 /*
673  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
674  *
675  *	The swap backing for the object is destroyed.  The code is
676  *	designed such that we can reinstantiate it later, but this
677  *	routine is typically called only when the entire object is
678  *	about to be destroyed.
679  *
680  *	The object must be locked.
681  */
682 static void
683 swap_pager_dealloc(vm_object_t object)
684 {
685 
686 	VM_OBJECT_ASSERT_WLOCKED(object);
687 	KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));
688 
689 	/*
690 	 * Remove from list right away so lookups will fail if we block for
691 	 * pageout completion.
692 	 */
693 	if ((object->flags & OBJ_ANON) == 0 && object->handle != NULL) {
694 		VM_OBJECT_WUNLOCK(object);
695 		sx_xlock(&sw_alloc_sx);
696 		TAILQ_REMOVE(NOBJLIST(object->handle), object,
697 		    pager_object_list);
698 		sx_xunlock(&sw_alloc_sx);
699 		VM_OBJECT_WLOCK(object);
700 	}
701 
702 	vm_object_pip_wait(object, "swpdea");
703 
704 	/*
705 	 * Free all remaining metadata.  We only bother to free it from
706 	 * the swap meta data.  We do not attempt to free swapblk's still
707 	 * associated with vm_page_t's for this object.  We do not care
708 	 * if paging is still in progress on some objects.
709 	 */
710 	swp_pager_meta_free_all(object);
711 	object->handle = NULL;
712 	object->type = OBJT_DEAD;
713 }
714 
715 /************************************************************************
716  *			SWAP PAGER BITMAP ROUTINES			*
717  ************************************************************************/
718 
719 /*
720  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
721  *
722  *	Allocate swap for up to the requested number of pages, and at
723  *	least a minimum number of pages.  The starting swap block number
724  *	(a page index) is returned or SWAPBLK_NONE if the allocation
725  *	failed.
726  *
727  *	Also has the side effect of advising that somebody made a mistake
728  *	when they configured swap and didn't configure enough.
729  *
730  *	This routine may not sleep.
731  *
732  *	We allocate in round-robin fashion from the configured devices.
733  */
734 static daddr_t
735 swp_pager_getswapspace(int *io_npages, int limit)
736 {
737 	daddr_t blk;
738 	struct swdevt *sp;
739 	int mpages, npages;
740 
741 	blk = SWAPBLK_NONE;
742 	mpages = *io_npages;
743 	npages = imin(BLIST_MAX_ALLOC, mpages);
744 	mtx_lock(&sw_dev_mtx);
745 	sp = swdevhd;
746 	while (!TAILQ_EMPTY(&swtailq)) {
747 		if (sp == NULL)
748 			sp = TAILQ_FIRST(&swtailq);
749 		if ((sp->sw_flags & SW_CLOSING) == 0)
750 			blk = blist_alloc(sp->sw_blist, &npages, mpages);
751 		if (blk != SWAPBLK_NONE)
752 			break;
753 		sp = TAILQ_NEXT(sp, sw_list);
754 		if (swdevhd == sp) {
755 			if (npages <= limit)
756 				break;
757 			mpages = npages - 1;
758 			npages >>= 1;
759 		}
760 	}
761 	if (blk != SWAPBLK_NONE) {
762 		*io_npages = npages;
763 		blk += sp->sw_first;
764 		sp->sw_used += npages;
765 		swap_pager_avail -= npages;
766 		swp_sizecheck();
767 		swdevhd = TAILQ_NEXT(sp, sw_list);
768 	} else {
769 		if (swap_pager_full != 2) {
770 			printf("swp_pager_getswapspace(%d): failed\n",
771 			    *io_npages);
772 			swap_pager_full = 2;
773 			swap_pager_almost_full = 1;
774 		}
775 		swdevhd = NULL;
776 	}
777 	mtx_unlock(&sw_dev_mtx);
778 	return (blk);
779 }
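/*
 * Allocation walk (illustrative): a request for 32 pages first asks
 * each device, round-robin, for a run of up to 32 contiguous blocks.
 * Each time the scan wraps past swdevhd without success the acceptable
 * minimum is halved (32, 16, 8, ...), and the search gives up once
 * that minimum would no longer exceed 'limit'.  Thus
 * swap_pager_reserve() passes limit = 1 to accept any nonempty run,
 * while swap_pager_putpages() passes limit = 4 to avoid issuing many
 * tiny pageout I/Os.
 */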
780 
781 static bool
782 swp_pager_isondev(daddr_t blk, struct swdevt *sp)
783 {
784 
785 	return (blk >= sp->sw_first && blk < sp->sw_end);
786 }
787 
788 static void
789 swp_pager_strategy(struct buf *bp)
790 {
791 	struct swdevt *sp;
792 
793 	mtx_lock(&sw_dev_mtx);
794 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
795 		if (swp_pager_isondev(bp->b_blkno, sp)) {
796 			mtx_unlock(&sw_dev_mtx);
797 			if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
798 			    unmapped_buf_allowed) {
799 				bp->b_data = unmapped_buf;
800 				bp->b_offset = 0;
801 			} else {
802 				pmap_qenter((vm_offset_t)bp->b_data,
803 				    &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
804 			}
805 			sp->sw_strategy(bp, sp);
806 			return;
807 		}
808 	}
809 	panic("Swapdev not found");
810 }
811 
812 
813 /*
814  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
815  *
816  *	This routine returns the specified swap blocks back to the bitmap.
817  *
818  *	This routine may not sleep.
819  */
820 static void
821 swp_pager_freeswapspace(daddr_t blk, daddr_t npages)
822 {
823 	struct swdevt *sp;
824 
825 	if (npages == 0)
826 		return;
827 	mtx_lock(&sw_dev_mtx);
828 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
829 		if (swp_pager_isondev(blk, sp)) {
830 			sp->sw_used -= npages;
831 			/*
832 			 * If we are attempting to stop swapping on
833 			 * this device, we don't want to mark any
834 			 * blocks free lest they be reused.
835 			 */
836 			if ((sp->sw_flags & SW_CLOSING) == 0) {
837 				blist_free(sp->sw_blist, blk - sp->sw_first,
838 				    npages);
839 				swap_pager_avail += npages;
840 				swp_sizecheck();
841 			}
842 			mtx_unlock(&sw_dev_mtx);
843 			return;
844 		}
845 	}
846 	panic("Swapdev not found");
847 }
848 
849 /*
850  * SYSCTL_SWAP_FRAGMENTATION() -	produce raw swap space stats
851  */
852 static int
853 sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS)
854 {
855 	struct sbuf sbuf;
856 	struct swdevt *sp;
857 	const char *devname;
858 	int error;
859 
860 	error = sysctl_wire_old_buffer(req, 0);
861 	if (error != 0)
862 		return (error);
863 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
864 	mtx_lock(&sw_dev_mtx);
865 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
866 		if (vn_isdisk(sp->sw_vp, NULL))
867 			devname = devtoname(sp->sw_vp->v_rdev);
868 		else
869 			devname = "[file]";
870 		sbuf_printf(&sbuf, "\nFree space on device %s:\n", devname);
871 		blist_stats(sp->sw_blist, &sbuf);
872 	}
873 	mtx_unlock(&sw_dev_mtx);
874 	error = sbuf_finish(&sbuf);
875 	sbuf_delete(&sbuf);
876 	return (error);
877 }
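/*
 * Example: this handler backs the vm.swap_fragmentation sysctl, so the
 * free-space layout can be inspected from userland:
 *
 *	# sysctl vm.swap_fragmentation
 *
 * For each configured device it emits a "Free space on device ..."
 * header followed by that device's blist_stats() summary.
 */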
878 
879 /*
880  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
881  *				range within an object.
882  *
883  *	This is a globally accessible routine.
884  *
885  *	This routine removes swapblk assignments from swap metadata.
886  *
887  *	The external callers of this routine typically have already destroyed
888  *	or renamed vm_page_t's associated with this range in the object so
889  *	we should be ok.
890  *
891  *	The object must be locked.
892  */
893 void
894 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
895 {
896 
897 	swp_pager_meta_free(object, start, size);
898 }
899 
900 /*
901  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
902  *
903  *	Assigns swap blocks to the specified range within the object.  The
904  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
905  *
906  *	Returns 0 on success, -1 on failure.
907  */
908 int
909 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
910 {
911 	daddr_t addr, blk, n_free, s_free;
912 	int i, j, n;
913 
914 	swp_pager_init_freerange(&s_free, &n_free);
915 	VM_OBJECT_WLOCK(object);
916 	for (i = 0; i < size; i += n) {
917 		n = size - i;
918 		blk = swp_pager_getswapspace(&n, 1);
919 		if (blk == SWAPBLK_NONE) {
920 			swp_pager_meta_free(object, start, i);
921 			VM_OBJECT_WUNLOCK(object);
922 			return (-1);
923 		}
924 		for (j = 0; j < n; ++j) {
925 			addr = swp_pager_meta_build(object,
926 			    start + i + j, blk + j);
927 			if (addr != SWAPBLK_NONE)
928 				swp_pager_update_freerange(&s_free, &n_free,
929 				    addr);
930 		}
931 	}
932 	swp_pager_freeswapspace(s_free, n_free);
933 	VM_OBJECT_WUNLOCK(object);
934 	return (0);
935 }
936 
937 static bool
938 swp_pager_xfer_source(vm_object_t srcobject, vm_object_t dstobject,
939     vm_pindex_t pindex, daddr_t addr)
940 {
941 	daddr_t dstaddr;
942 
943 	KASSERT(srcobject->type == OBJT_SWAP,
944 	    ("%s: Srcobject not swappable", __func__));
945 	if (dstobject->type == OBJT_SWAP &&
946 	    swp_pager_meta_lookup(dstobject, pindex) != SWAPBLK_NONE) {
947 		/* Caller should destroy the source block. */
948 		return (false);
949 	}
950 
951 	/*
952 	 * Destination has no swapblk and is not resident, transfer source.
953 	 * swp_pager_meta_build() can sleep.
954 	 */
955 	vm_object_pip_add(srcobject, 1);
956 	VM_OBJECT_WUNLOCK(srcobject);
957 	vm_object_pip_add(dstobject, 1);
958 	dstaddr = swp_pager_meta_build(dstobject, pindex, addr);
959 	KASSERT(dstaddr == SWAPBLK_NONE,
960 	    ("Unexpected destination swapblk"));
961 	vm_object_pip_wakeup(dstobject);
962 	VM_OBJECT_WLOCK(srcobject);
963 	vm_object_pip_wakeup(srcobject);
964 	return (true);
965 }
966 
967 /*
968  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
969  *			and destroy the source.
970  *
971  *	Copy any valid swapblks from the source to the destination.  In
972  *	cases where both the source and destination have a valid swapblk,
973  *	we keep the destination's.
974  *
975  *	This routine is allowed to sleep.  It may sleep allocating metadata
976  *	indirectly through swp_pager_meta_build() or if paging is still in
977  *	progress on the source.
978  *
979  *	The source object contains no vm_page_t's (which is just as well).
980  *
981  *	The source object is of type OBJT_SWAP.
982  *
983  *	The source and destination objects must be locked.
984  *	Both object locks may temporarily be released.
985  */
986 void
987 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
988     vm_pindex_t offset, int destroysource)
989 {
990 
991 	VM_OBJECT_ASSERT_WLOCKED(srcobject);
992 	VM_OBJECT_ASSERT_WLOCKED(dstobject);
993 
994 	/*
995 	 * If destroysource is set, we remove the source object from the
996 	 * swap_pager internal queue now.
997 	 */
998 	if (destroysource && (srcobject->flags & OBJ_ANON) == 0 &&
999 	    srcobject->handle != NULL) {
1000 		vm_object_pip_add(srcobject, 1);
1001 		VM_OBJECT_WUNLOCK(srcobject);
1002 		vm_object_pip_add(dstobject, 1);
1003 		VM_OBJECT_WUNLOCK(dstobject);
1004 		sx_xlock(&sw_alloc_sx);
1005 		TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
1006 		    pager_object_list);
1007 		sx_xunlock(&sw_alloc_sx);
1008 		VM_OBJECT_WLOCK(dstobject);
1009 		vm_object_pip_wakeup(dstobject);
1010 		VM_OBJECT_WLOCK(srcobject);
1011 		vm_object_pip_wakeup(srcobject);
1012 	}
1013 
1014 	/*
1015 	 * Transfer source to destination.
1016 	 */
1017 	swp_pager_meta_transfer(srcobject, dstobject, offset, dstobject->size);
1018 
1019 	/*
1020 	 * Free left over swap blocks in source.
1021 	 *
1022 	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
1023 	 * double-remove the object from the swap queues.
1024 	 */
1025 	if (destroysource) {
1026 		swp_pager_meta_free_all(srcobject);
1027 		/*
1028 		 * Reverting the type is not necessary, the caller is going
1029 		 * to destroy srcobject directly, but I'm doing it here
1030 		 * for consistency since we've removed the object from its
1031 		 * queues.
1032 		 */
1033 		srcobject->type = OBJT_DEFAULT;
1034 	}
1035 }
1036 
1037 /*
1038  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
1039  *				the requested page.
1040  *
1041  *	We determine whether good backing store exists for the requested
1042  *	page and return TRUE if it does, FALSE if it doesn't.
1043  *
1044  *	If TRUE, we also try to determine how much valid, contiguous backing
1045  *	store exists before and after the requested page.
1046  */
1047 static boolean_t
1048 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
1049     int *after)
1050 {
1051 	daddr_t blk, blk0;
1052 	int i;
1053 
1054 	VM_OBJECT_ASSERT_LOCKED(object);
1055 	KASSERT(object->type == OBJT_SWAP,
1056 	    ("%s: object not swappable", __func__));
1057 
1058 	/*
1059 	 * do we have good backing store at the requested index ?
1060 	 */
1061 	 * do we have good backing store at the requested index?
1062 	if (blk0 == SWAPBLK_NONE) {
1063 		if (before)
1064 			*before = 0;
1065 		if (after)
1066 			*after = 0;
1067 		return (FALSE);
1068 	}
1069 
1070 	/*
1071 	 * find backwards-looking contiguous good backing store
1072 	 */
1073 	if (before != NULL) {
1074 		for (i = 1; i < SWB_NPAGES; i++) {
1075 			if (i > pindex)
1076 				break;
1077 			blk = swp_pager_meta_lookup(object, pindex - i);
1078 			if (blk != blk0 - i)
1079 				break;
1080 		}
1081 		*before = i - 1;
1082 	}
1083 
1084 	/*
1085 	 * find forward-looking contiguous good backing store
1086 	 */
1087 	if (after != NULL) {
1088 		for (i = 1; i < SWB_NPAGES; i++) {
1089 			blk = swp_pager_meta_lookup(object, pindex + i);
1090 			if (blk != blk0 + i)
1091 				break;
1092 		}
1093 		*after = i - 1;
1094 	}
1095 	return (TRUE);
1096 }
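/*
 * Example (sketch): if pindex maps to disk block B, pindex - 1 maps to
 * B - 1, and pindex + 1 and pindex + 2 map to B + 1 and B + 2, then a
 * successful call returns TRUE with *before == 1 and *after == 2,
 * allowing the caller to issue one contiguous read for the whole run.
 * The scan is bounded by SWB_NPAGES in each direction.
 */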
1097 
1098 /*
1099  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
1100  *
1101  *	This removes any associated swap backing store, whether valid or
1102  *	not, from the page.
1103  *
1104  *	This routine is typically called when a page is made dirty, at
1105  *	which point any associated swap can be freed.  MADV_FREE also
1106  *	calls us in a special-case situation.
1107  *
1108  *	NOTE!!!  If the page is clean and the swap was valid, the caller
1109  *	should make the page dirty before calling this routine.  This routine
1110  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
1111  *	depends on it.
1112  *
1113  *	This routine may not sleep.
1114  *
1115  *	The object containing the page must be locked.
1116  */
1117 static void
1118 swap_pager_unswapped(vm_page_t m)
1119 {
1120 	struct swblk *sb;
1121 
1122 	VM_OBJECT_ASSERT_WLOCKED(m->object);
1123 
1124 	/*
1125 	 * The meta data only exists if the object is OBJT_SWAP
1126 	 * and even then might not be allocated yet.
1127 	 */
1128 	KASSERT(m->object->type == OBJT_SWAP,
1129 	    ("Free object not swappable"));
1130 
1131 	sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks,
1132 	    rounddown(m->pindex, SWAP_META_PAGES));
1133 	if (sb == NULL)
1134 		return;
1135 	if (sb->d[m->pindex % SWAP_META_PAGES] == SWAPBLK_NONE)
1136 		return;
1137 	swp_pager_freeswapspace(sb->d[m->pindex % SWAP_META_PAGES], 1);
1138 	sb->d[m->pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
1139 	swp_pager_free_empty_swblk(m->object, sb);
1140 }
1141 
1142 /*
1143  * swap_pager_getpages() - bring pages in from swap
1144  *
1145  *	Attempt to page in the pages in array "ma" of length "count".  The
1146  *	caller may optionally specify that additional pages preceding and
1147  *	succeeding the specified range be paged in.  The number of such pages
1148  *	is returned in the "rbehind" and "rahead" parameters, and they will
1149  *	be in the inactive queue upon return.
1150  *
1151  *	The pages in "ma" must be busied and will remain busied upon return.
1152  */
1153 static int
1154 swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
1155     int *rahead)
1156 {
1157 	struct buf *bp;
1158 	vm_page_t bm, mpred, msucc, p;
1159 	vm_pindex_t pindex;
1160 	daddr_t blk;
1161 	int i, maxahead, maxbehind, reqcount;
1162 
1163 	reqcount = count;
1164 
1165 	/*
1166 	 * Determine the final number of read-behind pages and
1167 	 * allocate them BEFORE releasing the object lock.  Otherwise,
1168 	 * there can be a problematic race with vm_object_split().
1169 	 * Specifically, vm_object_split() might first transfer pages
1170 	 * that precede ma[0] in the current object to a new object,
1171 	 * and then this function incorrectly recreates those pages as
1172 	 * read-behind pages in the current object.
1173 	 */
1174 	KASSERT(object->type == OBJT_SWAP,
1175 	    ("%s: object not swappable", __func__));
1176 	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead))
1177 		return (VM_PAGER_FAIL);
1178 
1179 	/*
1180 	 * Clip the readahead and readbehind ranges to exclude resident pages.
1181 	 */
1182 	if (rahead != NULL) {
1183 		KASSERT(reqcount - 1 <= maxahead,
1184 		    ("page count %d extends beyond swap block", reqcount));
1185 		*rahead = imin(*rahead, maxahead - (reqcount - 1));
1186 		pindex = ma[reqcount - 1]->pindex;
1187 		msucc = TAILQ_NEXT(ma[reqcount - 1], listq);
1188 		if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
1189 			*rahead = msucc->pindex - pindex - 1;
1190 	}
1191 	if (rbehind != NULL) {
1192 		*rbehind = imin(*rbehind, maxbehind);
1193 		pindex = ma[0]->pindex;
1194 		mpred = TAILQ_PREV(ma[0], pglist, listq);
1195 		if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
1196 			*rbehind = pindex - mpred->pindex - 1;
1197 	}
1198 
1199 	bm = ma[0];
1200 	for (i = 0; i < count; i++)
1201 		ma[i]->oflags |= VPO_SWAPINPROG;
1202 
1203 	/*
1204 	 * Allocate readahead and readbehind pages.
1205 	 */
1206 	if (rbehind != NULL) {
1207 		for (i = 1; i <= *rbehind; i++) {
1208 			p = vm_page_alloc(object, ma[0]->pindex - i,
1209 			    VM_ALLOC_NORMAL);
1210 			if (p == NULL)
1211 				break;
1212 			p->oflags |= VPO_SWAPINPROG;
1213 			bm = p;
1214 		}
1215 		*rbehind = i - 1;
1216 	}
1217 	if (rahead != NULL) {
1218 		for (i = 0; i < *rahead; i++) {
1219 			p = vm_page_alloc(object,
1220 			    ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
1221 			if (p == NULL)
1222 				break;
1223 			p->oflags |= VPO_SWAPINPROG;
1224 		}
1225 		*rahead = i;
1226 	}
1227 	if (rbehind != NULL)
1228 		count += *rbehind;
1229 	if (rahead != NULL)
1230 		count += *rahead;
1231 
1232 	vm_object_pip_add(object, count);
1233 
1234 	pindex = bm->pindex;
1235 	blk = swp_pager_meta_lookup(object, pindex);
1236 	KASSERT(blk != SWAPBLK_NONE,
1237 	    ("no swap block containing %p(%jx)", object, (uintmax_t)pindex));
1238 
1239 	VM_OBJECT_WUNLOCK(object);
1240 	bp = uma_zalloc(swrbuf_zone, M_WAITOK);
1241 	/* Pages cannot leave the object while busy. */
1242 	for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
1243 		MPASS(p->pindex == bm->pindex + i);
1244 		bp->b_pages[i] = p;
1245 	}
1246 
1247 	bp->b_flags |= B_PAGING;
1248 	bp->b_iocmd = BIO_READ;
1249 	bp->b_iodone = swp_pager_async_iodone;
1250 	bp->b_rcred = crhold(thread0.td_ucred);
1251 	bp->b_wcred = crhold(thread0.td_ucred);
1252 	bp->b_blkno = blk;
1253 	bp->b_bcount = PAGE_SIZE * count;
1254 	bp->b_bufsize = PAGE_SIZE * count;
1255 	bp->b_npages = count;
1256 	bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
1257 	bp->b_pgafter = rahead != NULL ? *rahead : 0;
1258 
1259 	VM_CNT_INC(v_swapin);
1260 	VM_CNT_ADD(v_swappgsin, count);
1261 
1262 	/*
1263 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1264 	 * this point because we automatically release it on completion.
1265 	 * Instead, we look at the one page we are interested in which we
1266 	 * still hold a lock on even through the I/O completion.
1267 	 *
1268 	 * The other pages in our ma[] array are also released on completion,
1269 	 * so we cannot assume they are valid anymore either.
1270 	 *
1271 	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1272 	 */
1273 	BUF_KERNPROC(bp);
1274 	swp_pager_strategy(bp);
1275 
1276 	/*
1277 	 * Wait for the pages we want to complete.  VPO_SWAPINPROG is always
1278 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1279 	 * is set in the metadata for each page in the request.
1280 	 */
1281 	VM_OBJECT_WLOCK(object);
1282 	while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
1283 		ma[0]->oflags |= VPO_SWAPSLEEP;
1284 		VM_CNT_INC(v_intrans);
1285 		if (VM_OBJECT_SLEEP(object, &object->handle, PSWP,
1286 		    "swread", hz * 20)) {
1287 			printf(
1288 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
1289 			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
1290 		}
1291 	}
1292 
1293 	/*
1294 	 * If we had an unrecoverable read error pages will not be valid.
1295 	 */
1296 	for (i = 0; i < reqcount; i++)
1297 		if (ma[i]->valid != VM_PAGE_BITS_ALL)
1298 			return (VM_PAGER_ERROR);
1299 
1300 	return (VM_PAGER_OK);
1301 
1302 	/*
1303 	 * A final note: in a low swap situation, we cannot deallocate swap
1304 	 * and mark a page dirty here because the caller is likely to mark
1305 	 * the page clean when we return, causing the page to possibly revert
1306 	 * to all-zero's later.
1307 	 */
1308 }
1309 
1310 /*
1311  * 	swap_pager_getpages_async():
1312  *
1313  *	Right now this is emulation of asynchronous operation on top of
1314  *	swap_pager_getpages().
1315  */
1316 static int
1317 swap_pager_getpages_async(vm_object_t object, vm_page_t *ma, int count,
1318     int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
1319 {
1320 	int r, error;
1321 
1322 	r = swap_pager_getpages(object, ma, count, rbehind, rahead);
1323 	VM_OBJECT_WUNLOCK(object);
1324 	switch (r) {
1325 	case VM_PAGER_OK:
1326 		error = 0;
1327 		break;
1328 	case VM_PAGER_ERROR:
1329 		error = EIO;
1330 		break;
1331 	case VM_PAGER_FAIL:
1332 		error = EINVAL;
1333 		break;
1334 	default:
1335 		panic("unhandled swap_pager_getpages() error %d", r);
1336 	}
1337 	(iodone)(arg, ma, count, error);
1338 	VM_OBJECT_WLOCK(object);
1339 
1340 	return (r);
1341 }
1342 
1343 /*
1344  *	swap_pager_putpages:
1345  *
1346  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1347  *
1348  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1349  *	are automatically converted to SWAP objects.
1350  *
1351  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
1352  *	vm_page reservation system coupled with properly written VFS devices
1353  *	should ensure that no low-memory deadlock occurs.  This is an area
1354  *	which needs work.
1355  *
1356  *	The parent has N vm_object_pip_add() references prior to
1357  *	calling us and will remove references for rtvals[] that are
1358  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1359  *	completion.
1360  *
1361  *	The parent has soft-busy'd the pages it passes us and will unbusy
1362  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1363  *	We need to unbusy the rest on I/O completion.
1364  */
1365 static void
1366 swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
1367     int flags, int *rtvals)
1368 {
1369 	struct buf *bp;
1370 	daddr_t addr, blk, n_free, s_free;
1371 	vm_page_t mreq;
1372 	int i, j, n;
1373 	bool async;
1374 
1375 	KASSERT(count == 0 || ma[0]->object == object,
1376 	    ("%s: object mismatch %p/%p",
1377 	    __func__, object, ma[0]->object));
1378 
1379 	/*
1380 	 * Step 1
1381 	 *
1382 	 * Turn object into OBJT_SWAP.  Force sync if not a pageout process.
1383 	 */
1384 	if (object->type != OBJT_SWAP) {
1385 		addr = swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1386 		KASSERT(addr == SWAPBLK_NONE,
1387 		    ("unexpected object swap block"));
1388 	}
1389 	VM_OBJECT_WUNLOCK(object);
1390 	async = curproc == pageproc && (flags & VM_PAGER_PUT_SYNC) == 0;
1391 	swp_pager_init_freerange(&s_free, &n_free);
1392 
1393 	/*
1394 	 * Step 2
1395 	 *
1396 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1397 	 * The page is left dirty until the pageout operation completes
1398 	 * successfully.
1399 	 */
1400 	for (i = 0; i < count; i += n) {
1401 		/* Maximum I/O size is limited by maximum swap block size. */
1402 		n = min(count - i, nsw_cluster_max);
1403 
1404 		/* Get a block of swap of size up to size n. */
1405 		blk = swp_pager_getswapspace(&n, 4);
1406 		if (blk == SWAPBLK_NONE) {
1407 			for (j = 0; j < n; ++j)
1408 				rtvals[i + j] = VM_PAGER_FAIL;
1409 			continue;
1410 		}
1411 
1412 		/*
1413 		 * All I/O parameters have been satisfied.  Build the I/O
1414 		 * request and assign the swap space.
1415 		 */
1416 		if (async) {
1417 			mtx_lock(&swbuf_mtx);
1418 			while (nsw_wcount_async == 0)
1419 				msleep(&nsw_wcount_async, &swbuf_mtx, PVM,
1420 				    "swbufa", 0);
1421 			nsw_wcount_async--;
1422 			mtx_unlock(&swbuf_mtx);
1423 		}
1424 		bp = uma_zalloc(swwbuf_zone, M_WAITOK);
1425 		if (async)
1426 			bp->b_flags = B_ASYNC;
1427 		bp->b_flags |= B_PAGING;
1428 		bp->b_iocmd = BIO_WRITE;
1429 
1430 		bp->b_rcred = crhold(thread0.td_ucred);
1431 		bp->b_wcred = crhold(thread0.td_ucred);
1432 		bp->b_bcount = PAGE_SIZE * n;
1433 		bp->b_bufsize = PAGE_SIZE * n;
1434 		bp->b_blkno = blk;
1435 
1436 		VM_OBJECT_WLOCK(object);
1437 		for (j = 0; j < n; ++j) {
1438 			mreq = ma[i + j];
1439 			addr = swp_pager_meta_build(mreq->object, mreq->pindex,
1440 			    blk + j);
1441 			if (addr != SWAPBLK_NONE)
1442 				swp_pager_update_freerange(&s_free, &n_free,
1443 				    addr);
1444 			MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
1445 			mreq->oflags |= VPO_SWAPINPROG;
1446 			bp->b_pages[j] = mreq;
1447 		}
1448 		VM_OBJECT_WUNLOCK(object);
1449 		bp->b_npages = n;
1450 		/*
1451 		 * Must set dirty range for NFS to work.
1452 		 */
1453 		bp->b_dirtyoff = 0;
1454 		bp->b_dirtyend = bp->b_bcount;
1455 
1456 		VM_CNT_INC(v_swapout);
1457 		VM_CNT_ADD(v_swappgsout, bp->b_npages);
1458 
1459 		/*
1460 		 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
1461 		 * can call the async completion routine at the end of a
1462 		 * synchronous I/O operation.  Otherwise, our caller would
1463 		 * perform duplicate unbusy and wakeup operations on the page
1464 		 * and object, respectively.
1465 		 */
1466 		for (j = 0; j < n; j++)
1467 			rtvals[i + j] = VM_PAGER_PEND;
1468 
1469 		/*
1470 		 * asynchronous
1471 		 *
1472 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy.
1473 		 */
1474 		if (async) {
1475 			bp->b_iodone = swp_pager_async_iodone;
1476 			BUF_KERNPROC(bp);
1477 			swp_pager_strategy(bp);
1478 			continue;
1479 		}
1480 
1481 		/*
1482 		 * synchronous
1483 		 *
1484 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy.
1485 		 */
1486 		bp->b_iodone = bdone;
1487 		swp_pager_strategy(bp);
1488 
1489 		/*
1490 		 * Wait for the sync I/O to complete.
1491 		 */
1492 		bwait(bp, PVM, "swwrt");
1493 
1494 		/*
1495 		 * Now that we are through with the bp, we can call the
1496 		 * normal async completion, which frees everything up.
1497 		 */
1498 		swp_pager_async_iodone(bp);
1499 	}
1500 	swp_pager_freeswapspace(s_free, n_free);
1501 	VM_OBJECT_WLOCK(object);
1502 }
1503 
1504 /*
1505  *	swp_pager_async_iodone:
1506  *
1507  *	Completion routine for asynchronous reads and writes from/to swap.
1508  *	Also called manually by synchronous code to finish up a bp.
1509  *
1510  *	This routine may not sleep.
1511  */
1512 static void
1513 swp_pager_async_iodone(struct buf *bp)
1514 {
1515 	int i;
1516 	vm_object_t object = NULL;
1517 
1518 	/*
1519 	 * Report error - unless we ran out of memory, in which case
1520 	 * we've already logged it in swapgeom_strategy().
1521 	 */
1522 	if (bp->b_ioflags & BIO_ERROR && bp->b_error != ENOMEM) {
1523 		printf(
1524 		    "swap_pager: I/O error - %s failed; blkno %ld, "
1525 		    "size %ld, error %d\n",
1526 		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1527 		    (long)bp->b_blkno,
1528 		    (long)bp->b_bcount,
1529 		    bp->b_error
1530 		);
1531 	}
1532 
1533 	/*
1534 	 * remove the mapping for kernel virtual
1535 	 */
1536 	if (buf_mapped(bp))
1537 		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1538 	else
1539 		bp->b_data = bp->b_kvabase;
1540 
1541 	if (bp->b_npages) {
1542 		object = bp->b_pages[0]->object;
1543 		VM_OBJECT_WLOCK(object);
1544 	}
1545 
1546 	/*
1547 	 * cleanup pages.  If an error occurs writing to swap, we are in
1548 	 * very serious trouble.  If it happens to be a disk error, though,
1549 	 * we may be able to recover by reassigning the swap later on.  So
1550 	 * in this case we remove the m->swapblk assignment for the page
1551 	 * but do not free it back to the bitmap.  The erroneous block(s) are thus
1552 	 * never reallocated as swap.  Redirty the page and continue.
1553 	 */
1554 	for (i = 0; i < bp->b_npages; ++i) {
1555 		vm_page_t m = bp->b_pages[i];
1556 
1557 		m->oflags &= ~VPO_SWAPINPROG;
1558 		if (m->oflags & VPO_SWAPSLEEP) {
1559 			m->oflags &= ~VPO_SWAPSLEEP;
1560 			wakeup(&object->handle);
1561 		}
1562 
1563 		if (bp->b_ioflags & BIO_ERROR) {
1564 			/*
1565 			 * If an error occurs I'd love to throw the swapblk
1566 			 * away without freeing it back to swapspace, so it
1567 			 * can never be used again.  But I can't from an
1568 			 * interrupt.
1569 			 */
1570 			if (bp->b_iocmd == BIO_READ) {
1571 				/*
1572 				 * NOTE: for reads, m->dirty will probably
1573 				 * be overridden by the original caller of
1574 				 * getpages so don't play cute tricks here.
1575 				 */
1576 				vm_page_invalid(m);
1577 			} else {
1578 				/*
1579 				 * If a write error occurs, reactivate page
1580 				 * so it doesn't clog the inactive list,
1581 				 * then finish the I/O.
1582 				 */
1583 				MPASS(m->dirty == VM_PAGE_BITS_ALL);
1584 				vm_page_lock(m);
1585 				vm_page_activate(m);
1586 				vm_page_unlock(m);
1587 				vm_page_sunbusy(m);
1588 			}
1589 		} else if (bp->b_iocmd == BIO_READ) {
1590 			/*
1591 			 * NOTE: for reads, m->dirty will probably be
1592 			 * overridden by the original caller of getpages so
1593 			 * we cannot set them in order to free the underlying
1594 			 * swap in a low-swap situation.  I don't think we'd
1595 			 * want to do that anyway, but it was an optimization
1596 			 * that existed in the old swapper for a time before
1597 			 * it got ripped out due to precisely this problem.
1598 			 */
1599 			KASSERT(!pmap_page_is_mapped(m),
1600 			    ("swp_pager_async_iodone: page %p is mapped", m));
1601 			KASSERT(m->dirty == 0,
1602 			    ("swp_pager_async_iodone: page %p is dirty", m));
1603 
1604 			vm_page_valid(m);
1605 			if (i < bp->b_pgbefore ||
1606 			    i >= bp->b_npages - bp->b_pgafter)
1607 				vm_page_readahead_finish(m);
1608 		} else {
1609 			/*
1610 			 * For write success, clear the dirty
1611 			 * status, then finish the I/O ( which decrements the
1612 			 * status, then finish the I/O (which decrements the
1613 			 * busy count and possibly wakes waiters up).
1614 			 * inactivity.  Therefore, we do not expect it to be
1615 			 * reused.
1616 			 */
1617 			KASSERT(!pmap_page_is_write_mapped(m),
1618 			    ("swp_pager_async_iodone: page %p is not write"
1619 			    " protected", m));
1620 			vm_page_undirty(m);
1621 			vm_page_lock(m);
1622 			vm_page_deactivate_noreuse(m);
1623 			vm_page_unlock(m);
1624 			vm_page_sunbusy(m);
1625 		}
1626 	}
1627 
1628 	/*
1629 	 * adjust pip.  NOTE: the original parent may still have its own
1630 	 * pip refs on the object.
1631 	 */
1632 	if (object != NULL) {
1633 		vm_object_pip_wakeupn(object, bp->b_npages);
1634 		VM_OBJECT_WUNLOCK(object);
1635 	}
1636 
1637 	/*
1638 	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1639 	 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1640 	 * trigger a KASSERT in relpbuf().
1641 	 */
1642 	if (bp->b_vp) {
1643 		bp->b_vp = NULL;
1644 		bp->b_bufobj = NULL;
1645 	}
1646 	/*
1647 	 * release the physical I/O buffer
1648 	 */
1649 	if (bp->b_flags & B_ASYNC) {
1650 		mtx_lock(&swbuf_mtx);
1651 		if (++nsw_wcount_async == 1)
1652 			wakeup(&nsw_wcount_async);
1653 		mtx_unlock(&swbuf_mtx);
1654 	}
1655 	uma_zfree((bp->b_iocmd == BIO_READ) ? swrbuf_zone : swwbuf_zone, bp);
1656 }
1657 
1658 int
1659 swap_pager_nswapdev(void)
1660 {
1661 
1662 	return (nswapdev);
1663 }
1664 
1665 static void
1666 swp_pager_force_dirty(vm_page_t m)
1667 {
1668 
1669 	vm_page_dirty(m);
1670 #ifdef INVARIANTS
1671 	vm_page_lock(m);
1672 	if (!vm_page_wired(m) && m->queue == PQ_NONE)
1673 		panic("page %p is neither wired nor queued", m);
1674 	vm_page_unlock(m);
1675 #endif
1676 	vm_page_xunbusy(m);
1677 	swap_pager_unswapped(m);
1678 }
1679 
1680 static void
1681 swp_pager_force_launder(vm_page_t m)
1682 {
1683 
1684 	vm_page_dirty(m);
1685 	vm_page_lock(m);
1686 	vm_page_launder(m);
1687 	vm_page_unlock(m);
1688 	vm_page_xunbusy(m);
1689 	swap_pager_unswapped(m);
1690 }
1691 
1692 /*
1693  * SWP_PAGER_FORCE_PAGEIN() - force swap blocks to be paged in
1694  *
1695  *	This routine dissociates pages starting at the given index within an
1696  *	object from their backing store, paging them in if they do not reside
1697  *	in memory.  Pages that are paged in are marked dirty and placed in the
1698  *	laundry queue.  Pages are marked dirty because they no longer have
1699  *	backing store.  They are placed in the laundry queue because they have
1700  *	not been accessed recently.  Otherwise, they would already reside in
1701  *	memory.
1702  */
1703 static void
1704 swp_pager_force_pagein(vm_object_t object, vm_pindex_t pindex, int npages)
1705 {
1706 	vm_page_t ma[npages];
1707 	int i, j;
1708 
1709 	KASSERT(npages > 0, ("%s: No pages", __func__));
1710 	KASSERT(npages <= MAXPHYS / PAGE_SIZE,
1711 	    ("%s: Too many pages: %d", __func__, npages));
1712 	KASSERT(object->type == OBJT_SWAP,
1713 	    ("%s: Object not swappable", __func__));
1714 	vm_object_pip_add(object, npages);
1715 	vm_page_grab_pages(object, pindex, VM_ALLOC_NORMAL, ma, npages);
1716 	for (i = j = 0;; i++) {
1717 		/* Count nonresident pages, to page-in all at once. */
1718 		if (i < npages && ma[i]->valid != VM_PAGE_BITS_ALL)
1719 			continue;
1720 		if (j < i) {
1721 			/* Page-in nonresident pages. Mark for laundering. */
1722 			if (swap_pager_getpages(object, &ma[j], i - j, NULL,
1723 			    NULL) != VM_PAGER_OK)
1724 				panic("%s: read from swap failed", __func__);
1725 			do {
1726 				swp_pager_force_launder(ma[j]);
1727 			} while (++j < i);
1728 		}
1729 		if (i == npages)
1730 			break;
1731 		/* Mark dirty a resident page. */
1732 		swp_pager_force_dirty(ma[j++]);
1733 	}
1734 	vm_object_pip_wakeupn(object, npages);
1735 }
1736 
1737 /*
1738  *	swap_pager_swapoff_object:
1739  *
1740  *	Page in all of the pages that have been paged out for an object
1741  *	to a swap device.
1742  */
1743 static void
1744 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
1745 {
1746 	struct swblk *sb;
1747 	vm_pindex_t pi, s_pindex;
1748 	daddr_t blk, n_blks, s_blk;
1749 	int i;
1750 
1751 	KASSERT(object->type == OBJT_SWAP,
1752 	    ("%s: Object not swappable", __func__));
1753 	n_blks = 0;
1754 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1755 	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
1756 		for (i = 0; i < SWAP_META_PAGES; i++) {
1757 			blk = sb->d[i];
1758 			if (!swp_pager_isondev(blk, sp))
1759 				blk = SWAPBLK_NONE;
1760 
1761 			/*
1762 			 * If there are no blocks/pages accumulated, start a new
1763 			 * accumulation here.
1764 			 */
1765 			if (n_blks == 0) {
1766 				if (blk != SWAPBLK_NONE) {
1767 					s_blk = blk;
1768 					s_pindex = sb->p + i;
1769 					n_blks = 1;
1770 				}
1771 				continue;
1772 			}
1773 
1774 			/*
1775 			 * If the accumulation can be extended without breaking
1776 			 * the sequence of consecutive blocks and pages that
1777 			 * swp_pager_force_pagein() depends on, do so.
1778 			 */
1779 			if (n_blks < MAXPHYS / PAGE_SIZE &&
1780 			    s_blk + n_blks == blk &&
1781 			    s_pindex + n_blks == sb->p + i) {
1782 				++n_blks;
1783 				continue;
1784 			}
1785 
1786 			/*
1787 			 * The sequence of consecutive blocks and pages cannot
1788 			 * be extended, so page them all in here.  Then,
1789 			 * because doing so involves releasing and reacquiring
1790 			 * a lock that protects the swap block pctrie, do not
1791 			 * rely on the current swap block.  Break this loop and
1792 			 * re-fetch the same pindex from the pctrie again.
1793 			 */
1794 			swp_pager_force_pagein(object, s_pindex, n_blks);
1795 			n_blks = 0;
1796 			break;
1797 		}
1798 		if (i == SWAP_META_PAGES)
1799 			pi = sb->p + SWAP_META_PAGES;
1800 	}
1801 	if (n_blks > 0)
1802 		swp_pager_force_pagein(object, s_pindex, n_blks);
1803 }
1804 
1805 /*
1806  *	swap_pager_swapoff:
1807  *
1808  *	Page in all of the pages that have been paged out to the
1809  *	given device.  The corresponding blocks in the bitmap must be
1810  *	marked as allocated and the device must be flagged SW_CLOSING.
1811  *	No processes may remain swapped out to the device.
1812  *
1813  *	This routine may block.
1814  */
1815 static void
1816 swap_pager_swapoff(struct swdevt *sp)
1817 {
1818 	vm_object_t object;
1819 	int retries;
1820 
1821 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
1822 
1823 	retries = 0;
1824 full_rescan:
1825 	mtx_lock(&vm_object_list_mtx);
1826 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
1827 		if (object->type != OBJT_SWAP)
1828 			continue;
1829 		mtx_unlock(&vm_object_list_mtx);
1830 		/* Depends on type-stability. */
1831 		VM_OBJECT_WLOCK(object);
1832 
1833 		/*
1834 		 * Dead objects are eventually terminated on their own.
1835 		 */
1836 		if ((object->flags & OBJ_DEAD) != 0)
1837 			goto next_obj;
1838 
1839 		/*
1840 		 * Sync with fences placed after pctrie
1841 		 * initialization.  We must not access pctrie below
1842 		 * unless we checked that our object is swap and not
1843 		 * dead.
1844 		 */
1845 		atomic_thread_fence_acq();
1846 		if (object->type != OBJT_SWAP)
1847 			goto next_obj;
1848 
1849 		swap_pager_swapoff_object(sp, object);
1850 next_obj:
1851 		VM_OBJECT_WUNLOCK(object);
1852 		mtx_lock(&vm_object_list_mtx);
1853 	}
1854 	mtx_unlock(&vm_object_list_mtx);
1855 
1856 	if (sp->sw_used) {
1857 		/*
1858 		 * Objects may be locked or paging to the device being
1859 		 * removed, so we will miss their pages and need to
1860 		 * make another pass.  We have marked this device as
1861 		 * SW_CLOSING, so the activity should finish soon.
1862 		 */
1863 		retries++;
1864 		if (retries > 100) {
1865 			panic("swapoff: failed to locate %d swap blocks",
1866 			    sp->sw_used);
1867 		}
1868 		pause("swpoff", hz / 20);
1869 		goto full_rescan;
1870 	}
1871 	EVENTHANDLER_INVOKE(swapoff, sp);
1872 }
1873 
1874 /************************************************************************
1875  *				SWAP META DATA 				*
1876  ************************************************************************
1877  *
1878  *	These routines manipulate the swap metadata stored in the
1879  *	OBJT_SWAP object.
1880  *
1881  *	Swap metadata is stored in a per-object radix tree (pctrie) of
1882  *	struct swblk entries rooted at un_pager.swp.swp_blks; each entry
1883  *	covers SWAP_META_PAGES consecutive page indices.
1884  */
1885 
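/*
 * Illustrative sketch (not part of the pager): how a page index maps to
 * its metadata slot.  This mirrors swp_pager_meta_lookup() below and
 * assumes the object lock is held.
 *
 *	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
 *	    rounddown(pindex, SWAP_META_PAGES));
 *	blk = sb != NULL ? sb->d[pindex % SWAP_META_PAGES] : SWAPBLK_NONE;
 */
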
1886 /*
1887  * SWP_PAGER_SWBLK_EMPTY() - is a range of blocks free?
1888  */
1889 static bool
1890 swp_pager_swblk_empty(struct swblk *sb, int start, int limit)
1891 {
1892 	int i;
1893 
1894 	MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES);
1895 	for (i = start; i < limit; i++) {
1896 		if (sb->d[i] != SWAPBLK_NONE)
1897 			return (false);
1898 	}
1899 	return (true);
1900 }
1901 
1902 /*
1903  * SWP_PAGER_FREE_EMPTY_SWBLK() - free the swblk if it maps no swap blocks
1904  *
1905  *	Nothing is done if any of the swblk's entries are still in use.
1906  */
1907 static void
1908 swp_pager_free_empty_swblk(vm_object_t object, struct swblk *sb)
1909 {
1910 
1911 	if (swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
1912 		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
1913 		uma_zfree(swblk_zone, sb);
1914 	}
1915 }
1916 
1917 /*
1918  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
1919  *
1920  *	We first convert the object to a swap object if it is a default
1921  *	object.
1922  *
1923  *	The specified swapblk is added to the object's swap metadata.  If
1924  *	the swapblk is not valid, it is freed instead.  Any previously
1925  *	assigned swapblk is returned.
1926  */
1927 static daddr_t
1928 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
1929 {
1930 	static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
1931 	struct swblk *sb, *sb1;
1932 	vm_pindex_t modpi, rdpi;
1933 	daddr_t prev_swapblk;
1934 	int error, i;
1935 
1936 	VM_OBJECT_ASSERT_WLOCKED(object);
1937 
1938 	/*
1939 	 * Convert default object to swap object if necessary
1940 	 */
1941 	if (object->type != OBJT_SWAP) {
1942 		pctrie_init(&object->un_pager.swp.swp_blks);
1943 
1944 		/*
1945 		 * Ensure that swap_pager_swapoff()'s iteration over
1946 		 * object_list does not see a garbage pctrie.
1947 		 */
1948 		atomic_thread_fence_rel();
1949 
1950 		object->type = OBJT_SWAP;
1951 		object->un_pager.swp.writemappings = 0;
1952 		KASSERT((object->flags & OBJ_ANON) != 0 ||
1953 		    object->handle == NULL,
1954 		    ("default pager %p with handle %p",
1955 		    object, object->handle));
1956 	}
1957 
1958 	rdpi = rounddown(pindex, SWAP_META_PAGES);
1959 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
1960 	if (sb == NULL) {
1961 		if (swapblk == SWAPBLK_NONE)
1962 			return (SWAPBLK_NONE);
1963 		for (;;) {
1964 			sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
1965 			    pageproc ? M_USE_RESERVE : 0));
1966 			if (sb != NULL) {
1967 				sb->p = rdpi;
1968 				for (i = 0; i < SWAP_META_PAGES; i++)
1969 					sb->d[i] = SWAPBLK_NONE;
1970 				if (atomic_cmpset_int(&swblk_zone_exhausted,
1971 				    1, 0))
1972 					printf("swblk zone ok\n");
1973 				break;
1974 			}
1975 			VM_OBJECT_WUNLOCK(object);
1976 			if (uma_zone_exhausted(swblk_zone)) {
1977 				if (atomic_cmpset_int(&swblk_zone_exhausted,
1978 				    0, 1))
1979 					printf("swap blk zone exhausted, "
1980 					    "increase kern.maxswzone\n");
1981 				vm_pageout_oom(VM_OOM_SWAPZ);
1982 				pause("swzonxb", 10);
1983 			} else
1984 				uma_zwait(swblk_zone);
1985 			VM_OBJECT_WLOCK(object);
1986 			sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
1987 			    rdpi);
1988 			if (sb != NULL)
1989 				/*
1990 				 * Somebody swapped out a nearby page,
1991 				 * allocating swblk at the rdpi index,
1992 				 * while we dropped the object lock.
1993 				 */
1994 				goto allocated;
1995 		}
1996 		for (;;) {
1997 			error = SWAP_PCTRIE_INSERT(
1998 			    &object->un_pager.swp.swp_blks, sb);
1999 			if (error == 0) {
2000 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
2001 				    1, 0))
2002 					printf("swpctrie zone ok\n");
2003 				break;
2004 			}
2005 			VM_OBJECT_WUNLOCK(object);
2006 			if (uma_zone_exhausted(swpctrie_zone)) {
2007 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
2008 				    0, 1))
2009 					printf("swap pctrie zone exhausted, "
2010 					    "increase kern.maxswzone\n");
2011 				vm_pageout_oom(VM_OOM_SWAPZ);
2012 				pause("swzonxp", 10);
2013 			} else
2014 				uma_zwait(swpctrie_zone);
2015 			VM_OBJECT_WLOCK(object);
2016 			sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
2017 			    rdpi);
2018 			if (sb1 != NULL) {
2019 				uma_zfree(swblk_zone, sb);
2020 				sb = sb1;
2021 				goto allocated;
2022 			}
2023 		}
2024 	}
2025 allocated:
2026 	MPASS(sb->p == rdpi);
2027 
2028 	modpi = pindex % SWAP_META_PAGES;
2029 	/* Return prior contents of metadata. */
2030 	prev_swapblk = sb->d[modpi];
2031 	/* Enter block into metadata. */
2032 	sb->d[modpi] = swapblk;
2033 
2034 	/*
2035 	 * Free the swblk if we end up with the empty page run.
2036 	 */
2037 	if (swapblk == SWAPBLK_NONE)
2038 		swp_pager_free_empty_swblk(object, sb);
2039 	return (prev_swapblk);
2040 }
2041 
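/*
 * Illustrative sketch (not part of the pager): building SWAPBLK_NONE
 * over a page frees any prior assignment, which is returned so the
 * caller may release it.  Assumes the object is write locked.
 *
 *	blk = swp_pager_meta_build(object, pindex, SWAPBLK_NONE);
 *	if (blk != SWAPBLK_NONE)
 *		swp_pager_freeswapspace(blk, 1);
 */
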
2042 /*
2043  * SWP_PAGER_META_TRANSFER() - free a range of blocks in the srcobject's swap
2044  * metadata, or transfer it into dstobject.
2045  *
2046  *	This routine will free swap metadata structures as they are cleaned
2047  *	out.
2048  */
2049 static void
2050 swp_pager_meta_transfer(vm_object_t srcobject, vm_object_t dstobject,
2051     vm_pindex_t pindex, vm_pindex_t count)
2052 {
2053 	struct swblk *sb;
2054 	daddr_t n_free, s_free;
2055 	vm_pindex_t offset, last;
2056 	int i, limit, start;
2057 
2058 	VM_OBJECT_ASSERT_WLOCKED(srcobject);
2059 	if (srcobject->type != OBJT_SWAP || count == 0)
2060 		return;
2061 
2062 	swp_pager_init_freerange(&s_free, &n_free);
2063 	offset = pindex;
2064 	last = pindex + count;
2065 	for (;;) {
2066 		sb = SWAP_PCTRIE_LOOKUP_GE(&srcobject->un_pager.swp.swp_blks,
2067 		    rounddown(pindex, SWAP_META_PAGES));
2068 		if (sb == NULL || sb->p >= last)
2069 			break;
2070 		start = pindex > sb->p ? pindex - sb->p : 0;
2071 		limit = last - sb->p < SWAP_META_PAGES ? last - sb->p :
2072 		    SWAP_META_PAGES;
2073 		for (i = start; i < limit; i++) {
2074 			if (sb->d[i] == SWAPBLK_NONE)
2075 				continue;
2076 			if (dstobject == NULL ||
2077 			    !swp_pager_xfer_source(srcobject, dstobject,
2078 			    sb->p + i - offset, sb->d[i])) {
2079 				swp_pager_update_freerange(&s_free, &n_free,
2080 				    sb->d[i]);
2081 			}
2082 			sb->d[i] = SWAPBLK_NONE;
2083 		}
2084 		pindex = sb->p + SWAP_META_PAGES;
2085 		if (swp_pager_swblk_empty(sb, 0, start) &&
2086 		    swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) {
2087 			SWAP_PCTRIE_REMOVE(&srcobject->un_pager.swp.swp_blks,
2088 			    sb->p);
2089 			uma_zfree(swblk_zone, sb);
2090 		}
2091 	}
2092 	swp_pager_freeswapspace(s_free, n_free);
2093 }
2094 
2095 /*
2096  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2097  *
2098  *	The requested range of blocks is freed, with any associated swap
2099  *	returned to the swap bitmap.
2100  *
2101  *	This routine will free swap metadata structures as they are cleaned
2102  *	out.  This routine does *NOT* operate on swap metadata associated
2103  *	with resident pages.
2104  */
2105 static void
2106 swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count)
2107 {
2108 	swp_pager_meta_transfer(object, NULL, pindex, count);
2109 }
2110 
2111 /*
2112  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2113  *
2114  *	This routine locates and destroys all swap metadata associated with
2115  *	an object.
2116  */
2117 static void
2118 swp_pager_meta_free_all(vm_object_t object)
2119 {
2120 	struct swblk *sb;
2121 	daddr_t n_free, s_free;
2122 	vm_pindex_t pindex;
2123 	int i;
2124 
2125 	VM_OBJECT_ASSERT_WLOCKED(object);
2126 	if (object->type != OBJT_SWAP)
2127 		return;
2128 
2129 	swp_pager_init_freerange(&s_free, &n_free);
2130 	for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
2131 	    &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
2132 		pindex = sb->p + SWAP_META_PAGES;
2133 		for (i = 0; i < SWAP_META_PAGES; i++) {
2134 			if (sb->d[i] == SWAPBLK_NONE)
2135 				continue;
2136 			swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
2137 		}
2138 		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
2139 		uma_zfree(swblk_zone, sb);
2140 	}
2141 	swp_pager_freeswapspace(s_free, n_free);
2142 }
2143 
2144 /*
2145  * SWP_PAGER_META_LOOKUP() - look up a swapblk assignment in the swap
2146  *	metadata.
2147  *
2148  *	This routine returns the swapblk assigned to the page at the given
2149  *	index, or SWAPBLK_NONE if no block is assigned.
2154  */
2155 static daddr_t
2156 swp_pager_meta_lookup(vm_object_t object, vm_pindex_t pindex)
2157 {
2158 	struct swblk *sb;
2159 
2160 	VM_OBJECT_ASSERT_LOCKED(object);
2161 
2162 	/*
2163 	 * The meta data only exists if the object is OBJT_SWAP
2164 	 * and even then might not be allocated yet.
2165 	 */
2166 	KASSERT(object->type == OBJT_SWAP,
2167 	    ("Lookup object not swappable"));
2168 
2169 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
2170 	    rounddown(pindex, SWAP_META_PAGES));
2171 	if (sb == NULL)
2172 		return (SWAPBLK_NONE);
2173 	return (sb->d[pindex % SWAP_META_PAGES]);
2174 }
2175 
2176 /*
2177  * Returns the least page index which is greater than or equal to the
2178  * parameter pindex and for which there is a swap block allocated.
2179  * Returns object's size if the object's type is not swap or if there
2180  * are no allocated swap blocks for the object after the requested
2181  * pindex.
2182  */
2183 vm_pindex_t
2184 swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
2185 {
2186 	struct swblk *sb;
2187 	int i;
2188 
2189 	VM_OBJECT_ASSERT_LOCKED(object);
2190 	if (object->type != OBJT_SWAP)
2191 		return (object->size);
2192 
2193 	sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2194 	    rounddown(pindex, SWAP_META_PAGES));
2195 	if (sb == NULL)
2196 		return (object->size);
2197 	if (sb->p < pindex) {
2198 		for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
2199 			if (sb->d[i] != SWAPBLK_NONE)
2200 				return (sb->p + i);
2201 		}
2202 		sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2203 		    roundup(pindex, SWAP_META_PAGES));
2204 		if (sb == NULL)
2205 			return (object->size);
2206 	}
2207 	for (i = 0; i < SWAP_META_PAGES; i++) {
2208 		if (sb->d[i] != SWAPBLK_NONE)
2209 			return (sb->p + i);
2210 	}
2211 
2212 	/*
2213 	 * We get here if a swblk is present in the trie but it
2214 	 * doesn't map any blocks.
2215 	 */
2216 	MPASS(0);
2217 	return (object->size);
2218 }
2219 
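/*
 * Illustrative usage (a sketch, with the object lock held across the
 * loop): visiting every page index of an object that has an allocated
 * swap block.
 *
 *	for (pi = swap_pager_find_least(object, 0); pi < object->size;
 *	    pi = swap_pager_find_least(object, pi + 1))
 *		...
 */
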
2220 /*
2221  * System call swapon(name) enables swapping on device name,
2222  * which must be in the swdevsw.  Return EBUSY
2223  * if already swapping on this device.
2224  */
2225 #ifndef _SYS_SYSPROTO_H_
2226 struct swapon_args {
2227 	char *name;
2228 };
2229 #endif
2230 
2231 /*
2232  * MPSAFE
2233  */
2234 /* ARGSUSED */
2235 int
2236 sys_swapon(struct thread *td, struct swapon_args *uap)
2237 {
2238 	struct vattr attr;
2239 	struct vnode *vp;
2240 	struct nameidata nd;
2241 	int error;
2242 
2243 	error = priv_check(td, PRIV_SWAPON);
2244 	if (error)
2245 		return (error);
2246 
2247 	sx_xlock(&swdev_syscall_lock);
2248 
2249 	/*
2250 	 * Swap metadata may not fit in the KVM if we have physical
2251 	 * memory of >1GB.
2252 	 */
2253 	if (swblk_zone == NULL) {
2254 		error = ENOMEM;
2255 		goto done;
2256 	}
2257 
2258 	NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | AUDITVNODE1, UIO_USERSPACE,
2259 	    uap->name, td);
2260 	error = namei(&nd);
2261 	if (error)
2262 		goto done;
2263 
2264 	NDFREE(&nd, NDF_ONLY_PNBUF);
2265 	vp = nd.ni_vp;
2266 
2267 	if (vn_isdisk(vp, &error)) {
2268 		error = swapongeom(vp);
2269 	} else if (vp->v_type == VREG &&
2270 	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2271 	    (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
2272 		/*
2273 		 * Allow direct swapping to NFS regular files in the same
2274 		 * way that nfs_mountroot() sets up diskless swapping.
2275 		 */
2276 		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2277 	}
2278 
2279 	if (error)
2280 		vrele(vp);
2281 done:
2282 	sx_xunlock(&swdev_syscall_lock);
2283 	return (error);
2284 }
2285 
2286 /*
2287  * Check that the total amount of swap currently configured does not
2288  * exceed half the theoretical maximum.  If it does, print a warning
2289  * message.
2290  */
2291 static void
2292 swapon_check_swzone(void)
2293 {
2294 
2295 	/* recommend using no more than half that amount */
2296 	if (swap_total > swap_maxpages / 2) {
2297 		printf("warning: total configured swap (%lu pages) "
2298 		    "exceeds maximum recommended amount (%lu pages).\n",
2299 		    swap_total, swap_maxpages / 2);
2300 		printf("warning: increase kern.maxswzone "
2301 		    "or reduce amount of swap.\n");
2302 	}
2303 }
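
/*
 * For example (hypothetical numbers): with swap_maxpages of 4M pages,
 * configuring more than 2M pages of swap (8GB with 4KB pages) triggers
 * the warning above.
 */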
2304 
2305 static void
2306 swaponsomething(struct vnode *vp, void *id, u_long nblks,
2307     sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
2308 {
2309 	struct swdevt *sp, *tsp;
2310 	swblk_t dvbase;
2311 	u_long mblocks;
2312 
2313 	/*
2314 	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2315 	 * First round nblks down to a page boundary, then convert.
2316 	 *
2317 	 * sp->sw_nblks is in page-sized chunks now too.
2318 	 */
2319 	nblks &= ~(ctodb(1) - 1);
2320 	nblks = dbtoc(nblks);
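
	/*
	 * For example, assuming 512-byte disk blocks and 4KB pages:
	 * ctodb(1) == 8, so nblks is first rounded down to a multiple of
	 * 8 disk blocks and then divided by 8 to count whole pages.
	 */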
2321 
2322 	/*
2323 	 * If we go beyond this, we get overflows in the radix
2324 	 * tree bitmap code.
2325 	 */
2326 	mblocks = 0x40000000 / BLIST_META_RADIX;
2327 	if (nblks > mblocks) {
2328 		printf(
2329     "WARNING: reducing swap size to maximum of %luMB per unit\n",
2330 		    mblocks / 1024 / 1024 * PAGE_SIZE);
2331 		nblks = mblocks;
2332 	}
2333 
2334 	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2335 	sp->sw_vp = vp;
2336 	sp->sw_id = id;
2337 	sp->sw_dev = dev;
2338 	sp->sw_nblks = nblks;
2339 	sp->sw_used = 0;
2340 	sp->sw_strategy = strategy;
2341 	sp->sw_close = close;
2342 	sp->sw_flags = flags;
2343 
2344 	sp->sw_blist = blist_create(nblks, M_WAITOK);
2345 	/*
2346 	 * Do not free the first blocks, in order to avoid overwriting
2347 	 * any BSD label at the front of the partition.
2348 	 */
2349 	blist_free(sp->sw_blist, howmany(BBSIZE, PAGE_SIZE),
2350 	    nblks - howmany(BBSIZE, PAGE_SIZE));
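
	/*
	 * With the usual BBSIZE of 8192 and 4KB pages, this keeps the
	 * first two pages of the device marked allocated.
	 */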
2351 
2352 	dvbase = 0;
2353 	mtx_lock(&sw_dev_mtx);
2354 	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
2355 		if (tsp->sw_end >= dvbase) {
2356 			/*
2357 			 * We put one uncovered page between the devices
2358 			 * in order to definitively prevent any cross-device
2359 			 * I/O requests.
2360 			 */
2361 			dvbase = tsp->sw_end + 1;
2362 		}
2363 	}
2364 	sp->sw_first = dvbase;
2365 	sp->sw_end = dvbase + nblks;
2366 	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
2367 	nswapdev++;
2368 	swap_pager_avail += nblks - howmany(BBSIZE, PAGE_SIZE);
2369 	swap_total += nblks;
2370 	swapon_check_swzone();
2371 	swp_sizecheck();
2372 	mtx_unlock(&sw_dev_mtx);
2373 	EVENTHANDLER_INVOKE(swapon, sp);
2374 }
2375 
2376 /*
2377  * SYSCALL: swapoff(devname)
2378  *
2379  * Disable swapping on the given device.
2380  *
2381  * XXX: Badly designed system call: it should use a device index
2382  * rather than a filename as its specification.  We keep sw_vp around
2383  * only to make this work.
2384  */
2385 #ifndef _SYS_SYSPROTO_H_
2386 struct swapoff_args {
2387 	char *name;
2388 };
2389 #endif
2390 
2391 /*
2392  * MPSAFE
2393  */
2394 /* ARGSUSED */
2395 int
2396 sys_swapoff(struct thread *td, struct swapoff_args *uap)
2397 {
2398 	struct vnode *vp;
2399 	struct nameidata nd;
2400 	struct swdevt *sp;
2401 	int error;
2402 
2403 	error = priv_check(td, PRIV_SWAPOFF);
2404 	if (error)
2405 		return (error);
2406 
2407 	sx_xlock(&swdev_syscall_lock);
2408 
2409 	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, UIO_USERSPACE, uap->name,
2410 	    td);
2411 	error = namei(&nd);
2412 	if (error)
2413 		goto done;
2414 	NDFREE(&nd, NDF_ONLY_PNBUF);
2415 	vp = nd.ni_vp;
2416 
2417 	mtx_lock(&sw_dev_mtx);
2418 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2419 		if (sp->sw_vp == vp)
2420 			break;
2421 	}
2422 	mtx_unlock(&sw_dev_mtx);
2423 	if (sp == NULL) {
2424 		error = EINVAL;
2425 		goto done;
2426 	}
2427 	error = swapoff_one(sp, td->td_ucred);
2428 done:
2429 	sx_xunlock(&swdev_syscall_lock);
2430 	return (error);
2431 }
2432 
2433 static int
2434 swapoff_one(struct swdevt *sp, struct ucred *cred)
2435 {
2436 	u_long nblks;
2437 #ifdef MAC
2438 	int error;
2439 #endif
2440 
2441 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
2442 #ifdef MAC
2443 	(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
2444 	error = mac_system_check_swapoff(cred, sp->sw_vp);
2445 	(void) VOP_UNLOCK(sp->sw_vp, 0);
2446 	if (error != 0)
2447 		return (error);
2448 #endif
2449 	nblks = sp->sw_nblks;
2450 
2451 	/*
2452 	 * We can turn off this swap device safely only if the
2453 	 * available virtual memory in the system will fit the amount
2454 	 * of data we will have to page back in, plus an epsilon so
2455 	 * the system doesn't become critically low on swap space.
2456 	 */
2457 	if (vm_free_count() + swap_pager_avail < nblks + nswap_lowat)
2458 		return (ENOMEM);
2459 
2460 	/*
2461 	 * Prevent further allocations on this device.
2462 	 */
2463 	mtx_lock(&sw_dev_mtx);
2464 	sp->sw_flags |= SW_CLOSING;
2465 	swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
2466 	swap_total -= nblks;
2467 	mtx_unlock(&sw_dev_mtx);
2468 
2469 	/*
2470 	 * Page in the contents of the device and close it.
2471 	 */
2472 	swap_pager_swapoff(sp);
2473 
2474 	sp->sw_close(curthread, sp);
2475 	mtx_lock(&sw_dev_mtx);
2476 	sp->sw_id = NULL;
2477 	TAILQ_REMOVE(&swtailq, sp, sw_list);
2478 	nswapdev--;
2479 	if (nswapdev == 0) {
2480 		swap_pager_full = 2;
2481 		swap_pager_almost_full = 1;
2482 	}
2483 	if (swdevhd == sp)
2484 		swdevhd = NULL;
2485 	mtx_unlock(&sw_dev_mtx);
2486 	blist_destroy(sp->sw_blist);
2487 	free(sp, M_VMPGDATA);
2488 	return (0);
2489 }
2490 
2491 void
2492 swapoff_all(void)
2493 {
2494 	struct swdevt *sp, *spt;
2495 	const char *devname;
2496 	int error;
2497 
2498 	sx_xlock(&swdev_syscall_lock);
2499 
2500 	mtx_lock(&sw_dev_mtx);
2501 	TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
2502 		mtx_unlock(&sw_dev_mtx);
2503 		if (vn_isdisk(sp->sw_vp, NULL))
2504 			devname = devtoname(sp->sw_vp->v_rdev);
2505 		else
2506 			devname = "[file]";
2507 		error = swapoff_one(sp, thread0.td_ucred);
2508 		if (error != 0) {
2509 			printf("Cannot remove swap device %s (error=%d), "
2510 			    "skipping.\n", devname, error);
2511 		} else if (bootverbose) {
2512 			printf("Swap device %s removed.\n", devname);
2513 		}
2514 		mtx_lock(&sw_dev_mtx);
2515 	}
2516 	mtx_unlock(&sw_dev_mtx);
2517 
2518 	sx_xunlock(&swdev_syscall_lock);
2519 }
2520 
2521 void
2522 swap_pager_status(int *total, int *used)
2523 {
2524 	struct swdevt *sp;
2525 
2526 	*total = 0;
2527 	*used = 0;
2528 	mtx_lock(&sw_dev_mtx);
2529 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2530 		*total += sp->sw_nblks;
2531 		*used += sp->sw_used;
2532 	}
2533 	mtx_unlock(&sw_dev_mtx);
2534 }
2535 
2536 int
2537 swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
2538 {
2539 	struct swdevt *sp;
2540 	const char *tmp_devname;
2541 	int error, n;
2542 
2543 	n = 0;
2544 	error = ENOENT;
2545 	mtx_lock(&sw_dev_mtx);
2546 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2547 		if (n != name) {
2548 			n++;
2549 			continue;
2550 		}
2551 		xs->xsw_version = XSWDEV_VERSION;
2552 		xs->xsw_dev = sp->sw_dev;
2553 		xs->xsw_flags = sp->sw_flags;
2554 		xs->xsw_nblks = sp->sw_nblks;
2555 		xs->xsw_used = sp->sw_used;
2556 		if (devname != NULL) {
2557 			if (vn_isdisk(sp->sw_vp, NULL))
2558 				tmp_devname = devtoname(sp->sw_vp->v_rdev);
2559 			else
2560 				tmp_devname = "[file]";
2561 			strncpy(devname, tmp_devname, len);
2562 		}
2563 		error = 0;
2564 		break;
2565 	}
2566 	mtx_unlock(&sw_dev_mtx);
2567 	return (error);
2568 }
2569 
2570 #if defined(COMPAT_FREEBSD11)
2571 #define XSWDEV_VERSION_11	1
2572 struct xswdev11 {
2573 	u_int	xsw_version;
2574 	uint32_t xsw_dev;
2575 	int	xsw_flags;
2576 	int	xsw_nblks;
2577 	int     xsw_used;
2578 };
2579 #endif
2580 
2581 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2582 struct xswdev32 {
2583 	u_int	xsw_version;
2584 	u_int	xsw_dev1, xsw_dev2;
2585 	int	xsw_flags;
2586 	int	xsw_nblks;
2587 	int     xsw_used;
2588 };
2589 #endif
2590 
2591 static int
2592 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2593 {
2594 	struct xswdev xs;
2595 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2596 	struct xswdev32 xs32;
2597 #endif
2598 #if defined(COMPAT_FREEBSD11)
2599 	struct xswdev11 xs11;
2600 #endif
2601 	int error;
2602 
2603 	if (arg2 != 1)			/* name length */
2604 		return (EINVAL);
2605 	error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
2606 	if (error != 0)
2607 		return (error);
2608 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2609 	if (req->oldlen == sizeof(xs32)) {
2610 		xs32.xsw_version = XSWDEV_VERSION;
2611 		xs32.xsw_dev1 = xs.xsw_dev;
2612 		xs32.xsw_dev2 = xs.xsw_dev >> 32;
2613 		xs32.xsw_flags = xs.xsw_flags;
2614 		xs32.xsw_nblks = xs.xsw_nblks;
2615 		xs32.xsw_used = xs.xsw_used;
2616 		error = SYSCTL_OUT(req, &xs32, sizeof(xs32));
2617 		return (error);
2618 	}
2619 #endif
2620 #if defined(COMPAT_FREEBSD11)
2621 	if (req->oldlen == sizeof(xs11)) {
2622 		xs11.xsw_version = XSWDEV_VERSION_11;
2623 		xs11.xsw_dev = xs.xsw_dev; /* truncation */
2624 		xs11.xsw_flags = xs.xsw_flags;
2625 		xs11.xsw_nblks = xs.xsw_nblks;
2626 		xs11.xsw_used = xs.xsw_used;
2627 		error = SYSCTL_OUT(req, &xs11, sizeof(xs11));
2628 		return (error);
2629 	}
2630 #endif
2631 	error = SYSCTL_OUT(req, &xs, sizeof(xs));
2632 	return (error);
2633 }
2634 
2635 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2636     "Number of swap devices");
2637 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
2638     sysctl_vm_swap_info,
2639     "Swap statistics by device");
2640 
2641 /*
2642  * Count the approximate swap usage in pages for a vmspace.  Swap
2643  * blocks that are shadowed or not yet copied on write are not counted.
2644  * The map must be locked.
2645  */
2646 long
2647 vmspace_swap_count(struct vmspace *vmspace)
2648 {
2649 	vm_map_t map;
2650 	vm_map_entry_t cur;
2651 	vm_object_t object;
2652 	struct swblk *sb;
2653 	vm_pindex_t e, pi;
2654 	long count;
2655 	int i;
2656 
2657 	map = &vmspace->vm_map;
2658 	count = 0;
2659 
2660 	VM_MAP_ENTRY_FOREACH(cur, map) {
2661 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2662 			continue;
2663 		object = cur->object.vm_object;
2664 		if (object == NULL || object->type != OBJT_SWAP)
2665 			continue;
2666 		VM_OBJECT_RLOCK(object);
2667 		if (object->type != OBJT_SWAP)
2668 			goto unlock;
2669 		pi = OFF_TO_IDX(cur->offset);
2670 		e = pi + OFF_TO_IDX(cur->end - cur->start);
2671 		for (;; pi = sb->p + SWAP_META_PAGES) {
2672 			sb = SWAP_PCTRIE_LOOKUP_GE(
2673 			    &object->un_pager.swp.swp_blks, pi);
2674 			if (sb == NULL || sb->p >= e)
2675 				break;
2676 			for (i = 0; i < SWAP_META_PAGES; i++) {
2677 				if (sb->p + i < e &&
2678 				    sb->d[i] != SWAPBLK_NONE)
2679 					count++;
2680 			}
2681 		}
2682 unlock:
2683 		VM_OBJECT_RUNLOCK(object);
2684 	}
2685 	return (count);
2686 }
2687 
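/*
 * Illustrative usage (a sketch; the map lock, read or write, must be
 * held around the call):
 *
 *	vm_map_lock_read(&vmspace->vm_map);
 *	count = vmspace_swap_count(vmspace);
 *	vm_map_unlock_read(&vmspace->vm_map);
 */
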
2688 /*
2689  * GEOM backend
2690  *
2691  * Swapping onto disk devices.
2692  *
2693  */
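
/*
 * Backend contract (a summary of existing behavior, not a new
 * interface): each backend hands swaponsomething() a sw_strategy_t and
 * a sw_close_t.  The strategy routine receives a struct buf whose
 * b_blkno is a page-sized block number in the global swap space and
 * must subtract sp->sw_first to get the device-relative offset, as
 * both swapgeom_strategy() and swapdev_strategy() do.
 */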
2694 
2695 static g_orphan_t swapgeom_orphan;
2696 
2697 static struct g_class g_swap_class = {
2698 	.name = "SWAP",
2699 	.version = G_VERSION,
2700 	.orphan = swapgeom_orphan,
2701 };
2702 
2703 DECLARE_GEOM_CLASS(g_swap_class, g_class);
2704 
2706 static void
2707 swapgeom_close_ev(void *arg, int flags)
2708 {
2709 	struct g_consumer *cp;
2710 
2711 	cp = arg;
2712 	g_access(cp, -1, -1, 0);
2713 	g_detach(cp);
2714 	g_destroy_consumer(cp);
2715 }
2716 
2717 /*
2718  * Add a reference to the g_consumer for an inflight transaction.
2719  */
2720 static void
2721 swapgeom_acquire(struct g_consumer *cp)
2722 {
2723 
2724 	mtx_assert(&sw_dev_mtx, MA_OWNED);
2725 	cp->index++;
2726 }
2727 
2728 /*
2729  * Remove a reference from the g_consumer.  Post a close event if all
2730  * references go away, since the function might be called from the
2731  * biodone context.
2732  */
2733 static void
2734 swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
2735 {
2736 
2737 	mtx_assert(&sw_dev_mtx, MA_OWNED);
2738 	cp->index--;
2739 	if (cp->index == 0) {
2740 		if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
2741 			sp->sw_id = NULL;
2742 	}
2743 }
2744 
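/*
 * Illustrative pairing, as used by swapgeom_strategy() and
 * swapgeom_done() below: a reference is taken before the bio is issued
 * and dropped from its completion callback, both under sw_dev_mtx.
 *
 *	mtx_lock(&sw_dev_mtx);
 *	swapgeom_acquire(cp);
 *	mtx_unlock(&sw_dev_mtx);
 *	...issue the bio...
 *	mtx_lock(&sw_dev_mtx);
 *	swapgeom_release(cp, sp);
 *	mtx_unlock(&sw_dev_mtx);
 */
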
2745 static void
2746 swapgeom_done(struct bio *bp2)
2747 {
2748 	struct swdevt *sp;
2749 	struct buf *bp;
2750 	struct g_consumer *cp;
2751 
2752 	bp = bp2->bio_caller2;
2753 	cp = bp2->bio_from;
2754 	bp->b_ioflags = bp2->bio_flags;
2755 	if (bp2->bio_error)
2756 		bp->b_ioflags |= BIO_ERROR;
2757 	bp->b_resid = bp->b_bcount - bp2->bio_completed;
2758 	bp->b_error = bp2->bio_error;
2759 	bp->b_caller1 = NULL;
2760 	bufdone(bp);
2761 	sp = bp2->bio_caller1;
2762 	mtx_lock(&sw_dev_mtx);
2763 	swapgeom_release(cp, sp);
2764 	mtx_unlock(&sw_dev_mtx);
2765 	g_destroy_bio(bp2);
2766 }
2767 
2768 static void
2769 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2770 {
2771 	struct bio *bio;
2772 	struct g_consumer *cp;
2773 
2774 	mtx_lock(&sw_dev_mtx);
2775 	cp = sp->sw_id;
2776 	if (cp == NULL) {
2777 		mtx_unlock(&sw_dev_mtx);
2778 		bp->b_error = ENXIO;
2779 		bp->b_ioflags |= BIO_ERROR;
2780 		bufdone(bp);
2781 		return;
2782 	}
2783 	swapgeom_acquire(cp);
2784 	mtx_unlock(&sw_dev_mtx);
2785 	if (bp->b_iocmd == BIO_WRITE)
2786 		bio = g_new_bio();
2787 	else
2788 		bio = g_alloc_bio();
2789 	if (bio == NULL) {
2790 		mtx_lock(&sw_dev_mtx);
2791 		swapgeom_release(cp, sp);
2792 		mtx_unlock(&sw_dev_mtx);
2793 		bp->b_error = ENOMEM;
2794 		bp->b_ioflags |= BIO_ERROR;
2795 		printf("swap_pager: cannot allocate bio\n");
2796 		bufdone(bp);
2797 		return;
2798 	}
2799 
2800 	bp->b_caller1 = bio;
2801 	bio->bio_caller1 = sp;
2802 	bio->bio_caller2 = bp;
2803 	bio->bio_cmd = bp->b_iocmd;
2804 	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2805 	bio->bio_length = bp->b_bcount;
2806 	bio->bio_done = swapgeom_done;
2807 	if (!buf_mapped(bp)) {
2808 		bio->bio_ma = bp->b_pages;
2809 		bio->bio_data = unmapped_buf;
2810 		bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
2811 		bio->bio_ma_n = bp->b_npages;
2812 		bio->bio_flags |= BIO_UNMAPPED;
2813 	} else {
2814 		bio->bio_data = bp->b_data;
2815 		bio->bio_ma = NULL;
2816 	}
2817 	g_io_request(bio, cp);
2818 	return;
2819 }
2820 
2821 static void
2822 swapgeom_orphan(struct g_consumer *cp)
2823 {
2824 	struct swdevt *sp;
2825 	int destroy;
2826 
2827 	mtx_lock(&sw_dev_mtx);
2828 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2829 		if (sp->sw_id == cp) {
2830 			sp->sw_flags |= SW_CLOSING;
2831 			break;
2832 		}
2833 	}
2834 	/*
2835 	 * Drop the reference we were created with.  Do it directly, since
2836 	 * we're in a special context where we don't have to queue the call
2837 	 * to swapgeom_close_ev().
2838 	 */
2839 	cp->index--;
2840 	destroy = ((sp != NULL) && (cp->index == 0));
2841 	if (destroy)
2842 		sp->sw_id = NULL;
2843 	mtx_unlock(&sw_dev_mtx);
2844 	if (destroy)
2845 		swapgeom_close_ev(cp, 0);
2846 }
2847 
2848 static void
2849 swapgeom_close(struct thread *td, struct swdevt *sw)
2850 {
2851 	struct g_consumer *cp;
2852 
2853 	mtx_lock(&sw_dev_mtx);
2854 	cp = sw->sw_id;
2855 	sw->sw_id = NULL;
2856 	mtx_unlock(&sw_dev_mtx);
2857 
2858 	/*
2859 	 * swapgeom_close() may be called from the biodone context,
2860 	 * where we cannot perform topology changes.  Delegate the
2861 	 * work to the events thread.
2862 	 */
2863 	if (cp != NULL)
2864 		g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
2865 }
2866 
2867 static int
2868 swapongeom_locked(struct cdev *dev, struct vnode *vp)
2869 {
2870 	struct g_provider *pp;
2871 	struct g_consumer *cp;
2872 	static struct g_geom *gp;
2873 	struct swdevt *sp;
2874 	u_long nblks;
2875 	int error;
2876 
2877 	pp = g_dev_getprovider(dev);
2878 	if (pp == NULL)
2879 		return (ENODEV);
2880 	mtx_lock(&sw_dev_mtx);
2881 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2882 		cp = sp->sw_id;
2883 		if (cp != NULL && cp->provider == pp) {
2884 			mtx_unlock(&sw_dev_mtx);
2885 			return (EBUSY);
2886 		}
2887 	}
2888 	mtx_unlock(&sw_dev_mtx);
2889 	if (gp == NULL)
2890 		gp = g_new_geomf(&g_swap_class, "swap");
2891 	cp = g_new_consumer(gp);
2892 	cp->index = 1;	/* Number of active I/Os, plus one for being active. */
2893 	cp->flags |=  G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2894 	g_attach(cp, pp);
2895 	/*
2896 	 * XXX: Every time you think you can improve the margin for
2897 	 * footshooting, somebody depends on the ability to do so:
2898 	 * savecore(8) wants to write to our swapdev so we cannot
2899 	 * set an exclusive count :-(
2900 	 */
2901 	error = g_access(cp, 1, 1, 0);
2902 	if (error != 0) {
2903 		g_detach(cp);
2904 		g_destroy_consumer(cp);
2905 		return (error);
2906 	}
2907 	nblks = pp->mediasize / DEV_BSIZE;
2908 	swaponsomething(vp, cp, nblks, swapgeom_strategy,
2909 	    swapgeom_close, dev2udev(dev),
2910 	    (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
2911 	return (0);
2912 }
2913 
2914 static int
2915 swapongeom(struct vnode *vp)
2916 {
2917 	int error;
2918 
2919 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2920 	if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
2921 		error = ENOENT;
2922 	} else {
2923 		g_topology_lock();
2924 		error = swapongeom_locked(vp->v_rdev, vp);
2925 		g_topology_unlock();
2926 	}
2927 	VOP_UNLOCK(vp, 0);
2928 	return (error);
2929 }
2930 
2931 /*
2932  * VNODE backend
2933  *
2934  * This is used mainly for network filesystem (read: probably only tested
2935  * with NFS) swapfiles.
2936  *
2937  */
2938 
2939 static void
2940 swapdev_strategy(struct buf *bp, struct swdevt *sp)
2941 {
2942 	struct vnode *vp2;
2943 
2944 	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2945 
2946 	vp2 = sp->sw_id;
2947 	vhold(vp2);
2948 	if (bp->b_iocmd == BIO_WRITE) {
2949 		if (bp->b_bufobj)
2950 			bufobj_wdrop(bp->b_bufobj);
2951 		bufobj_wref(&vp2->v_bufobj);
2952 	}
2953 	if (bp->b_bufobj != &vp2->v_bufobj)
2954 		bp->b_bufobj = &vp2->v_bufobj;
2955 	bp->b_vp = vp2;
2956 	bp->b_iooffset = dbtob(bp->b_blkno);
2957 	bstrategy(bp);
2958 	return;
2959 }
2960 
2961 static void
2962 swapdev_close(struct thread *td, struct swdevt *sp)
2963 {
2964 
2965 	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2966 	vrele(sp->sw_vp);
2967 }
2968 
2970 static int
2971 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2972 {
2973 	struct swdevt *sp;
2974 	int error;
2975 
2976 	if (nblks == 0)
2977 		return (ENXIO);
2978 	mtx_lock(&sw_dev_mtx);
2979 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2980 		if (sp->sw_id == vp) {
2981 			mtx_unlock(&sw_dev_mtx);
2982 			return (EBUSY);
2983 		}
2984 	}
2985 	mtx_unlock(&sw_dev_mtx);
2986 
2987 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2988 #ifdef MAC
2989 	error = mac_system_check_swapon(td->td_ucred, vp);
2990 	if (error == 0)
2991 #endif
2992 		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
2993 	(void) VOP_UNLOCK(vp, 0);
2994 	if (error)
2995 		return (error);
2996 
2997 	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
2998 	    NODEV, 0);
2999 	return (0);
3000 }
3001 
3002 static int
3003 sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
3004 {
3005 	int error, new, n;
3006 
3007 	new = nsw_wcount_async_max;
3008 	error = sysctl_handle_int(oidp, &new, 0, req);
3009 	if (error != 0 || req->newptr == NULL)
3010 		return (error);
3011 
3012 	if (new > nswbuf / 2 || new < 1)
3013 		return (EINVAL);
3014 
3015 	mtx_lock(&swbuf_mtx);
3016 	while (nsw_wcount_async_max != new) {
3017 		/*
3018 		 * Adjust difference.  If the current async count is too low,
3019 		 * we will need to squeeze our update in slowly.  Sleep with a
3020 		 * higher priority than getpbuf() to finish faster.
3021 		 */
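		/*
		 * For example, lowering the max from 32 to 8 with 16 writes
		 * in flight first zeroes the available count and sleeps;
		 * completing writes then return buffers until the remaining
		 * difference can be absorbed.
		 */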
3022 		n = new - nsw_wcount_async_max;
3023 		if (nsw_wcount_async + n >= 0) {
3024 			nsw_wcount_async += n;
3025 			nsw_wcount_async_max += n;
3026 			wakeup(&nsw_wcount_async);
3027 		} else {
3028 			nsw_wcount_async_max -= nsw_wcount_async;
3029 			nsw_wcount_async = 0;
3030 			msleep(&nsw_wcount_async, &swbuf_mtx, PSWP,
3031 			    "swpsysctl", 0);
3032 		}
3033 	}
3034 	mtx_unlock(&swbuf_mtx);
3035 
3036 	return (0);
3037 }
3038 
3039 static void
3040 swap_pager_update_writecount(vm_object_t object, vm_offset_t start,
3041     vm_offset_t end)
3042 {
3043 
3044 	VM_OBJECT_WLOCK(object);
3045 	KASSERT((object->flags & OBJ_ANON) == 0,
3046 	    ("Splittable object with writecount"));
3047 	object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
3048 	VM_OBJECT_WUNLOCK(object);
3049 }
3050 
3051 static void
3052 swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
3053     vm_offset_t end)
3054 {
3055 
3056 	VM_OBJECT_WLOCK(object);
3057 	KASSERT((object->flags & OBJ_ANON) == 0,
3058 	    ("Splittable object with writecount"));
3059 	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
3060 	VM_OBJECT_WUNLOCK(object);
3061 }
3062