xref: /freebsd/sys/vm/swap_pager.c (revision 61ba55bcf70f2340f9c943c9571113b3fd8eda69)
/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/blist.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
 * The 64-page limit is due to the radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define	MAX_PAGEOUT_CLUSTER	32
#endif

#if !defined(SWB_NPAGES)
#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
#endif

#define	SWAP_META_PAGES		PCTRIE_COUNT

/*
 * A swblk structure maps each page index within a
 * SWAP_META_PAGES-aligned and sized range to the address of an
 * on-disk swap block (or SWAPBLK_NONE). The collection of these
 * mappings for an entire vm object is implemented as a pc-trie.
 */
struct swblk {
	vm_pindex_t	p;
	daddr_t		d[SWAP_META_PAGES];
};
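
/*
 * Illustrative sketch, not part of the original file: how a page index is
 * split between the pc-trie key and a slot in swblk.d[].  The helper names
 * below (swblk_key_example, swblk_slot_example) are hypothetical and the
 * block is compiled out; the same arithmetic appears later in
 * swap_pager_unswapped().
 */
#if 0
static vm_pindex_t
swblk_key_example(vm_pindex_t pindex)
{
	/* The trie is keyed by the SWAP_META_PAGES-aligned base index. */
	return (rounddown(pindex, SWAP_META_PAGES));
}

static int
swblk_slot_example(vm_pindex_t pindex)
{
	/* The slot within swblk.d[] is the offset from that base. */
	return (pindex % SWAP_META_PAGES);
}
#endif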

static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;	/* Allocate from here next */
static int nswapdev;		/* Number of swap devices */
int swap_pager_avail;
static struct sx swdev_syscall_lock;	/* serialize swap(on|off) */

static __exclusive_cache_line u_long swap_reserved;
static u_long swap_total;
static int sysctl_page_shift(SYSCTL_HANDLER_ARGS);

static SYSCTL_NODE(_vm_stats, OID_AUTO, swap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM swap stats");

SYSCTL_PROC(_vm, OID_AUTO, swap_reserved, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
    &swap_reserved, 0, sysctl_page_shift, "QU",
    "Amount of swap storage needed to back all allocated anonymous memory.");
SYSCTL_PROC(_vm, OID_AUTO, swap_total, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
    &swap_total, 0, sysctl_page_shift, "QU",
    "Total amount of available swap storage.");

int vm_overcommit __read_mostly = 0;
SYSCTL_INT(_vm, VM_OVERCOMMIT, overcommit, CTLFLAG_RW, &vm_overcommit, 0,
    "Configure virtual memory overcommit behavior. See tuning(7) "
    "for details.");
static unsigned long swzone;
SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
    "Actual size of swap metadata zone");
static unsigned long swap_maxpages;
SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
    "Maximum amount of swap supported");

static COUNTER_U64_DEFINE_EARLY(swap_free_deferred);
SYSCTL_COUNTER_U64(_vm_stats_swap, OID_AUTO, free_deferred,
    CTLFLAG_RD, &swap_free_deferred,
    "Number of pages that deferred freeing swap space");

static COUNTER_U64_DEFINE_EARLY(swap_free_completed);
SYSCTL_COUNTER_U64(_vm_stats_swap, OID_AUTO, free_completed,
    CTLFLAG_RD, &swap_free_completed,
    "Number of deferred frees completed");

static int
sysctl_page_shift(SYSCTL_HANDLER_ARGS)
{
	uint64_t newval;
	u_long value = *(u_long *)arg1;

	newval = ((uint64_t)value) << PAGE_SHIFT;
	return (sysctl_handle_64(oidp, &newval, 0, req));
}

static bool
swap_reserve_by_cred_rlimit(u_long pincr, struct ucred *cred, int oc)
{
	struct uidinfo *uip;
	u_long prev;

	uip = cred->cr_ruidinfo;

	prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
	if ((oc & SWAP_RESERVE_RLIMIT_ON) != 0 &&
	    prev + pincr > lim_cur(curthread, RLIMIT_SWAP) &&
	    priv_check(curthread, PRIV_VM_SWAP_NORLIMIT) != 0) {
		prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
		KASSERT(prev >= pincr,
		    ("negative vmsize for uid %d\n", uip->ui_uid));
		return (false);
	}
	return (true);
}

static void
swap_release_by_cred_rlimit(u_long pdecr, struct ucred *cred)
{
	struct uidinfo *uip;
#ifdef INVARIANTS
	u_long prev;
#endif

	uip = cred->cr_ruidinfo;

#ifdef INVARIANTS
	prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
	KASSERT(prev >= pdecr,
	    ("negative vmsize for uid %d\n", uip->ui_uid));
#else
	atomic_subtract_long(&uip->ui_vmsize, pdecr);
#endif
}

static void
swap_reserve_force_rlimit(u_long pincr, struct ucred *cred)
{
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;
	atomic_add_long(&uip->ui_vmsize, pincr);
}

bool
swap_reserve(vm_ooffset_t incr)
{

	return (swap_reserve_by_cred(incr, curthread->td_ucred));
}

bool
swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
	u_long r, s, prev, pincr;
#ifdef RACCT
	int error;
#endif
	int oc;
	static int curfail;
	static struct timeval lastfail;

	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK",
	    __func__, (uintmax_t)incr));

#ifdef RACCT
	if (RACCT_ENABLED()) {
		PROC_LOCK(curproc);
		error = racct_add(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
		if (error != 0)
			return (false);
	}
#endif

	pincr = atop(incr);
	prev = atomic_fetchadd_long(&swap_reserved, pincr);
	r = prev + pincr;
	s = swap_total;
	oc = atomic_load_int(&vm_overcommit);
	if (r > s && (oc & SWAP_RESERVE_ALLOW_NONWIRED) != 0) {
		s += vm_cnt.v_page_count - vm_cnt.v_free_reserved -
		    vm_wire_count();
	}
	if ((oc & SWAP_RESERVE_FORCE_ON) != 0 && r > s &&
	    priv_check(curthread, PRIV_VM_SWAP_NOQUOTA) != 0) {
		prev = atomic_fetchadd_long(&swap_reserved, -pincr);
		KASSERT(prev >= pincr,
		    ("swap_reserved < incr on overcommit fail"));
		goto out_error;
	}

	if (!swap_reserve_by_cred_rlimit(pincr, cred, oc)) {
		prev = atomic_fetchadd_long(&swap_reserved, -pincr);
		KASSERT(prev >= pincr,
		    ("swap_reserved < incr on overcommit fail"));
		goto out_error;
	}

	return (true);

out_error:
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		printf("uid %d, pid %d: swap reservation "
		    "for %jd bytes failed\n",
		    cred->cr_ruidinfo->ui_uid, curproc->p_pid, incr);
	}
#ifdef RACCT
	if (RACCT_ENABLED()) {
		PROC_LOCK(curproc);
		racct_sub(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
	}
#endif

	return (false);
}

void
swap_reserve_force(vm_ooffset_t incr)
{
	u_long pincr;

	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK",
	    __func__, (uintmax_t)incr));

#ifdef RACCT
	if (RACCT_ENABLED()) {
		PROC_LOCK(curproc);
		racct_add_force(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
	}
#endif
	pincr = atop(incr);
	atomic_add_long(&swap_reserved, pincr);
	swap_reserve_force_rlimit(pincr, curthread->td_ucred);
}

void
swap_release(vm_ooffset_t decr)
{
	struct ucred *cred;

	PROC_LOCK(curproc);
	cred = curproc->p_ucred;
	swap_release_by_cred(decr, cred);
	PROC_UNLOCK(curproc);
}

void
swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
{
	u_long pdecr;
#ifdef INVARIANTS
	u_long prev;
#endif

	KASSERT((decr & PAGE_MASK) == 0, ("%s: decr: %ju & PAGE_MASK",
	    __func__, (uintmax_t)decr));

	pdecr = atop(decr);
#ifdef INVARIANTS
	prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
	KASSERT(prev >= pdecr, ("swap_reserved < decr"));
#else
	atomic_subtract_long(&swap_reserved, pdecr);
#endif

	swap_release_by_cred_rlimit(pdecr, cred);
#ifdef RACCT
	if (racct_enable)
		racct_sub_cred(cred, RACCT_SWAP, decr);
#endif
}

static int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static struct mtx swbuf_mtx;	/* to sync nsw_wcount_async */
static int nsw_wcount_async;	/* limit async write buffers */
static int nsw_wcount_async_max;/* assigned maximum			*/
int nsw_cluster_max;		/* maximum VOP I/O allowed		*/

static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
    "Maximum running async swap ops");
static int sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_fragmentation, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_fragmentation, "A",
    "Swap Fragmentation Info");

static struct sx sw_alloc_sx;

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
static uma_zone_t swwbuf_zone;
static uma_zone_t swrbuf_zone;
static uma_zone_t swblk_zone;
static uma_zone_t swpctrie_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
		swap_pager_alloc(void *handle, vm_ooffset_t size,
		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
    int *);
static int	swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, pgo_getpages_iodone_t, void *);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t
		swap_pager_haspage(vm_object_t object, vm_pindex_t pindex,
		    int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp);
static void	swap_pager_update_writecount(vm_object_t object,
    vm_offset_t start, vm_offset_t end);
static void	swap_pager_release_writecount(vm_object_t object,
    vm_offset_t start, vm_offset_t end);
static void	swap_pager_freespace_pgo(vm_object_t object, vm_pindex_t start,
    vm_size_t size);

const struct pagerops swappagerops = {
	.pgo_kvme_type = KVME_TYPE_SWAP,
	.pgo_init =	swap_pager_init,	/* early system initialization of pager	*/
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object */
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
	.pgo_getpages =	swap_pager_getpages,	/* pagein */
	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
	.pgo_putpages =	swap_pager_putpages,	/* pageout */
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page */
	.pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
	.pgo_update_writecount = swap_pager_update_writecount,
	.pgo_release_writecount = swap_pager_release_writecount,
	.pgo_freespace = swap_pager_freespace_pgo,
};

/*
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
    "Maximum size of a swap block in pages");

static void	swp_sizecheck(void);
static void	swp_pager_async_iodone(struct buf *bp);
static bool	swp_pager_swblk_empty(struct swblk *sb, int start, int limit);
static void	swp_pager_free_empty_swblk(vm_object_t, struct swblk *sb);
static int	swapongeom(struct vnode *);
static int	swaponvp(struct thread *, struct vnode *, u_long);
static int	swapoff_one(struct swdevt *sp, struct ucred *cred,
		    u_int flags);

/*
 * Swap bitmap functions
 */
static void	swp_pager_freeswapspace(daddr_t blk, daddr_t npages);
static daddr_t	swp_pager_getswapspace(int *npages);

/*
 * Metadata functions
 */
static daddr_t swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t,
    vm_size_t *);
static void swp_pager_meta_transfer(vm_object_t src, vm_object_t dst,
    vm_pindex_t pindex, vm_pindex_t count, vm_size_t *freed);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_lookup(vm_object_t, vm_pindex_t);

static void
swp_pager_init_freerange(daddr_t *start, daddr_t *num)
{

	*start = SWAPBLK_NONE;
	*num = 0;
}

static void
swp_pager_update_freerange(daddr_t *start, daddr_t *num, daddr_t addr)
{

	if (*start + *num == addr) {
		(*num)++;
	} else {
		swp_pager_freeswapspace(*start, *num);
		*start = addr;
		*num = 1;
	}
}
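
/*
 * Illustrative sketch, not part of the original file: the intended calling
 * pattern for the free-range helpers above, mirroring their use in
 * swap_pager_reserve() and swap_pager_putpages().  Contiguous block
 * addresses are coalesced into one run so swp_pager_freeswapspace() is
 * called once per run instead of once per block.  The function and the
 * example addresses are hypothetical; the block is compiled out.
 */
#if 0
static void
swp_pager_freerange_example(void)
{
	daddr_t s_free, n_free;
	daddr_t addrs[] = { 10, 11, 12, 40 };	/* hypothetical addresses */
	u_int i;

	swp_pager_init_freerange(&s_free, &n_free);
	for (i = 0; i < nitems(addrs); i++) {
		/* 10, 11, 12 accumulate; the jump to 40 flushes the run. */
		swp_pager_update_freerange(&s_free, &n_free, addrs[i]);
	}
	/* Flush the final run (a no-op if n_free is still 0). */
	swp_pager_freeswapspace(s_free, n_free);
}
#endif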

static void *
swblk_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
	    M_USE_RESERVE : 0)));
}

static void
swblk_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(swpctrie_zone, node);
}

PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 */
static void
swp_sizecheck(void)
{

	if (swap_pager_avail < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (swap_pager_avail > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}
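
/*
 * Illustrative sketch, not part of the original file: the lowat/hiwat
 * hysteresis above latches the "almost full" warning on when availability
 * drops below nswap_lowat and releases it only once availability rises
 * above nswap_hiwat, so jitter between the two watermarks cannot flap the
 * indicator.  A hedged, standalone rendering; all names are hypothetical
 * and the block is compiled out.
 */
#if 0
static int
hysteresis_example(int state, int avail, int lowat, int hiwat)
{
	if (avail < lowat)
		state = 1;	/* latch the warning on */
	else if (avail > hiwat)
		state = 0;	/* release only past the high watermark */
	/* Between lowat and hiwat the previous state is retained. */
	return (state);
}
#endif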

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
	sx_init(&sw_alloc_sx, "swspsx");
	sx_init(&swdev_syscall_lock, "swsysc");

	/*
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array, which has maxphys / PAGE_SIZE entries, and our locally
	 * defined MAX_PAGEOUT_CLUSTER.  Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Initialized early so that GEOM_ELI can see it.
	 */
	nsw_cluster_max = min(maxphys / PAGE_SIZE, MAX_PAGEOUT_CLUSTER);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	unsigned long n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.  We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 *
	 * nsw_cluster_max is initialized in swap_pager_init().
	 */

	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_init(&swbuf_mtx, "async swbuf mutex", NULL, MTX_DEF);

	swwbuf_zone = pbuf_zsecond_create("swwbuf", nswbuf / 4);
	swrbuf_zone = pbuf_zsecond_create("swrbuf", nswbuf / 2);

	/*
	 * Initialize our zone, taking the user's requested size or
	 * estimating the number we need based on the number of pages
	 * in the system.
	 */
	n = maxswzone != 0 ? maxswzone / sizeof(struct swblk) :
	    vm_cnt.v_page_count / 2;
	swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
	    pctrie_zone_init, NULL, UMA_ALIGN_PTR, 0);
	swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
	    NULL, NULL, _Alignof(struct swblk) - 1, 0);
	n2 = n;
	do {
		if (uma_zone_reserve_kva(swblk_zone, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	/*
	 * Often uma_zone_reserve_kva() cannot reserve exactly the
	 * requested size.  Account for the difference when
	 * calculating swap_maxpages.
	 */
	n = uma_zone_get_max(swblk_zone);

	if (n < n2)
		printf("Swap blk zone entries changed from %lu to %lu.\n",
		    n2, n);
	/* absolute maximum we can handle assuming 100% efficiency */
	swap_maxpages = n * SWAP_META_PAGES;
	swzone = n * sizeof(struct swblk);
	if (!uma_zone_reserve_kva(swpctrie_zone, n))
		printf("Cannot reserve swap pctrie zone, "
		    "reduce kern.maxswzone.\n");
}

bool
swap_pager_init_object(vm_object_t object, void *handle, struct ucred *cred,
    vm_ooffset_t size, vm_ooffset_t offset)
{
	if (cred != NULL) {
		if (!swap_reserve_by_cred(size, cred))
			return (false);
		crhold(cred);
	}

	object->un_pager.swp.writemappings = 0;
	object->handle = handle;
	if (cred != NULL) {
		object->cred = cred;
		object->charge = size;
	}
	return (true);
}

static vm_object_t
swap_pager_alloc_init(objtype_t otype, void *handle, struct ucred *cred,
    vm_ooffset_t size, vm_ooffset_t offset)
{
	vm_object_t object;

	/*
	 * The un_pager.swp.swp_blks trie is initialized by
	 * vm_object_allocate() to ensure the correct order of
	 * visibility to other threads.
	 */
	object = vm_object_allocate(otype, OFF_TO_IDX(offset +
	    PAGE_MASK + size));

	if (!swap_pager_init_object(object, handle, cred, size, offset)) {
		vm_object_deallocate(object);
		return (NULL);
	}
	return (object);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.
 *
 *	This routine must ensure that no live duplicate is created for
 *	the named object request, which is protected against by
 *	holding the sw_alloc_sx lock in case handle != NULL.
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;

	if (handle != NULL) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() in regards to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
		if (object == NULL) {
			object = swap_pager_alloc_init(OBJT_SWAP, handle, cred,
			    size, offset);
			if (object != NULL) {
				TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
				    object, pager_object_list);
			}
		}
		sx_xunlock(&sw_alloc_sx);
	} else {
		object = swap_pager_alloc_init(OBJT_SWAP, handle, cred,
		    size, offset);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	The object must be locked.
 */
static void
swap_pager_dealloc(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if ((object->flags & OBJ_ANON) == 0 && object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(object);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	object->handle = NULL;
	object->type = OBJT_DEAD;

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Hide the object from swap_pager_swapoff().
	 */
	vm_object_clear_flag(object, OBJ_SWAP);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for up to the requested number of pages.  The
 *	starting swap block number (a page index) is returned or
 *	SWAPBLK_NONE if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	This routine may not sleep.
 *
 *	We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int *io_npages)
{
	daddr_t blk;
	struct swdevt *sp;
	int mpages, npages;

	KASSERT(*io_npages >= 1,
	    ("%s: npages not positive", __func__));
	blk = SWAPBLK_NONE;
	mpages = *io_npages;
	npages = imin(BLIST_MAX_ALLOC, mpages);
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	while (!TAILQ_EMPTY(&swtailq)) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if ((sp->sw_flags & SW_CLOSING) == 0)
			blk = blist_alloc(sp->sw_blist, &npages, mpages);
		if (blk != SWAPBLK_NONE)
			break;
		sp = TAILQ_NEXT(sp, sw_list);
		if (swdevhd == sp) {
			if (npages == 1)
				break;
			mpages = npages - 1;
			npages >>= 1;
		}
	}
	if (blk != SWAPBLK_NONE) {
		*io_npages = npages;
		blk += sp->sw_first;
		sp->sw_used += npages;
		swap_pager_avail -= npages;
		swp_sizecheck();
		swdevhd = TAILQ_NEXT(sp, sw_list);
	} else {
		if (swap_pager_full != 2) {
			printf("swp_pager_getswapspace(%d): failed\n",
			    *io_npages);
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
		swdevhd = NULL;
	}
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}
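
/*
 * Illustrative sketch, not part of the original file: the shape of the
 * back-off in the allocation loop above.  After every full round-robin
 * pass over the devices fails, the request is roughly halved, so a large
 * cluster request degrades gracefully instead of failing outright.  This
 * is simplified -- the real loop passes both a minimum (npages) and a
 * maximum (mpages) to blist_alloc(), which may return more than the
 * minimum.  The function and callback are hypothetical; compiled out.
 */
#if 0
static int
halving_backoff_example(int want, bool (*try_one_pass)(int npages))
{
	int npages;

	for (npages = want; npages >= 1; npages >>= 1) {
		/* One full pass over all swap devices at this size. */
		if (try_one_pass(npages))
			return (npages);
	}
	return (0);	/* not even a single page was available */
}
#endif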

static bool
swp_pager_isondev(daddr_t blk, struct swdevt *sp)
{

	return (blk >= sp->sw_first && blk < sp->sw_end);
}

static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (swp_pager_isondev(bp->b_blkno, sp)) {
			mtx_unlock(&sw_dev_mtx);
			if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
			    unmapped_buf_allowed) {
				bp->b_data = unmapped_buf;
				bp->b_offset = 0;
			} else {
				pmap_qenter((vm_offset_t)bp->b_data,
				    &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
			}
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	This routine may not sleep.
 */
static void
swp_pager_freeswapspace(daddr_t blk, daddr_t npages)
{
	struct swdevt *sp;

	if (npages == 0)
		return;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (swp_pager_isondev(blk, sp)) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SYSCTL_SWAP_FRAGMENTATION() -	produce raw swap space stats
 */
static int
sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct swdevt *sp;
	const char *devname;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (vn_isdisk(sp->sw_vp))
			devname = devtoname(sp->sw_vp->v_rdev);
		else
			devname = "[file]";
		sbuf_printf(&sbuf, "\nFree space on device %s:\n", devname);
		blist_stats(sp->sw_blist, &sbuf);
	}
	mtx_unlock(&sw_dev_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	The object must be locked.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size,
    vm_size_t *freed)
{
	MPASS((object->flags & OBJ_SWAP) != 0);

	swp_pager_meta_free(object, start, size, freed);
}

static void
swap_pager_freespace_pgo(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	MPASS((object->flags & OBJ_SWAP) != 0);

	swp_pager_meta_free(object, start, size, NULL);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
{
	daddr_t addr, blk, n_free, s_free;
	vm_pindex_t i, j;
	int n;

	swp_pager_init_freerange(&s_free, &n_free);
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += n) {
		n = MIN(size - i, INT_MAX);
		blk = swp_pager_getswapspace(&n);
		if (blk == SWAPBLK_NONE) {
			swp_pager_meta_free(object, start, i, NULL);
			VM_OBJECT_WUNLOCK(object);
			return (-1);
		}
		for (j = 0; j < n; ++j) {
			addr = swp_pager_meta_build(object,
			    start + i + j, blk + j);
			if (addr != SWAPBLK_NONE)
				swp_pager_update_freerange(&s_free, &n_free,
				    addr);
		}
	}
	swp_pager_freeswapspace(s_free, n_free);
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

static bool
swp_pager_xfer_source(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t pindex, daddr_t addr)
{
	daddr_t dstaddr __diagused;

	KASSERT((srcobject->flags & OBJ_SWAP) != 0,
	    ("%s: srcobject not swappable", __func__));
	KASSERT((dstobject->flags & OBJ_SWAP) != 0,
	    ("%s: dstobject not swappable", __func__));

	if (swp_pager_meta_lookup(dstobject, pindex) != SWAPBLK_NONE) {
		/* Caller should destroy the source block. */
		return (false);
	}

	/*
	 * Destination has no swapblk and is not resident, transfer source.
	 * swp_pager_meta_build() can sleep.
	 */
	VM_OBJECT_WUNLOCK(srcobject);
	dstaddr = swp_pager_meta_build(dstobject, pindex, addr);
	KASSERT(dstaddr == SWAPBLK_NONE,
	    ("Unexpected destination swapblk"));
	VM_OBJECT_WLOCK(srcobject);

	return (true);
}

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to sleep.  It may sleep allocating metadata
 *	indirectly through swp_pager_meta_build().
 *
 *	The source object contains no vm_page_t's (which is just as well)
 *
 *	The source and destination objects must be locked.
 *	Both object locks may temporarily be released.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	VM_OBJECT_ASSERT_WLOCKED(srcobject);
	VM_OBJECT_ASSERT_WLOCKED(dstobject);

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource && (srcobject->flags & OBJ_ANON) == 0 &&
	    srcobject->handle != NULL) {
		VM_OBJECT_WUNLOCK(srcobject);
		VM_OBJECT_WUNLOCK(dstobject);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(dstobject);
		VM_OBJECT_WLOCK(srcobject);
	}

	/*
	 * Transfer source to destination.
	 */
	swp_pager_meta_transfer(srcobject, dstobject, offset, dstobject->size,
	    NULL);

	/*
	 * Free left over swap blocks in source.
	 */
	if (destroysource)
		swp_pager_meta_free_all(srcobject);
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page.
 */
static boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	daddr_t blk, blk0;
	int i;

	VM_OBJECT_ASSERT_LOCKED(object);
	KASSERT((object->flags & OBJ_SWAP) != 0,
	    ("%s: object not swappable", __func__));

	/*
	 * do we have good backing store at the requested index ?
	 */
	blk0 = swp_pager_meta_lookup(object, pindex);
	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		for (i = 1; i < SWB_NPAGES; i++) {
			if (i > pindex)
				break;
			blk = swp_pager_meta_lookup(object, pindex - i);
			if (blk != blk0 - i)
				break;
		}
		*before = i - 1;
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		for (i = 1; i < SWB_NPAGES; i++) {
			blk = swp_pager_meta_lookup(object, pindex + i);
			if (blk != blk0 + i)
				break;
		}
		*after = i - 1;
	}
	return (TRUE);
}
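
/*
 * Illustrative sketch, not part of the original file: what the before/after
 * scans above compute.  Starting from blk0 = lookup(pindex), a neighbor at
 * distance i only extends the run if its swap block is exactly blk0 +/- i,
 * i.e. physically contiguous on the device; getpages uses the counts to
 * size readbehind and readahead.  The lookup callback is hypothetical and
 * the block is compiled out.
 */
#if 0
static int
contig_after_example(daddr_t (*lookup)(vm_pindex_t), vm_pindex_t pindex,
    daddr_t blk0, int limit)
{
	int i;

	/* Count forward while swap blocks stay physically contiguous. */
	for (i = 1; i < limit; i++)
		if (lookup(pindex + i) != blk0 + i)
			break;
	return (i - 1);
}
#endif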

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not sleep.
 *
 *	The object containing the page may be locked.
 */
static void
swap_pager_unswapped(vm_page_t m)
{
	struct swblk *sb;
	vm_object_t obj;

	/*
	 * Handle enqueuing deferred frees first.  If we do not have the
	 * object lock we wait for the page daemon to clear the space.
	 */
	obj = m->object;
	if (!VM_OBJECT_WOWNED(obj)) {
		VM_PAGE_OBJECT_BUSY_ASSERT(m);
		/*
		 * The caller is responsible for synchronization but we
		 * will harmlessly handle races.  This is typically provided
		 * by only calling unswapped() when a page transitions from
		 * clean to dirty.
		 */
		if ((m->a.flags & (PGA_SWAP_SPACE | PGA_SWAP_FREE)) ==
		    PGA_SWAP_SPACE) {
			vm_page_aflag_set(m, PGA_SWAP_FREE);
			counter_u64_add(swap_free_deferred, 1);
		}
		return;
	}
	if ((m->a.flags & PGA_SWAP_FREE) != 0)
		counter_u64_add(swap_free_completed, 1);
	vm_page_aflag_clear(m, PGA_SWAP_FREE | PGA_SWAP_SPACE);

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */
	KASSERT((m->object->flags & OBJ_SWAP) != 0,
	    ("Free object not swappable"));

	sb = SWAP_PCTRIE_LOOKUP(&m->object->un_pager.swp.swp_blks,
	    rounddown(m->pindex, SWAP_META_PAGES));
	if (sb == NULL)
		return;
	if (sb->d[m->pindex % SWAP_META_PAGES] == SWAPBLK_NONE)
		return;
	swp_pager_freeswapspace(sb->d[m->pindex % SWAP_META_PAGES], 1);
	sb->d[m->pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
	swp_pager_free_empty_swblk(m->object, sb);
}

/*
 * swap_pager_getpages() - bring pages in from swap
 *
 *	Attempt to page in the pages in array "ma" of length "count".  The
 *	caller may optionally specify that additional pages preceding and
 *	succeeding the specified range be paged in.  The number of such pages
 *	is returned in the "rbehind" and "rahead" parameters, and they will
 *	be in the inactive queue upon return.
 *
 *	The pages in "ma" must be busied and will remain busied upon return.
 */
static int
swap_pager_getpages_locked(vm_object_t object, vm_page_t *ma, int count,
    int *rbehind, int *rahead)
{
	struct buf *bp;
	vm_page_t bm, mpred, msucc, p;
	vm_pindex_t pindex;
	daddr_t blk;
	int i, maxahead, maxbehind, reqcount;

	VM_OBJECT_ASSERT_WLOCKED(object);
	reqcount = count;

	KASSERT((object->flags & OBJ_SWAP) != 0,
	    ("%s: object not swappable", __func__));
	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) {
		VM_OBJECT_WUNLOCK(object);
		return (VM_PAGER_FAIL);
	}

	KASSERT(reqcount - 1 <= maxahead,
	    ("page count %d extends beyond swap block", reqcount));

	/*
	 * Do not transfer any pages other than those that are xbusied
	 * when running during a split or collapse operation.  This
	 * prevents clustering from re-creating pages which are being
	 * moved into another object.
	 */
	if ((object->flags & (OBJ_SPLIT | OBJ_DEAD)) != 0) {
		maxahead = reqcount - 1;
		maxbehind = 0;
	}

	/*
	 * Clip the readahead and readbehind ranges to exclude resident pages.
	 */
	if (rahead != NULL) {
		*rahead = imin(*rahead, maxahead - (reqcount - 1));
		pindex = ma[reqcount - 1]->pindex;
		msucc = TAILQ_NEXT(ma[reqcount - 1], listq);
		if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
			*rahead = msucc->pindex - pindex - 1;
	}
	if (rbehind != NULL) {
		*rbehind = imin(*rbehind, maxbehind);
		pindex = ma[0]->pindex;
		mpred = TAILQ_PREV(ma[0], pglist, listq);
		if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
			*rbehind = pindex - mpred->pindex - 1;
	}

	bm = ma[0];
	for (i = 0; i < count; i++)
		ma[i]->oflags |= VPO_SWAPINPROG;

	/*
	 * Allocate readahead and readbehind pages.
	 */
	if (rbehind != NULL) {
		for (i = 1; i <= *rbehind; i++) {
			p = vm_page_alloc(object, ma[0]->pindex - i,
			    VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			p->oflags |= VPO_SWAPINPROG;
			bm = p;
		}
		*rbehind = i - 1;
	}
	if (rahead != NULL) {
		for (i = 0; i < *rahead; i++) {
			p = vm_page_alloc(object,
			    ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			p->oflags |= VPO_SWAPINPROG;
		}
		*rahead = i;
	}
	if (rbehind != NULL)
		count += *rbehind;
	if (rahead != NULL)
		count += *rahead;

	vm_object_pip_add(object, count);

	pindex = bm->pindex;
	blk = swp_pager_meta_lookup(object, pindex);
	KASSERT(blk != SWAPBLK_NONE,
	    ("no swap block containing %p(%jx)", object, (uintmax_t)pindex));

	VM_OBJECT_WUNLOCK(object);
	bp = uma_zalloc(swrbuf_zone, M_WAITOK);
	MPASS((bp->b_flags & B_MAXPHYS) != 0);
	/* Pages cannot leave the object while busy. */
	for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
		MPASS(p->pindex == bm->pindex + i);
		bp->b_pages[i] = p;
	}

	bp->b_flags |= B_PAGING;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = crhold(thread0.td_ucred);
	bp->b_wcred = crhold(thread0.td_ucred);
	bp->b_blkno = blk;
	bp->b_bcount = PAGE_SIZE * count;
	bp->b_bufsize = PAGE_SIZE * count;
	bp->b_npages = count;
	bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
	bp->b_pgafter = rahead != NULL ? *rahead : 0;

	VM_CNT_INC(v_swapin);
	VM_CNT_ADD(v_swappgsin, count);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our ma[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
	 */
	BUF_KERNPROC(bp);
	swp_pager_strategy(bp);

	/*
	 * Wait for the pages we want to complete.  VPO_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the metadata for each page in the request.
	 */
	VM_OBJECT_WLOCK(object);
	/* This could be implemented more efficiently with aflags */
	while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
		ma[0]->oflags |= VPO_SWAPSLEEP;
		VM_CNT_INC(v_intrans);
		if (VM_OBJECT_SLEEP(object, &object->handle, PSWP,
		    "swread", hz * 20)) {
			printf(
"swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
		}
	}
	VM_OBJECT_WUNLOCK(object);

	/*
	 * If we had an unrecoverable read error pages will not be valid.
	 */
	for (i = 0; i < reqcount; i++)
		if (ma[i]->valid != VM_PAGE_BITS_ALL)
			return (VM_PAGER_ERROR);

	return (VM_PAGER_OK);

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

static int
swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count,
    int *rbehind, int *rahead)
{

	VM_OBJECT_WLOCK(object);
	return (swap_pager_getpages_locked(object, ma, count, rbehind, rahead));
}

/*
 * 	swap_pager_getpages_async():
 *
 *	Right now this is emulation of asynchronous operation on top of
 *	swap_pager_getpages().
 */
static int
swap_pager_getpages_async(vm_object_t object, vm_page_t *ma, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{
	int r, error;

	r = swap_pager_getpages(object, ma, count, rbehind, rahead);
	switch (r) {
	case VM_PAGER_OK:
		error = 0;
		break;
	case VM_PAGER_ERROR:
		error = EIO;
		break;
	case VM_PAGER_FAIL:
		error = EINVAL;
		break;
	default:
		panic("unhandled swap_pager_getpages() error %d", r);
	}
	(iodone)(arg, ma, count, error);

	return (r);
}

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
static void
swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
    int flags, int *rtvals)
{
	struct buf *bp;
	daddr_t addr, blk, n_free, s_free;
	vm_page_t mreq;
	int i, j, n;
	bool async;

	KASSERT(count == 0 || ma[0]->object == object,
	    ("%s: object mismatch %p/%p",
	    __func__, object, ma[0]->object));

	VM_OBJECT_WUNLOCK(object);
	async = curproc == pageproc && (flags & VM_PAGER_PUT_SYNC) == 0;
	swp_pager_init_freerange(&s_free, &n_free);

	/*
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		/* Maximum I/O size is limited by maximum swap block size. */
		n = min(count - i, nsw_cluster_max);

		if (async) {
			mtx_lock(&swbuf_mtx);
			while (nsw_wcount_async == 0)
				msleep(&nsw_wcount_async, &swbuf_mtx, PVM,
				    "swbufa", 0);
			nsw_wcount_async--;
			mtx_unlock(&swbuf_mtx);
		}

		/* Get a block of swap of size up to size n. */
		blk = swp_pager_getswapspace(&n);
		if (blk == SWAPBLK_NONE) {
			mtx_lock(&swbuf_mtx);
			if (++nsw_wcount_async == 1)
				wakeup(&nsw_wcount_async);
			mtx_unlock(&swbuf_mtx);
			for (j = 0; j < n; ++j)
				rtvals[i + j] = VM_PAGER_FAIL;
			continue;
		}
		VM_OBJECT_WLOCK(object);
		for (j = 0; j < n; ++j) {
			mreq = ma[i + j];
			vm_page_aflag_clear(mreq, PGA_SWAP_FREE);
			addr = swp_pager_meta_build(mreq->object, mreq->pindex,
			    blk + j);
			if (addr != SWAPBLK_NONE)
				swp_pager_update_freerange(&s_free, &n_free,
				    addr);
			MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
			mreq->oflags |= VPO_SWAPINPROG;
		}
		VM_OBJECT_WUNLOCK(object);

		bp = uma_zalloc(swwbuf_zone, M_WAITOK);
		MPASS((bp->b_flags & B_MAXPHYS) != 0);
		if (async)
			bp->b_flags |= B_ASYNC;
		bp->b_flags |= B_PAGING;
		bp->b_iocmd = BIO_WRITE;

		bp->b_rcred = crhold(thread0.td_ucred);
		bp->b_wcred = crhold(thread0.td_ucred);
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;
		for (j = 0; j < n; j++)
			bp->b_pages[j] = ma[i + j];
		bp->b_npages = n;

		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		VM_CNT_INC(v_swapout);
		VM_CNT_ADD(v_swappgsout, bp->b_npages);

		/*
		 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
		 * can call the async completion routine at the end of a
		 * synchronous I/O operation.  Otherwise, our caller would
		 * perform duplicate unbusy and wakeup operations on the page
		 * and object, respectively.
		 */
		for (j = 0; j < n; j++)
			rtvals[i + j] = VM_PAGER_PEND;

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy.
		 */
		if (async) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			swp_pager_strategy(bp);
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy.
		 */
		bp->b_iodone = bdone;
		swp_pager_strategy(bp);

		/*
		 * Wait for the sync I/O to complete.
		 */
		bwait(bp, PVM, "swwrt");

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bp);
	}
	swp_pager_freeswapspace(s_free, n_free);
	VM_OBJECT_WLOCK(object);
}

/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	This routine may not sleep.
 */
static void
swp_pager_async_iodone(struct buf *bp)
{
	int i;
	vm_object_t object = NULL;

	/*
	 * Report error - unless we ran out of memory, in which case
	 * we've already logged it in swapgeom_strategy().
	 */
	if (bp->b_ioflags & BIO_ERROR && bp->b_error != ENOMEM) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld,"
			"size %ld, error %d\n",
		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * remove the mapping for kernel virtual
	 */
	if (buf_mapped(bp))
		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
	else
		bp->b_data = bp->b_kvabase;

	if (bp->b_npages) {
		object = bp->b_pages[0]->object;
		VM_OBJECT_WLOCK(object);
	}

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
	 * never reallocated as swap.  Redirty the page and continue.
	 */
	for (i = 0; i < bp->b_npages; ++i) {
		vm_page_t m = bp->b_pages[i];

		m->oflags &= ~VPO_SWAPINPROG;
		if (m->oflags & VPO_SWAPSLEEP) {
			m->oflags &= ~VPO_SWAPSLEEP;
			wakeup(&object->handle);
		}

		/* We always have space after I/O, successful or not. */
		vm_page_aflag_set(m, PGA_SWAP_SPACE);

		if (bp->b_ioflags & BIO_ERROR) {
			/*
			 * If an error occurs I'd love to throw the swapblk
			 * away without freeing it back to swapspace, so it
			 * can never be used again.  But I can't from an
			 * interrupt.
			 */
			if (bp->b_iocmd == BIO_READ) {
				/*
				 * NOTE: for reads, m->dirty will probably
				 * be overridden by the original caller of
				 * getpages so don't play cute tricks here.
				 */
				vm_page_invalid(m);
			} else {
				/*
				 * If a write error occurs, reactivate page
				 * so it doesn't clog the inactive list,
				 * then finish the I/O.
				 */
				MPASS(m->dirty == VM_PAGE_BITS_ALL);

				/* PQ_UNSWAPPABLE? */
				vm_page_activate(m);
				vm_page_sunbusy(m);
			}
		} else if (bp->b_iocmd == BIO_READ) {
			/*
			 * NOTE: for reads, m->dirty will probably be
			 * overridden by the original caller of getpages so
			 * we cannot set them in order to free the underlying
			 * swap in a low-swap situation.  I don't think we'd
			 * want to do that anyway, but it was an optimization
			 * that existed in the old swapper for a time before
			 * it got ripped out due to precisely this problem.
			 */
			KASSERT(!pmap_page_is_mapped(m),
			    ("swp_pager_async_iodone: page %p is mapped", m));
			KASSERT(m->dirty == 0,
			    ("swp_pager_async_iodone: page %p is dirty", m));

			vm_page_valid(m);
			if (i < bp->b_pgbefore ||
			    i >= bp->b_npages - bp->b_pgafter)
				vm_page_readahead_finish(m);
		} else {
			/*
			 * For write success, clear the dirty
			 * status, then finish the I/O ( which decrements the
			 * busy count and possibly wakes waiters up ).
1725 			 * A page is only written to swap after a period of
1726 			 * inactivity.  Therefore, we do not expect it to be
1727 			 * reused.
1728 			 */
1729 			KASSERT(!pmap_page_is_write_mapped(m),
1730 			    ("swp_pager_async_iodone: page %p is not write"
1731 			    " protected", m));
1732 			vm_page_undirty(m);
1733 			vm_page_deactivate_noreuse(m);
1734 			vm_page_sunbusy(m);
1735 		}
1736 	}
1737 
1738 	/*
1739 	 * adjust pip.  NOTE: the original parent may still have its own
1740 	 * pip refs on the object.
1741 	 */
1742 	if (object != NULL) {
1743 		vm_object_pip_wakeupn(object, bp->b_npages);
1744 		VM_OBJECT_WUNLOCK(object);
1745 	}
1746 
1747 	/*
1748 	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1749 	 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1750 	 * trigger a KASSERT in relpbuf().
1751 	 */
1752 	if (bp->b_vp) {
1753 		bp->b_vp = NULL;
1754 		bp->b_bufobj = NULL;
1755 	}
1756 	/*
1757 	 * release the physical I/O buffer
1758 	 */
1759 	if (bp->b_flags & B_ASYNC) {
1760 		mtx_lock(&swbuf_mtx);
1761 		if (++nsw_wcount_async == 1)
1762 			wakeup(&nsw_wcount_async);
1763 		mtx_unlock(&swbuf_mtx);
1764 	}
1765 	uma_zfree((bp->b_iocmd == BIO_READ) ? swrbuf_zone : swwbuf_zone, bp);
1766 }
1767 
1768 int
1769 swap_pager_nswapdev(void)
1770 {
1771 
1772 	return (nswapdev);
1773 }
1774 
1775 static void
1776 swp_pager_force_dirty(vm_page_t m)
1777 {
1778 
1779 	vm_page_dirty(m);
1780 	swap_pager_unswapped(m);
1781 	vm_page_launder(m);
1782 }
1783 
1784 u_long
1785 swap_pager_swapped_pages(vm_object_t object)
1786 {
1787 	struct swblk *sb;
1788 	vm_pindex_t pi;
1789 	u_long res;
1790 	int i;
1791 
1792 	VM_OBJECT_ASSERT_LOCKED(object);
1793 
1794 	if (pctrie_is_empty(&object->un_pager.swp.swp_blks))
1795 		return (0);
1796 
1797 	for (res = 0, pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1798 	    &object->un_pager.swp.swp_blks, pi)) != NULL;
1799 	    pi = sb->p + SWAP_META_PAGES) {
1800 		for (i = 0; i < SWAP_META_PAGES; i++) {
1801 			if (sb->d[i] != SWAPBLK_NONE)
1802 				res++;
1803 		}
1804 	}
1805 	return (res);
1806 }
1807 
1808 /*
1809  *	swap_pager_swapoff_object:
1810  *
1811  *	Page in all of the pages that have been paged out for an object
1812  *	to a swap device.
1813  */
1814 static void
1815 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
1816 {
1817 	struct swblk *sb;
1818 	vm_page_t m;
1819 	vm_pindex_t pi;
1820 	daddr_t blk;
1821 	int i, nv, rahead, rv;
1822 
1823 	KASSERT((object->flags & OBJ_SWAP) != 0,
1824 	    ("%s: Object not swappable", __func__));
1825 
1826 	for (pi = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
1827 	    &object->un_pager.swp.swp_blks, pi)) != NULL; ) {
1828 		if ((object->flags & OBJ_DEAD) != 0) {
1829 			/*
1830 			 * Make sure that pending writes finish before
1831 			 * returning.
1832 			 */
1833 			vm_object_pip_wait(object, "swpoff");
1834 			swp_pager_meta_free_all(object);
1835 			break;
1836 		}
1837 		for (i = 0; i < SWAP_META_PAGES; i++) {
1838 			/*
1839 			 * Count the number of contiguous valid blocks.
1840 			 */
1841 			for (nv = 0; nv < SWAP_META_PAGES - i; nv++) {
1842 				blk = sb->d[i + nv];
1843 				if (!swp_pager_isondev(blk, sp) ||
1844 				    blk == SWAPBLK_NONE)
1845 					break;
1846 			}
1847 			if (nv == 0)
1848 				continue;
1849 
1850 			/*
1851 			 * Look for a page corresponding to the first
1852 			 * valid block and ensure that any pending paging
1853 			 * operations on it are complete.  If the page is valid,
1854 			 * mark it dirty and free the swap block.  Try to batch
1855 			 * this operation since it may cause sp to be freed,
1856 			 * meaning that we must restart the scan.  Avoid busying
1857 			 * valid pages since we may block forever on kernel
1858 			 * stack pages.
1859 			 */
1860 			m = vm_page_lookup(object, sb->p + i);
1861 			if (m == NULL) {
1862 				m = vm_page_alloc(object, sb->p + i,
1863 				    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
1864 				if (m == NULL)
1865 					break;
1866 			} else {
1867 				if ((m->oflags & VPO_SWAPINPROG) != 0) {
1868 					m->oflags |= VPO_SWAPSLEEP;
1869 					VM_OBJECT_SLEEP(object, &object->handle,
1870 					    PSWP, "swpoff", 0);
1871 					break;
1872 				}
1873 				if (vm_page_all_valid(m)) {
1874 					do {
1875 						swp_pager_force_dirty(m);
1876 					} while (--nv > 0 &&
1877 					    (m = vm_page_next(m)) != NULL &&
1878 					    vm_page_all_valid(m) &&
1879 					    (m->oflags & VPO_SWAPINPROG) == 0);
1880 					break;
1881 				}
1882 				if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
1883 					break;
1884 			}
1885 
1886 			vm_object_pip_add(object, 1);
1887 			rahead = SWAP_META_PAGES;
1888 			rv = swap_pager_getpages_locked(object, &m, 1, NULL,
1889 			    &rahead);
1890 			if (rv != VM_PAGER_OK)
1891 				panic("%s: read from swap failed: %d",
1892 				    __func__, rv);
1893 			VM_OBJECT_WLOCK(object);
1894 			vm_object_pip_wakeupn(object, 1);
1895 			vm_page_xunbusy(m);
1896 
1897 			/*
1898 			 * The object lock was dropped so we must restart the
1899 			 * scan of this swap block.  Pages paged in during this
1900 			 * iteration will be marked dirty in a future iteration.
1901 			 */
1902 			break;
1903 		}
1904 		if (i == SWAP_META_PAGES)
1905 			pi = sb->p + SWAP_META_PAGES;
1906 	}
1907 }
1908 
1909 /*
1910  *	swap_pager_swapoff:
1911  *
1912  *	Page in all of the pages that have been paged out to the
1913  *	given device.  The corresponding blocks in the bitmap must be
1914  *	marked as allocated and the device must be flagged SW_CLOSING.
1915  *	No processes may remain swapped out to the device.
1916  *
1917  *	This routine may block.
1918  */
1919 static void
1920 swap_pager_swapoff(struct swdevt *sp)
1921 {
1922 	vm_object_t object;
1923 	int retries;
1924 
1925 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
1926 
1927 	retries = 0;
1928 full_rescan:
1929 	mtx_lock(&vm_object_list_mtx);
1930 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
1931 		if ((object->flags & OBJ_SWAP) == 0)
1932 			continue;
1933 		mtx_unlock(&vm_object_list_mtx);
1934 		/* Depends on type-stability. */
1935 		VM_OBJECT_WLOCK(object);
1936 
1937 		/*
1938 		 * Dead objects are eventually terminated on their own.
1939 		 */
1940 		if ((object->flags & OBJ_DEAD) != 0)
1941 			goto next_obj;
1942 
1943 		/*
1944 		 * Sync with fences placed after pctrie
1945 		 * initialization.  We must not access pctrie below
1946 		 * unless we checked that our object is swap and not
1947 		 * dead.
1948 		 */
1949 		atomic_thread_fence_acq();
1950 		if ((object->flags & OBJ_SWAP) == 0)
1951 			goto next_obj;
1952 
1953 		swap_pager_swapoff_object(sp, object);
1954 next_obj:
1955 		VM_OBJECT_WUNLOCK(object);
1956 		mtx_lock(&vm_object_list_mtx);
1957 	}
1958 	mtx_unlock(&vm_object_list_mtx);
1959 
1960 	if (sp->sw_used) {
1961 		/*
1962 		 * Objects may be locked or paging to the device being
1963 		 * removed, so we will miss their pages and need to
1964 		 * make another pass.  We have marked this device as
1965 		 * SW_CLOSING, so the activity should finish soon.
1966 		 */
1967 		retries++;
1968 		if (retries > 100) {
1969 			panic("swapoff: failed to locate %d swap blocks",
1970 			    sp->sw_used);
1971 		}
1972 		pause("swpoff", hz / 20);
1973 		goto full_rescan;
1974 	}
1975 	EVENTHANDLER_INVOKE(swapoff, sp);
1976 }
1977 
1978 /************************************************************************
1979  *				SWAP META DATA 				*
1980  ************************************************************************
1981  *
1982  *	These routines manipulate the swap metadata stored in the
1983  *	OBJT_SWAP object.
1984  *
1985  *	Swap metadata is stored in a per-object radix tree
1986  *	(un_pager.swp.swp_blks) of struct swblk entries, each of which
1987  *	covers SWAP_META_PAGES contiguous page indices.
1988  */
1989 
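/*
 * An illustrative sketch, not from the original file (the helper name
 * is hypothetical): how the routines below address a swap block.  A
 * struct swblk covers SWAP_META_PAGES contiguous page indices; the
 * trie is keyed by the rounded-down index kept in sb->p, and the
 * remainder selects a slot in sb->d[].
 */
static inline daddr_t
example_swblk_slot(struct swblk *sb, vm_pindex_t pindex)
{

	/* The caller located sb by rounddown(pindex, SWAP_META_PAGES). */
	MPASS(sb->p == rounddown(pindex, SWAP_META_PAGES));
	return (sb->d[pindex % SWAP_META_PAGES]);	/* SWAPBLK_NONE if free */
}
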
1990 /*
1991  * SWP_PAGER_SWBLK_EMPTY() - is a range of blocks free?
1992  */
1993 static bool
1994 swp_pager_swblk_empty(struct swblk *sb, int start, int limit)
1995 {
1996 	int i;
1997 
1998 	MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES);
1999 	for (i = start; i < limit; i++) {
2000 		if (sb->d[i] != SWAPBLK_NONE)
2001 			return (false);
2002 	}
2003 	return (true);
2004 }
2005 
2006 /*
2007  * SWP_PAGER_FREE_EMPTY_SWBLK() - free the swblk if it is empty
2008  *
2009  *  Nothing is done if any of the swblk's slots is still in use.
2010  */
2011 static void
2012 swp_pager_free_empty_swblk(vm_object_t object, struct swblk *sb)
2013 {
2014 
2015 	if (swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
2016 		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
2017 		uma_zfree(swblk_zone, sb);
2018 	}
2019 }
2020 
2021 /*
2022  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
2023  *
2024  *	The specified swapblk is added to the object's swap metadata.  If
2025  *	the swapblk is not valid, it is freed instead.  Any previously
2026  *	assigned swapblk is returned.
2027  */
2028 static daddr_t
2029 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
2030 {
2031 	static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
2032 	struct swblk *sb, *sb1;
2033 	vm_pindex_t modpi, rdpi;
2034 	daddr_t prev_swapblk;
2035 	int error, i;
2036 
2037 	VM_OBJECT_ASSERT_WLOCKED(object);
2038 
2039 	rdpi = rounddown(pindex, SWAP_META_PAGES);
2040 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks, rdpi);
2041 	if (sb == NULL) {
2042 		if (swapblk == SWAPBLK_NONE)
2043 			return (SWAPBLK_NONE);
2044 		for (;;) {
2045 			sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
2046 			    pageproc ? M_USE_RESERVE : 0));
2047 			if (sb != NULL) {
2048 				sb->p = rdpi;
2049 				for (i = 0; i < SWAP_META_PAGES; i++)
2050 					sb->d[i] = SWAPBLK_NONE;
2051 				if (atomic_cmpset_int(&swblk_zone_exhausted,
2052 				    1, 0))
2053 					printf("swblk zone ok\n");
2054 				break;
2055 			}
2056 			VM_OBJECT_WUNLOCK(object);
2057 			if (uma_zone_exhausted(swblk_zone)) {
2058 				if (atomic_cmpset_int(&swblk_zone_exhausted,
2059 				    0, 1))
2060 					printf("swap blk zone exhausted, "
2061 					    "increase kern.maxswzone\n");
2062 				vm_pageout_oom(VM_OOM_SWAPZ);
2063 				pause("swzonxb", 10);
2064 			} else
2065 				uma_zwait(swblk_zone);
2066 			VM_OBJECT_WLOCK(object);
2067 			sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
2068 			    rdpi);
2069 			if (sb != NULL)
2070 				/*
2071 				 * Somebody swapped out a nearby page,
2072 				 * allocating swblk at the rdpi index,
2073 				 * while we dropped the object lock.
2074 				 */
2075 				goto allocated;
2076 		}
2077 		for (;;) {
2078 			error = SWAP_PCTRIE_INSERT(
2079 			    &object->un_pager.swp.swp_blks, sb);
2080 			if (error == 0) {
2081 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
2082 				    1, 0))
2083 					printf("swpctrie zone ok\n");
2084 				break;
2085 			}
2086 			VM_OBJECT_WUNLOCK(object);
2087 			if (uma_zone_exhausted(swpctrie_zone)) {
2088 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
2089 				    0, 1))
2090 					printf("swap pctrie zone exhausted, "
2091 					    "increase kern.maxswzone\n");
2092 				vm_pageout_oom(VM_OOM_SWAPZ);
2093 				pause("swzonxp", 10);
2094 			} else
2095 				uma_zwait(swpctrie_zone);
2096 			VM_OBJECT_WLOCK(object);
2097 			sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
2098 			    rdpi);
2099 			if (sb1 != NULL) {
2100 				uma_zfree(swblk_zone, sb);
2101 				sb = sb1;
2102 				goto allocated;
2103 			}
2104 		}
2105 	}
2106 allocated:
2107 	MPASS(sb->p == rdpi);
2108 
2109 	modpi = pindex % SWAP_META_PAGES;
2110 	/* Return prior contents of metadata. */
2111 	prev_swapblk = sb->d[modpi];
2112 	/* Enter block into metadata. */
2113 	sb->d[modpi] = swapblk;
2114 
2115 	/*
2116 	 * Free the swblk if we end up with an empty page run.
2117 	 */
2118 	if (swapblk == SWAPBLK_NONE)
2119 		swp_pager_free_empty_swblk(object, sb);
2120 	return (prev_swapblk);
2121 }
2122 
2123 /*
2124  * SWP_PAGER_META_TRANSFER() - free a range of blocks in the srcobject's swap
2125  * metadata, or transfer it into dstobject.
2126  *
2127  *	This routine will free swap metadata structures as they are cleaned
2128  *	out.
2129  */
2130 static void
2131 swp_pager_meta_transfer(vm_object_t srcobject, vm_object_t dstobject,
2132     vm_pindex_t pindex, vm_pindex_t count, vm_size_t *moved)
2133 {
2134 	struct swblk *sb;
2135 	vm_page_t m;
2136 	daddr_t n_free, s_free;
2137 	vm_pindex_t offset, last;
2138 	vm_size_t mc;
2139 	int i, limit, start;
2140 
2141 	VM_OBJECT_ASSERT_WLOCKED(srcobject);
2142 	MPASS(moved == NULL || dstobject == NULL);
2143 
2144 	mc = 0;
2145 	m = NULL;
2146 	if (count == 0 || pctrie_is_empty(&srcobject->un_pager.swp.swp_blks))
2147 		goto out;
2148 
2149 	swp_pager_init_freerange(&s_free, &n_free);
2150 	offset = pindex;
2151 	last = pindex + count;
2152 	for (;;) {
2153 		sb = SWAP_PCTRIE_LOOKUP_GE(&srcobject->un_pager.swp.swp_blks,
2154 		    rounddown(pindex, SWAP_META_PAGES));
2155 		if (sb == NULL || sb->p >= last)
2156 			break;
2157 		start = pindex > sb->p ? pindex - sb->p : 0;
2158 		limit = last - sb->p < SWAP_META_PAGES ? last - sb->p :
2159 		    SWAP_META_PAGES;
2160 		for (i = start; i < limit; i++) {
2161 			if (sb->d[i] == SWAPBLK_NONE)
2162 				continue;
2163 			if (dstobject == NULL ||
2164 			    !swp_pager_xfer_source(srcobject, dstobject,
2165 			    sb->p + i - offset, sb->d[i])) {
2166 				swp_pager_update_freerange(&s_free, &n_free,
2167 				    sb->d[i]);
2168 			}
2169 			if (moved != NULL) {
2170 				if (m != NULL && m->pindex != pindex + i - 1)
2171 					m = NULL;
2172 				m = m != NULL ? vm_page_next(m) :
2173 				    vm_page_lookup(srcobject, pindex + i);
2174 				if (m == NULL || vm_page_none_valid(m))
2175 					mc++;
2176 			}
2177 			sb->d[i] = SWAPBLK_NONE;
2178 		}
2179 		pindex = sb->p + SWAP_META_PAGES;
2180 		if (swp_pager_swblk_empty(sb, 0, start) &&
2181 		    swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) {
2182 			SWAP_PCTRIE_REMOVE(&srcobject->un_pager.swp.swp_blks,
2183 			    sb->p);
2184 			uma_zfree(swblk_zone, sb);
2185 		}
2186 	}
2187 	swp_pager_freeswapspace(s_free, n_free);
2188 out:
2189 	if (moved != NULL)
2190 		*moved = mc;
2191 }
2192 
2193 /*
2194  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2195  *
2196  *	The requested range of blocks is freed, with any associated swap
2197  *	returned to the swap bitmap.
2198  *
2199  *	This routine will free swap metadata structures as they are cleaned
2200  *	out.  This routine does *NOT* operate on swap metadata associated
2201  *	with resident pages.
2202  */
2203 static void
2204 swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count,
2205     vm_size_t *freed)
2206 {
2207 	swp_pager_meta_transfer(object, NULL, pindex, count, freed);
2208 }
2209 
2210 /*
2211  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2212  *
2213  *	This routine locates and destroys all swap metadata associated with
2214  *	an object.
2215  */
2216 static void
2217 swp_pager_meta_free_all(vm_object_t object)
2218 {
2219 	struct swblk *sb;
2220 	daddr_t n_free, s_free;
2221 	vm_pindex_t pindex;
2222 	int i;
2223 
2224 	VM_OBJECT_ASSERT_WLOCKED(object);
2225 
2226 	if (pctrie_is_empty(&object->un_pager.swp.swp_blks))
2227 		return;
2228 
2229 	swp_pager_init_freerange(&s_free, &n_free);
2230 	for (pindex = 0; (sb = SWAP_PCTRIE_LOOKUP_GE(
2231 	    &object->un_pager.swp.swp_blks, pindex)) != NULL;) {
2232 		pindex = sb->p + SWAP_META_PAGES;
2233 		for (i = 0; i < SWAP_META_PAGES; i++) {
2234 			if (sb->d[i] == SWAPBLK_NONE)
2235 				continue;
2236 			swp_pager_update_freerange(&s_free, &n_free, sb->d[i]);
2237 		}
2238 		SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
2239 		uma_zfree(swblk_zone, sb);
2240 	}
2241 	swp_pager_freeswapspace(s_free, n_free);
2242 }
2243 
2244 /*
2245  * SWP_PAGER_META_LOOKUP() - look up a swapblk assignment.
2246  *
2247  *	This routine looks up the swapblk assigned to the given page
2248  *	index in the object's swap metadata.  It returns the swapblk
2249  *	found, or SWAPBLK_NONE if no block is assigned.
2250  *
2251  *	When acting on a busy resident page and paging is in progress, we
2252  *	have to wait until paging is complete but otherwise can act on the
2253  *	busy page.
2254  */
2255 static daddr_t
2256 swp_pager_meta_lookup(vm_object_t object, vm_pindex_t pindex)
2257 {
2258 	struct swblk *sb;
2259 
2260 	VM_OBJECT_ASSERT_LOCKED(object);
2261 
2262 	/*
2263 	 * The meta data only exists if the object is OBJT_SWAP
2264 	 * and even then might not be allocated yet.
2265 	 */
2266 	KASSERT((object->flags & OBJ_SWAP) != 0,
2267 	    ("Lookup object not swappable"));
2268 
2269 	sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
2270 	    rounddown(pindex, SWAP_META_PAGES));
2271 	if (sb == NULL)
2272 		return (SWAPBLK_NONE);
2273 	return (sb->d[pindex % SWAP_META_PAGES]);
2274 }
2275 
2276 /*
2277  * Returns the least page index which is greater than or equal to the
2278  * parameter pindex and for which there is a swap block allocated.
2279  * Returns object's size if the object's type is not swap or if there
2280  * are no allocated swap blocks for the object after the requested
2281  * pindex.
2282  */
2283 vm_pindex_t
2284 swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
2285 {
2286 	struct swblk *sb;
2287 	int i;
2288 
2289 	VM_OBJECT_ASSERT_LOCKED(object);
2290 	MPASS((object->flags & OBJ_SWAP) != 0);
2291 
2292 	if (pctrie_is_empty(&object->un_pager.swp.swp_blks))
2293 		return (object->size);
2294 	sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2295 	    rounddown(pindex, SWAP_META_PAGES));
2296 	if (sb == NULL)
2297 		return (object->size);
2298 	if (sb->p < pindex) {
2299 		for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
2300 			if (sb->d[i] != SWAPBLK_NONE)
2301 				return (sb->p + i);
2302 		}
2303 		sb = SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
2304 		    roundup(pindex, SWAP_META_PAGES));
2305 		if (sb == NULL)
2306 			return (object->size);
2307 	}
2308 	for (i = 0; i < SWAP_META_PAGES; i++) {
2309 		if (sb->d[i] != SWAPBLK_NONE)
2310 			return (sb->p + i);
2311 	}
2312 
2313 	/*
2314 	 * We get here if a swblk is present in the trie but it
2315 	 * doesn't map any blocks.
2316 	 */
2317 	MPASS(0);
2318 	return (object->size);
2319 }
2320 
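/*
 * An illustrative use of swap_pager_find_least(); the helper below is
 * hypothetical, not part of this file.  It visits every page index of
 * a locked, swappable object that currently has a swap block.
 */
static void
example_visit_swapped(vm_object_t object)
{
	vm_pindex_t pi;

	VM_OBJECT_ASSERT_LOCKED(object);
	for (pi = swap_pager_find_least(object, 0); pi < object->size;
	    pi = swap_pager_find_least(object, pi + 1)) {
		/* Index pi has a swap block allocated. */
	}
}
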
2321 /*
2322  * System call swapon(name) enables swapping on device name,
2323  * which must be a disk device or a regular file on a network
2324  * filesystem.  Return EBUSY if already swapping on this device.
2325  */
2326 #ifndef _SYS_SYSPROTO_H_
2327 struct swapon_args {
2328 	char *name;
2329 };
2330 #endif
2331 
2332 int
2333 sys_swapon(struct thread *td, struct swapon_args *uap)
2334 {
2335 	struct vattr attr;
2336 	struct vnode *vp;
2337 	struct nameidata nd;
2338 	int error;
2339 
2340 	error = priv_check(td, PRIV_SWAPON);
2341 	if (error)
2342 		return (error);
2343 
2344 	sx_xlock(&swdev_syscall_lock);
2345 
2346 	/*
2347 	 * Swap metadata may not fit in the KVM if we have physical
2348 	 * memory of >1GB.
2349 	 */
2350 	if (swblk_zone == NULL) {
2351 		error = ENOMEM;
2352 		goto done;
2353 	}
2354 
2355 	NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
2356 	    UIO_USERSPACE, uap->name);
2357 	error = namei(&nd);
2358 	if (error)
2359 		goto done;
2360 
2361 	NDFREE_PNBUF(&nd);
2362 	vp = nd.ni_vp;
2363 
2364 	if (vn_isdisk_error(vp, &error)) {
2365 		error = swapongeom(vp);
2366 	} else if (vp->v_type == VREG &&
2367 	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2368 	    (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
2369 		/*
2370 		 * Allow direct swapping to NFS regular files in the same
2371 		 * way that nfs_mountroot() sets up diskless swapping.
2372 		 */
2373 		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2374 	}
2375 
2376 	if (error != 0)
2377 		vput(vp);
2378 	else
2379 		VOP_UNLOCK(vp);
2380 done:
2381 	sx_xunlock(&swdev_syscall_lock);
2382 	return (error);
2383 }
2384 
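/*
 * A minimal userland sketch of exercising this system call through
 * swapon(2); the device path is hypothetical and error handling is
 * trimmed.
 */
#if 0	/* userland example, not kernel code */
#include <stdio.h>
#include <unistd.h>

int
main(void)
{

	if (swapon("/dev/ada0p3") == -1)	/* hypothetical device */
		perror("swapon");
	return (0);
}
#endif
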
2385 /*
2386  * Check that the total amount of swap currently configured does not
2387  * exceed half the theoretical maximum.  If it does, print a warning
2388  * message.
2389  */
2390 static void
2391 swapon_check_swzone(void)
2392 {
2393 
2394 	/* recommend using no more than half that amount */
2395 	if (swap_total > swap_maxpages / 2) {
2396 		printf("warning: total configured swap (%lu pages) "
2397 		    "exceeds maximum recommended amount (%lu pages).\n",
2398 		    swap_total, swap_maxpages / 2);
2399 		printf("warning: increase kern.maxswzone "
2400 		    "or reduce amount of swap.\n");
2401 	}
2402 }
2403 
2404 static void
2405 swaponsomething(struct vnode *vp, void *id, u_long nblks,
2406     sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
2407 {
2408 	struct swdevt *sp, *tsp;
2409 	daddr_t dvbase;
2410 
2411 	/*
2412 	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2413 	 * First round nblks down to a page boundary, then convert.
2414 	 *
2415 	 * sp->sw_nblks is in page-sized chunks now too.
2416 	 */
2417 	nblks &= ~(ctodb(1) - 1);
2418 	nblks = dbtoc(nblks);
2419 
2420 	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2421 	sp->sw_blist = blist_create(nblks, M_WAITOK);
2422 	sp->sw_vp = vp;
2423 	sp->sw_id = id;
2424 	sp->sw_dev = dev;
2425 	sp->sw_nblks = nblks;
2426 	sp->sw_used = 0;
2427 	sp->sw_strategy = strategy;
2428 	sp->sw_close = close;
2429 	sp->sw_flags = flags;
2430 
2431 	/*
2432 	 * Do not free the first blocks in order to avoid overwriting
2433 	 * any BSD label at the front of the partition.
2434 	 */
2435 	blist_free(sp->sw_blist, howmany(BBSIZE, PAGE_SIZE),
2436 	    nblks - howmany(BBSIZE, PAGE_SIZE));
2437 
2438 	dvbase = 0;
2439 	mtx_lock(&sw_dev_mtx);
2440 	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
2441 		if (tsp->sw_end >= dvbase) {
2442 			/*
2443 			 * We put one uncovered page between the devices
2444 			 * in order to definitively prevent any cross-device
2445 			 * I/O requests
2446 			 * I/O requests.
2447 			dvbase = tsp->sw_end + 1;
2448 		}
2449 	}
2450 	sp->sw_first = dvbase;
2451 	sp->sw_end = dvbase + nblks;
2452 	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
2453 	nswapdev++;
2454 	swap_pager_avail += nblks - howmany(BBSIZE, PAGE_SIZE);
2455 	swap_total += nblks;
2456 	swapon_check_swzone();
2457 	swp_sizecheck();
2458 	mtx_unlock(&sw_dev_mtx);
2459 	EVENTHANDLER_INVOKE(swapon, sp);
2460 }
2461 
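/*
 * A worked example of the unit conversion at the top of
 * swaponsomething(), assuming the common PAGE_SIZE of 4096 and
 * DEV_BSIZE of 512, so that ctodb(1) == 8: a device of 1048583
 * 512-byte sectors is first masked down to 1048576 sectors and then
 * becomes dbtoc(1048576) == 131072 page-sized swap blocks.
 */
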
2462 /*
2463  * SYSCALL: swapoff(devname)
2464  *
2465  * Disable swapping on the given device.
2466  *
2467  * XXX: Badly designed system call: it should use a device index
2468  * rather than filename as specification.  We keep sw_vp around
2469  * only to make this work.
2470  */
2471 static int
2472 kern_swapoff(struct thread *td, const char *name, enum uio_seg name_seg,
2473     u_int flags)
2474 {
2475 	struct vnode *vp;
2476 	struct nameidata nd;
2477 	struct swdevt *sp;
2478 	int error;
2479 
2480 	error = priv_check(td, PRIV_SWAPOFF);
2481 	if (error != 0)
2482 		return (error);
2483 	if ((flags & ~(SWAPOFF_FORCE)) != 0)
2484 		return (EINVAL);
2485 
2486 	sx_xlock(&swdev_syscall_lock);
2487 
2488 	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, name_seg, name);
2489 	error = namei(&nd);
2490 	if (error)
2491 		goto done;
2492 	NDFREE_PNBUF(&nd);
2493 	vp = nd.ni_vp;
2494 
2495 	mtx_lock(&sw_dev_mtx);
2496 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2497 		if (sp->sw_vp == vp)
2498 			break;
2499 	}
2500 	mtx_unlock(&sw_dev_mtx);
2501 	if (sp == NULL) {
2502 		error = EINVAL;
2503 		goto done;
2504 	}
2505 	error = swapoff_one(sp, td->td_ucred, flags);
2506 done:
2507 	sx_xunlock(&swdev_syscall_lock);
2508 	return (error);
2509 }
2510 
2512 #ifdef COMPAT_FREEBSD13
2513 int
2514 freebsd13_swapoff(struct thread *td, struct freebsd13_swapoff_args *uap)
2515 {
2516 	return (kern_swapoff(td, uap->name, UIO_USERSPACE, 0));
2517 }
2518 #endif
2519 
2520 int
2521 sys_swapoff(struct thread *td, struct swapoff_args *uap)
2522 {
2523 	return (kern_swapoff(td, uap->name, UIO_USERSPACE, uap->flags));
2524 }
2525 
2526 static int
2527 swapoff_one(struct swdevt *sp, struct ucred *cred, u_int flags)
2528 {
2529 	u_long nblks;
2530 #ifdef MAC
2531 	int error;
2532 #endif
2533 
2534 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
2535 #ifdef MAC
2536 	(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
2537 	error = mac_system_check_swapoff(cred, sp->sw_vp);
2538 	(void) VOP_UNLOCK(sp->sw_vp);
2539 	if (error != 0)
2540 		return (error);
2541 #endif
2542 	nblks = sp->sw_nblks;
2543 
2544 	/*
2545 	 * We can turn off this swap device safely only if the
2546 	 * available virtual memory in the system will fit the amount
2547 	 * of data we will have to page back in, plus an epsilon so
2548 	 * the system doesn't become critically low on swap space.
2549 	 * The vm_free_count() part does not account for, e.g., clean
2550 	 * pages that can be immediately reclaimed without paging, so
2551 	 * this is a very rough estimation.
2552 	 *
2553 	 * On the other hand, not turning swap off on swapoff_all()
2554 	 * means that we can lose swap data when filesystems go away,
2555 	 * which is arguably worse.
2556 	 */
2557 	if ((flags & SWAPOFF_FORCE) == 0 &&
2558 	    vm_free_count() + swap_pager_avail < nblks + nswap_lowat)
2559 		return (ENOMEM);
2560 
2561 	/*
2562 	 * Prevent further allocations on this device.
2563 	 */
2564 	mtx_lock(&sw_dev_mtx);
2565 	sp->sw_flags |= SW_CLOSING;
2566 	swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
2567 	swap_total -= nblks;
2568 	mtx_unlock(&sw_dev_mtx);
2569 
2570 	/*
2571 	 * Page in the contents of the device and close it.
2572 	 */
2573 	swap_pager_swapoff(sp);
2574 
2575 	sp->sw_close(curthread, sp);
2576 	mtx_lock(&sw_dev_mtx);
2577 	sp->sw_id = NULL;
2578 	TAILQ_REMOVE(&swtailq, sp, sw_list);
2579 	nswapdev--;
2580 	if (nswapdev == 0) {
2581 		swap_pager_full = 2;
2582 		swap_pager_almost_full = 1;
2583 	}
2584 	if (swdevhd == sp)
2585 		swdevhd = NULL;
2586 	mtx_unlock(&sw_dev_mtx);
2587 	blist_destroy(sp->sw_blist);
2588 	free(sp, M_VMPGDATA);
2589 	return (0);
2590 }
2591 
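/*
 * A worked example of the safety check in swapoff_one(), with purely
 * illustrative numbers: given vm_free_count() == 524288 pages,
 * swap_pager_avail == 262144 pages and nswap_lowat == 128, a device
 * with sw_nblks up to 786304 pages can be removed without
 * SWAPOFF_FORCE, while a larger device fails with ENOMEM.
 */
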
2592 void
2593 swapoff_all(void)
2594 {
2595 	struct swdevt *sp, *spt;
2596 	const char *devname;
2597 	int error;
2598 
2599 	sx_xlock(&swdev_syscall_lock);
2600 
2601 	mtx_lock(&sw_dev_mtx);
2602 	TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
2603 		mtx_unlock(&sw_dev_mtx);
2604 		if (vn_isdisk(sp->sw_vp))
2605 			devname = devtoname(sp->sw_vp->v_rdev);
2606 		else
2607 			devname = "[file]";
2608 		error = swapoff_one(sp, thread0.td_ucred, SWAPOFF_FORCE);
2609 		if (error != 0) {
2610 			printf("Cannot remove swap device %s (error=%d), "
2611 			    "skipping.\n", devname, error);
2612 		} else if (bootverbose) {
2613 			printf("Swap device %s removed.\n", devname);
2614 		}
2615 		mtx_lock(&sw_dev_mtx);
2616 	}
2617 	mtx_unlock(&sw_dev_mtx);
2618 
2619 	sx_xunlock(&swdev_syscall_lock);
2620 }
2621 
2622 void
2623 swap_pager_status(int *total, int *used)
2624 {
2625 
2626 	*total = swap_total;
2627 	*used = swap_total - swap_pager_avail -
2628 	    nswapdev * howmany(BBSIZE, PAGE_SIZE);
2629 }
2630 
2631 int
2632 swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
2633 {
2634 	struct swdevt *sp;
2635 	const char *tmp_devname;
2636 	int error, n;
2637 
2638 	n = 0;
2639 	error = ENOENT;
2640 	mtx_lock(&sw_dev_mtx);
2641 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2642 		if (n != name) {
2643 			n++;
2644 			continue;
2645 		}
2646 		xs->xsw_version = XSWDEV_VERSION;
2647 		xs->xsw_dev = sp->sw_dev;
2648 		xs->xsw_flags = sp->sw_flags;
2649 		xs->xsw_nblks = sp->sw_nblks;
2650 		xs->xsw_used = sp->sw_used;
2651 		if (devname != NULL) {
2652 			if (vn_isdisk(sp->sw_vp))
2653 				tmp_devname = devtoname(sp->sw_vp->v_rdev);
2654 			else
2655 				tmp_devname = "[file]";
2656 			strncpy(devname, tmp_devname, len);
2657 		}
2658 		error = 0;
2659 		break;
2660 	}
2661 	mtx_unlock(&sw_dev_mtx);
2662 	return (error);
2663 }
2664 
2665 #if defined(COMPAT_FREEBSD11)
2666 #define XSWDEV_VERSION_11	1
2667 struct xswdev11 {
2668 	u_int	xsw_version;
2669 	uint32_t xsw_dev;
2670 	int	xsw_flags;
2671 	int	xsw_nblks;
2672 	int     xsw_used;
2673 };
2674 #endif
2675 
2676 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2677 struct xswdev32 {
2678 	u_int	xsw_version;
2679 	u_int	xsw_dev1, xsw_dev2;
2680 	int	xsw_flags;
2681 	int	xsw_nblks;
2682 	int     xsw_used;
2683 };
2684 #endif
2685 
2686 static int
2687 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2688 {
2689 	struct xswdev xs;
2690 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2691 	struct xswdev32 xs32;
2692 #endif
2693 #if defined(COMPAT_FREEBSD11)
2694 	struct xswdev11 xs11;
2695 #endif
2696 	int error;
2697 
2698 	if (arg2 != 1)			/* name length */
2699 		return (EINVAL);
2700 
2701 	memset(&xs, 0, sizeof(xs));
2702 	error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
2703 	if (error != 0)
2704 		return (error);
2705 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2706 	if (req->oldlen == sizeof(xs32)) {
2707 		memset(&xs32, 0, sizeof(xs32));
2708 		xs32.xsw_version = XSWDEV_VERSION;
2709 		xs32.xsw_dev1 = xs.xsw_dev;
2710 		xs32.xsw_dev2 = xs.xsw_dev >> 32;
2711 		xs32.xsw_flags = xs.xsw_flags;
2712 		xs32.xsw_nblks = xs.xsw_nblks;
2713 		xs32.xsw_used = xs.xsw_used;
2714 		error = SYSCTL_OUT(req, &xs32, sizeof(xs32));
2715 		return (error);
2716 	}
2717 #endif
2718 #if defined(COMPAT_FREEBSD11)
2719 	if (req->oldlen == sizeof(xs11)) {
2720 		memset(&xs11, 0, sizeof(xs11));
2721 		xs11.xsw_version = XSWDEV_VERSION_11;
2722 		xs11.xsw_dev = xs.xsw_dev; /* truncation */
2723 		xs11.xsw_flags = xs.xsw_flags;
2724 		xs11.xsw_nblks = xs.xsw_nblks;
2725 		xs11.xsw_used = xs.xsw_used;
2726 		error = SYSCTL_OUT(req, &xs11, sizeof(xs11));
2727 		return (error);
2728 	}
2729 #endif
2730 	error = SYSCTL_OUT(req, &xs, sizeof(xs));
2731 	return (error);
2732 }
2733 
2734 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2735     "Number of swap devices");
2736 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
2737     sysctl_vm_swap_info,
2738     "Swap statistics by device");
2739 
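/*
 * An illustrative userland consumer of the vm.swap_info node declared
 * above; this is a sketch, not from this file, and it assumes struct
 * xswdev is reachable through <vm/swap_pager.h>, as libkvm does.
 */
#if 0	/* userland example, not kernel code */
#include <sys/param.h>
#include <sys/sysctl.h>
#include <vm/swap_pager.h>
#include <stdio.h>

static void
print_swap_devices(void)
{
	struct xswdev xsw;
	size_t mibsize, size;
	int mib[CTL_MAXNAME], n;

	mibsize = nitems(mib) - 1;
	if (sysctlnametomib("vm.swap_info", mib, &mibsize) == -1)
		return;
	for (n = 0; ; n++) {
		mib[mibsize] = n;	/* device index appended to the OID */
		size = sizeof(xsw);
		if (sysctl(mib, mibsize + 1, &xsw, &size, NULL, 0) == -1)
			break;		/* ENOENT past the last device */
		printf("device %d: %d blocks, %d used\n",
		    n, xsw.xsw_nblks, xsw.xsw_used);
	}
}
#endif
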
2740 /*
2741  * Count the approximate swap usage in pages for a vmspace.  The
2742  * shadowed or not yet copied on write swap blocks are not accounted.
2743  * The map must be locked.
2744  */
2745 long
2746 vmspace_swap_count(struct vmspace *vmspace)
2747 {
2748 	vm_map_t map;
2749 	vm_map_entry_t cur;
2750 	vm_object_t object;
2751 	struct swblk *sb;
2752 	vm_pindex_t e, pi;
2753 	long count;
2754 	int i;
2755 
2756 	map = &vmspace->vm_map;
2757 	count = 0;
2758 
2759 	VM_MAP_ENTRY_FOREACH(cur, map) {
2760 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2761 			continue;
2762 		object = cur->object.vm_object;
2763 		if (object == NULL || (object->flags & OBJ_SWAP) == 0)
2764 			continue;
2765 		VM_OBJECT_RLOCK(object);
2766 		if ((object->flags & OBJ_SWAP) == 0)
2767 			goto unlock;
2768 		pi = OFF_TO_IDX(cur->offset);
2769 		e = pi + OFF_TO_IDX(cur->end - cur->start);
2770 		for (;; pi = sb->p + SWAP_META_PAGES) {
2771 			sb = SWAP_PCTRIE_LOOKUP_GE(
2772 			    &object->un_pager.swp.swp_blks, pi);
2773 			if (sb == NULL || sb->p >= e)
2774 				break;
2775 			for (i = 0; i < SWAP_META_PAGES; i++) {
2776 				if (sb->p + i < e &&
2777 				    sb->d[i] != SWAPBLK_NONE)
2778 					count++;
2779 			}
2780 		}
2781 unlock:
2782 		VM_OBJECT_RUNLOCK(object);
2783 	}
2784 	return (count);
2785 }
2786 
2787 /*
2788  * GEOM backend
2789  *
2790  * Swapping onto disk devices.
2791  *
2792  */
2793 
2794 static g_orphan_t swapgeom_orphan;
2795 
2796 static struct g_class g_swap_class = {
2797 	.name = "SWAP",
2798 	.version = G_VERSION,
2799 	.orphan = swapgeom_orphan,
2800 };
2801 
2802 DECLARE_GEOM_CLASS(g_swap_class, g_class);
2803 
2804 static void
2805 swapgeom_close_ev(void *arg, int flags)
2806 {
2807 	struct g_consumer *cp;
2808 
2809 	cp = arg;
2810 	g_access(cp, -1, -1, 0);
2811 	g_detach(cp);
2812 	g_destroy_consumer(cp);
2813 }
2814 
2815 /*
2816  * Add a reference to the g_consumer for an inflight transaction.
2817  */
2818 static void
2819 swapgeom_acquire(struct g_consumer *cp)
2820 {
2821 
2822 	mtx_assert(&sw_dev_mtx, MA_OWNED);
2823 	cp->index++;
2824 }
2825 
2826 /*
2827  * Remove a reference from the g_consumer.  Post a close event if all
2828  * references go away, since the function might be called from the
2829  * biodone context.
2830  */
2831 static void
2832 swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
2833 {
2834 
2835 	mtx_assert(&sw_dev_mtx, MA_OWNED);
2836 	cp->index--;
2837 	if (cp->index == 0) {
2838 		if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
2839 			sp->sw_id = NULL;
2840 	}
2841 }
2842 
2843 static void
2844 swapgeom_done(struct bio *bp2)
2845 {
2846 	struct swdevt *sp;
2847 	struct buf *bp;
2848 	struct g_consumer *cp;
2849 
2850 	bp = bp2->bio_caller2;
2851 	cp = bp2->bio_from;
2852 	bp->b_ioflags = bp2->bio_flags;
2853 	if (bp2->bio_error)
2854 		bp->b_ioflags |= BIO_ERROR;
2855 	bp->b_resid = bp->b_bcount - bp2->bio_completed;
2856 	bp->b_error = bp2->bio_error;
2857 	bp->b_caller1 = NULL;
2858 	bufdone(bp);
2859 	sp = bp2->bio_caller1;
2860 	mtx_lock(&sw_dev_mtx);
2861 	swapgeom_release(cp, sp);
2862 	mtx_unlock(&sw_dev_mtx);
2863 	g_destroy_bio(bp2);
2864 }
2865 
2866 static void
2867 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2868 {
2869 	struct bio *bio;
2870 	struct g_consumer *cp;
2871 
2872 	mtx_lock(&sw_dev_mtx);
2873 	cp = sp->sw_id;
2874 	if (cp == NULL) {
2875 		mtx_unlock(&sw_dev_mtx);
2876 		bp->b_error = ENXIO;
2877 		bp->b_ioflags |= BIO_ERROR;
2878 		bufdone(bp);
2879 		return;
2880 	}
2881 	swapgeom_acquire(cp);
2882 	mtx_unlock(&sw_dev_mtx);
2883 	if (bp->b_iocmd == BIO_WRITE)
2884 		bio = g_new_bio();
2885 	else
2886 		bio = g_alloc_bio();
2887 	if (bio == NULL) {
2888 		mtx_lock(&sw_dev_mtx);
2889 		swapgeom_release(cp, sp);
2890 		mtx_unlock(&sw_dev_mtx);
2891 		bp->b_error = ENOMEM;
2892 		bp->b_ioflags |= BIO_ERROR;
2893 		printf("swap_pager: cannot allocate bio\n");
2894 		bufdone(bp);
2895 		return;
2896 	}
2897 
2898 	bp->b_caller1 = bio;
2899 	bio->bio_caller1 = sp;
2900 	bio->bio_caller2 = bp;
2901 	bio->bio_cmd = bp->b_iocmd;
2902 	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2903 	bio->bio_length = bp->b_bcount;
2904 	bio->bio_done = swapgeom_done;
2905 	bio->bio_flags |= BIO_SWAP;
2906 	if (!buf_mapped(bp)) {
2907 		bio->bio_ma = bp->b_pages;
2908 		bio->bio_data = unmapped_buf;
2909 		bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
2910 		bio->bio_ma_n = bp->b_npages;
2911 		bio->bio_flags |= BIO_UNMAPPED;
2912 	} else {
2913 		bio->bio_data = bp->b_data;
2914 		bio->bio_ma = NULL;
2915 	}
2916 	g_io_request(bio, cp);
2918 }
2919 
2920 static void
2921 swapgeom_orphan(struct g_consumer *cp)
2922 {
2923 	struct swdevt *sp;
2924 	int destroy;
2925 
2926 	mtx_lock(&sw_dev_mtx);
2927 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2928 		if (sp->sw_id == cp) {
2929 			sp->sw_flags |= SW_CLOSING;
2930 			break;
2931 		}
2932 	}
2933 	/*
2934 	 * Drop the reference we were created with.  Do it directly since
2935 	 * we're in a special context where we don't have to queue the
2936 	 * call to swapgeom_close_ev().
2937 	 */
2938 	cp->index--;
2939 	destroy = ((sp != NULL) && (cp->index == 0));
2940 	if (destroy)
2941 		sp->sw_id = NULL;
2942 	mtx_unlock(&sw_dev_mtx);
2943 	if (destroy)
2944 		swapgeom_close_ev(cp, 0);
2945 }
2946 
2947 static void
2948 swapgeom_close(struct thread *td, struct swdevt *sw)
2949 {
2950 	struct g_consumer *cp;
2951 
2952 	mtx_lock(&sw_dev_mtx);
2953 	cp = sw->sw_id;
2954 	sw->sw_id = NULL;
2955 	mtx_unlock(&sw_dev_mtx);
2956 
2957 	/*
2958 	 * swapgeom_close() may be called from the biodone context,
2959 	 * where we cannot perform topology changes.  Delegate the
2960 	 * work to the events thread.
2961 	 */
2962 	if (cp != NULL)
2963 		g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
2964 }
2965 
2966 static int
2967 swapongeom_locked(struct cdev *dev, struct vnode *vp)
2968 {
2969 	struct g_provider *pp;
2970 	struct g_consumer *cp;
2971 	static struct g_geom *gp;
2972 	struct swdevt *sp;
2973 	u_long nblks;
2974 	int error;
2975 
2976 	pp = g_dev_getprovider(dev);
2977 	if (pp == NULL)
2978 		return (ENODEV);
2979 	mtx_lock(&sw_dev_mtx);
2980 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2981 		cp = sp->sw_id;
2982 		if (cp != NULL && cp->provider == pp) {
2983 			mtx_unlock(&sw_dev_mtx);
2984 			return (EBUSY);
2985 		}
2986 	}
2987 	mtx_unlock(&sw_dev_mtx);
2988 	if (gp == NULL)
2989 		gp = g_new_geomf(&g_swap_class, "swap");
2990 	cp = g_new_consumer(gp);
2991 	cp->index = 1;	/* Number of active I/Os, plus one for being active. */
2992 	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
2993 	g_attach(cp, pp);
2994 	/*
2995 	 * XXX: Every time you think you can improve the margin for
2996 	 * footshooting, somebody depends on the ability to do so:
2997 	 * savecore(8) wants to write to our swapdev so we cannot
2998 	 * set an exclusive count :-(
2999 	 */
3000 	error = g_access(cp, 1, 1, 0);
3001 	if (error != 0) {
3002 		g_detach(cp);
3003 		g_destroy_consumer(cp);
3004 		return (error);
3005 	}
3006 	nblks = pp->mediasize / DEV_BSIZE;
3007 	swaponsomething(vp, cp, nblks, swapgeom_strategy,
3008 	    swapgeom_close, dev2udev(dev),
3009 	    (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
3010 	return (0);
3011 }
3012 
3013 static int
3014 swapongeom(struct vnode *vp)
3015 {
3016 	int error;
3017 
3018 	ASSERT_VOP_ELOCKED(vp, "swapongeom");
3019 	if (vp->v_type != VCHR || VN_IS_DOOMED(vp)) {
3020 		error = ENOENT;
3021 	} else {
3022 		g_topology_lock();
3023 		error = swapongeom_locked(vp->v_rdev, vp);
3024 		g_topology_unlock();
3025 	}
3026 	return (error);
3027 }
3028 
3029 /*
3030  * VNODE backend
3031  *
3032  * This is used mainly for network filesystem (read: probably only tested
3033  * with NFS) swapfiles.
3034  *
3035  */
3036 
3037 static void
3038 swapdev_strategy(struct buf *bp, struct swdevt *sp)
3039 {
3040 	struct vnode *vp2;
3041 
3042 	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
3043 
3044 	vp2 = sp->sw_id;
3045 	vhold(vp2);
3046 	if (bp->b_iocmd == BIO_WRITE) {
3047 		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY);
3048 		if (bp->b_bufobj)
3049 			bufobj_wdrop(bp->b_bufobj);
3050 		bufobj_wref(&vp2->v_bufobj);
3051 	} else {
3052 		vn_lock(vp2, LK_SHARED | LK_RETRY);
3053 	}
3054 	if (bp->b_bufobj != &vp2->v_bufobj)
3055 		bp->b_bufobj = &vp2->v_bufobj;
3056 	bp->b_vp = vp2;
3057 	bp->b_iooffset = dbtob(bp->b_blkno);
3058 	bstrategy(bp);
3059 	VOP_UNLOCK(vp2);
3060 }
3061 
3062 static void
3063 swapdev_close(struct thread *td, struct swdevt *sp)
3064 {
3065 	struct vnode *vp;
3066 
3067 	vp = sp->sw_vp;
3068 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3069 	VOP_CLOSE(vp, FREAD | FWRITE, td->td_ucred, td);
3070 	vput(vp);
3071 }
3072 
3073 static int
3074 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
3075 {
3076 	struct swdevt *sp;
3077 	int error;
3078 
3079 	ASSERT_VOP_ELOCKED(vp, "swaponvp");
3080 	if (nblks == 0)
3081 		return (ENXIO);
3082 	mtx_lock(&sw_dev_mtx);
3083 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
3084 		if (sp->sw_id == vp) {
3085 			mtx_unlock(&sw_dev_mtx);
3086 			return (EBUSY);
3087 		}
3088 	}
3089 	mtx_unlock(&sw_dev_mtx);
3090 
3091 #ifdef MAC
3092 	error = mac_system_check_swapon(td->td_ucred, vp);
3093 	if (error == 0)
3094 #endif
3095 		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
3096 	if (error != 0)
3097 		return (error);
3098 
3099 	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
3100 	    NODEV, 0);
3101 	return (0);
3102 }
3103 
3104 static int
3105 sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
3106 {
3107 	int error, new, n;
3108 
3109 	new = nsw_wcount_async_max;
3110 	error = sysctl_handle_int(oidp, &new, 0, req);
3111 	if (error != 0 || req->newptr == NULL)
3112 		return (error);
3113 
3114 	if (new > nswbuf / 2 || new < 1)
3115 		return (EINVAL);
3116 
3117 	mtx_lock(&swbuf_mtx);
3118 	while (nsw_wcount_async_max != new) {
3119 		/*
3120 		 * Adjust difference.  If the current async count is too low,
3121 		 * we will need to squeeze our update slowly in.  Sleep with a
3122 		 * higher priority than getpbuf() to finish faster.
3123 		 */
3124 		n = new - nsw_wcount_async_max;
3125 		if (nsw_wcount_async + n >= 0) {
3126 			nsw_wcount_async += n;
3127 			nsw_wcount_async_max += n;
3128 			wakeup(&nsw_wcount_async);
3129 		} else {
3130 			nsw_wcount_async_max -= nsw_wcount_async;
3131 			nsw_wcount_async = 0;
3132 			msleep(&nsw_wcount_async, &swbuf_mtx, PSWP,
3133 			    "swpsysctl", 0);
3134 		}
3135 	}
3136 	mtx_unlock(&swbuf_mtx);
3137 
3138 	return (0);
3139 }
3140 
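/*
 * Illustrative usage, assuming the handler above backs the
 * vm.swap_async_max sysctl as configured elsewhere in this file:
 *
 *	# sysctl vm.swap_async_max=8
 *
 * Values outside [1, nswbuf / 2] are rejected with EINVAL; otherwise
 * the loop above drains or replenishes the in-flight async write
 * count until the new limit takes effect.
 */
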
3141 static void
3142 swap_pager_update_writecount(vm_object_t object, vm_offset_t start,
3143     vm_offset_t end)
3144 {
3145 
3146 	VM_OBJECT_WLOCK(object);
3147 	KASSERT((object->flags & OBJ_ANON) == 0,
3148 	    ("Splittable object with writecount"));
3149 	object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
3150 	VM_OBJECT_WUNLOCK(object);
3151 }
3152 
3153 static void
3154 swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
3155     vm_offset_t end)
3156 {
3157 
3158 	VM_OBJECT_WLOCK(object);
3159 	KASSERT((object->flags & OBJ_ANON) == 0,
3160 	    ("Splittable object with writecount"));
3161 	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
3162 	VM_OBJECT_WUNLOCK(object);
3163 }
3164