/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1998 Matthew Dillon,
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *				New Swap System
 *				Matthew Dillon
 *
 * Radix Bitmap 'blists'.
 *
 *	- The new swapper uses the new radix bitmap code.  This should scale
 *	  to arbitrarily small or arbitrarily large swap spaces and an almost
 *	  arbitrary degree of fragmentation.
 *
 * Features:
 *
 *	- on the fly reallocation of swap during putpages.  The new system
 *	  does not try to keep previously allocated swap blocks for dirty
 *	  pages.
 *
 *	- on the fly deallocation of swap
 *
 *	- No more garbage collection required.  Unnecessarily allocated swap
 *	  blocks only exist for dirty vm_page_t's now and these are already
 *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
 *	  removal of invalidated swap blocks when a page is destroyed
 *	  or renamed.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 */

#include <sys/cdefs.h>
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/bio.h>
#include <sys/blist.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/disklabel.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/pctrie.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/resource.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/unistd.h>
#include <sys/user.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <geom/geom.h>

/*
 * MAX_PAGEOUT_CLUSTER must be a power of 2 between 1 and 64.
 * The 64-page limit is due to the radix code (kern/subr_blist.c).
 */
#ifndef MAX_PAGEOUT_CLUSTER
#define	MAX_PAGEOUT_CLUSTER	32
#endif

#if !defined(SWB_NPAGES)
#define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
#endif

#define	SWAP_META_PAGES		PCTRIE_COUNT

/*
 * A swblk structure maps each page index within a
 * SWAP_META_PAGES-aligned and sized range to the address of an
 * on-disk swap block (or SWAPBLK_NONE). The collection of these
 * mappings for an entire vm object is implemented as a pc-trie.
 */
struct swblk {
	vm_pindex_t	p;
	daddr_t		d[SWAP_META_PAGES];
};

/*
 * A page_range structure records the start address and length of a sequence of
 * mapped page addresses.
 */
struct page_range {
	daddr_t start;
	daddr_t num;
};

static MALLOC_DEFINE(M_VMPGDATA, "vm_pgdata", "swap pager private data");
static struct mtx sw_dev_mtx;
static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
static struct swdevt *swdevhd;	/* Allocate from here next */
static int nswapdev;		/* Number of swap devices */
int swap_pager_avail;
static struct sx swdev_syscall_lock;	/* serialize swap(on|off) */

static __exclusive_cache_line u_long swap_reserved;
static u_long swap_total;
static int sysctl_page_shift(SYSCTL_HANDLER_ARGS);

static SYSCTL_NODE(_vm_stats, OID_AUTO, swap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM swap stats");

SYSCTL_PROC(_vm, OID_AUTO, swap_reserved, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
    &swap_reserved, 0, sysctl_page_shift, "QU",
    "Amount of swap storage needed to back all allocated anonymous memory.");
SYSCTL_PROC(_vm, OID_AUTO, swap_total, CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
    &swap_total, 0, sysctl_page_shift, "QU",
    "Total amount of available swap storage.");

int vm_overcommit __read_mostly = 0;
SYSCTL_INT(_vm, VM_OVERCOMMIT, overcommit, CTLFLAG_RW, &vm_overcommit, 0,
    "Configure virtual memory overcommit behavior. See tuning(7) "
    "for details.");
static unsigned long swzone;
SYSCTL_ULONG(_vm, OID_AUTO, swzone, CTLFLAG_RD, &swzone, 0,
    "Actual size of swap metadata zone");
static unsigned long swap_maxpages;
SYSCTL_ULONG(_vm, OID_AUTO, swap_maxpages, CTLFLAG_RD, &swap_maxpages, 0,
    "Maximum amount of swap supported");

static COUNTER_U64_DEFINE_EARLY(swap_free_deferred);
SYSCTL_COUNTER_U64(_vm_stats_swap, OID_AUTO, free_deferred,
    CTLFLAG_RD, &swap_free_deferred,
    "Number of pages that deferred freeing swap space");

static COUNTER_U64_DEFINE_EARLY(swap_free_completed);
SYSCTL_COUNTER_U64(_vm_stats_swap, OID_AUTO, free_completed,
    CTLFLAG_RD, &swap_free_completed,
    "Number of deferred frees completed");

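/*
 * Report a count of pages as a count of bytes via sysctl.
 */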
static int
sysctl_page_shift(SYSCTL_HANDLER_ARGS)
{
	uint64_t newval;
	u_long value = *(u_long *)arg1;

	newval = ((uint64_t)value) << PAGE_SHIFT;
	return (sysctl_handle_64(oidp, &newval, 0, req));
}

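/*
 * Charge pincr pages of swap reservation to the real uid of the given
 * credential.  When rlimit accounting is enabled, fail if RLIMIT_SWAP
 * would be exceeded and the thread lacks PRIV_VM_SWAP_NORLIMIT.
 */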
static bool
swap_reserve_by_cred_rlimit(u_long pincr, struct ucred *cred, int oc)
{
	struct uidinfo *uip;
	u_long prev;

	uip = cred->cr_ruidinfo;

	prev = atomic_fetchadd_long(&uip->ui_vmsize, pincr);
	if ((oc & SWAP_RESERVE_RLIMIT_ON) != 0 &&
	    prev + pincr > lim_cur(curthread, RLIMIT_SWAP) &&
	    priv_check(curthread, PRIV_VM_SWAP_NORLIMIT) != 0) {
		prev = atomic_fetchadd_long(&uip->ui_vmsize, -pincr);
		KASSERT(prev >= pincr,
		    ("negative vmsize for uid %d\n", uip->ui_uid));
		return (false);
	}
	return (true);
}

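/*
 * Return pdecr pages of swap reservation charged to the real uid of
 * the given credential.
 */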
static void
swap_release_by_cred_rlimit(u_long pdecr, struct ucred *cred)
{
	struct uidinfo *uip;
#ifdef INVARIANTS
	u_long prev;
#endif

	uip = cred->cr_ruidinfo;

#ifdef INVARIANTS
	prev = atomic_fetchadd_long(&uip->ui_vmsize, -pdecr);
	KASSERT(prev >= pdecr,
	    ("negative vmsize for uid %d\n", uip->ui_uid));
#else
	atomic_subtract_long(&uip->ui_vmsize, pdecr);
#endif
}

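/*
 * Charge pincr pages to the real uid of the given credential without
 * allowing for failure, for use on the swap_reserve_force() path.
 */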
static void
swap_reserve_force_rlimit(u_long pincr, struct ucred *cred)
{
	struct uidinfo *uip;

	uip = cred->cr_ruidinfo;
	atomic_add_long(&uip->ui_vmsize, pincr);
}

bool
swap_reserve(vm_ooffset_t incr)
{

	return (swap_reserve_by_cred(incr, curthread->td_ucred));
}

bool
swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred)
{
	u_long r, s, prev, pincr;
#ifdef RACCT
	int error;
#endif
	int oc;
	static int curfail;
	static struct timeval lastfail;

	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK",
	    __func__, (uintmax_t)incr));

#ifdef RACCT
	if (RACCT_ENABLED()) {
		PROC_LOCK(curproc);
		error = racct_add(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
		if (error != 0)
			return (false);
	}
#endif

	pincr = atop(incr);
	prev = atomic_fetchadd_long(&swap_reserved, pincr);
	r = prev + pincr;
	s = swap_total;
	oc = atomic_load_int(&vm_overcommit);
	if (r > s && (oc & SWAP_RESERVE_ALLOW_NONWIRED) != 0) {
		s += vm_cnt.v_page_count - vm_cnt.v_free_reserved -
		    vm_wire_count();
	}
	if ((oc & SWAP_RESERVE_FORCE_ON) != 0 && r > s &&
	    priv_check(curthread, PRIV_VM_SWAP_NOQUOTA) != 0) {
		prev = atomic_fetchadd_long(&swap_reserved, -pincr);
		KASSERT(prev >= pincr,
		    ("swap_reserved < incr on overcommit fail"));
		goto out_error;
	}

	if (!swap_reserve_by_cred_rlimit(pincr, cred, oc)) {
		prev = atomic_fetchadd_long(&swap_reserved, -pincr);
		KASSERT(prev >= pincr,
		    ("swap_reserved < incr on overcommit fail"));
		goto out_error;
	}

	return (true);

out_error:
	if (ppsratecheck(&lastfail, &curfail, 1)) {
		printf("uid %d, pid %d: swap reservation "
		    "for %jd bytes failed\n",
		    cred->cr_ruidinfo->ui_uid, curproc->p_pid, incr);
	}
#ifdef RACCT
	if (RACCT_ENABLED()) {
		PROC_LOCK(curproc);
		racct_sub(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
	}
#endif

	return (false);
}

void
swap_reserve_force(vm_ooffset_t incr)
{
	u_long pincr;

	KASSERT((incr & PAGE_MASK) == 0, ("%s: incr: %ju & PAGE_MASK",
	    __func__, (uintmax_t)incr));

#ifdef RACCT
	if (RACCT_ENABLED()) {
		PROC_LOCK(curproc);
		racct_add_force(curproc, RACCT_SWAP, incr);
		PROC_UNLOCK(curproc);
	}
#endif
	pincr = atop(incr);
	atomic_add_long(&swap_reserved, pincr);
	swap_reserve_force_rlimit(pincr, curthread->td_ucred);
}

void
swap_release(vm_ooffset_t decr)
{
	struct ucred *cred;

	PROC_LOCK(curproc);
	cred = curproc->p_ucred;
	swap_release_by_cred(decr, cred);
	PROC_UNLOCK(curproc);
}

void
swap_release_by_cred(vm_ooffset_t decr, struct ucred *cred)
{
	u_long pdecr;
#ifdef INVARIANTS
	u_long prev;
#endif

	KASSERT((decr & PAGE_MASK) == 0, ("%s: decr: %ju & PAGE_MASK",
	    __func__, (uintmax_t)decr));

	pdecr = atop(decr);
#ifdef INVARIANTS
	prev = atomic_fetchadd_long(&swap_reserved, -pdecr);
	KASSERT(prev >= pdecr, ("swap_reserved < decr"));
#else
	atomic_subtract_long(&swap_reserved, pdecr);
#endif

	swap_release_by_cred_rlimit(pdecr, cred);
#ifdef RACCT
	if (racct_enable)
		racct_sub_cred(cred, RACCT_SWAP, decr);
#endif
}

static int swap_pager_full = 2;	/* swap space exhaustion (task killing) */
static int swap_pager_almost_full = 1; /* swap space exhaustion (w/hysteresis)*/
static struct mtx swbuf_mtx;	/* to sync nsw_wcount_async */
static int nsw_wcount_async;	/* limit async write buffers */
static int nsw_wcount_async_max;/* assigned maximum			*/
int nsw_cluster_max;		/* maximum VOP I/O allowed		*/

static int sysctl_swap_async_max(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_async_max, CTLTYPE_INT | CTLFLAG_RW |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_async_max, "I",
    "Maximum running async swap ops");
static int sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, swap_fragmentation, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_swap_fragmentation, "A",
    "Swap Fragmentation Info");

static struct sx sw_alloc_sx;

/*
 * "named" and "unnamed" anon region objects.  Try to reduce the overhead
 * of searching a named list by hashing it just a little.
 */

#define NOBJLISTS		8

#define NOBJLIST(handle)	\
	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])

static struct pagerlst	swap_pager_object_list[NOBJLISTS];
static uma_zone_t swwbuf_zone;
static uma_zone_t swrbuf_zone;
static uma_zone_t swblk_zone;
static uma_zone_t swpctrie_zone;

/*
 * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
 * calls hooked from other parts of the VM system and do not appear here.
 * (see vm/swap_pager.h).
 */
static vm_object_t
		swap_pager_alloc(void *handle, vm_ooffset_t size,
		    vm_prot_t prot, vm_ooffset_t offset, struct ucred *);
static void	swap_pager_dealloc(vm_object_t object);
static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int *,
    int *);
static int	swap_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, pgo_getpages_iodone_t, void *);
static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t
		swap_pager_haspage(vm_object_t object, vm_pindex_t pindex,
		    int *before, int *after);
static void	swap_pager_init(void);
static void	swap_pager_unswapped(vm_page_t);
static void	swap_pager_swapoff(struct swdevt *sp);
static void	swap_pager_update_writecount(vm_object_t object,
    vm_offset_t start, vm_offset_t end);
static void	swap_pager_release_writecount(vm_object_t object,
    vm_offset_t start, vm_offset_t end);
static void	swap_pager_freespace_pgo(vm_object_t object, vm_pindex_t start,
    vm_size_t size);

const struct pagerops swappagerops = {
	.pgo_kvme_type = KVME_TYPE_SWAP,
	.pgo_init =	swap_pager_init,	/* early system initialization of pager	*/
	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object */
	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object */
	.pgo_getpages =	swap_pager_getpages,	/* pagein */
	.pgo_getpages_async = swap_pager_getpages_async, /* pagein (async) */
	.pgo_putpages =	swap_pager_putpages,	/* pageout */
	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page */
	.pgo_pageunswapped = swap_pager_unswapped, /* remove swap related to page */
	.pgo_update_writecount = swap_pager_update_writecount,
	.pgo_release_writecount = swap_pager_release_writecount,
	.pgo_freespace = swap_pager_freespace_pgo,
};

/*
 * swap_*() routines are externally accessible.  swp_*() routines are
 * internal.
 */
static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */

SYSCTL_INT(_vm, OID_AUTO, dmmax, CTLFLAG_RD, &nsw_cluster_max, 0,
    "Maximum size of a swap block in pages");

static void	swp_sizecheck(void);
static void	swp_pager_async_iodone(struct buf *bp);
static bool	swp_pager_swblk_empty(struct swblk *sb, int start, int limit);
static void	swp_pager_free_empty_swblk(vm_object_t, struct swblk *sb);
static int	swapongeom(struct vnode *);
static int	swaponvp(struct thread *, struct vnode *, u_long);
static int	swapoff_one(struct swdevt *sp, struct ucred *cred,
		    u_int flags);

/*
 * Swap bitmap functions
 */
static void	swp_pager_freeswapspace(const struct page_range *range);
static daddr_t	swp_pager_getswapspace(int *npages);

/*
 * Metadata functions
 */
static daddr_t swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t,
	bool);
static void swp_pager_meta_free(vm_object_t, vm_pindex_t, vm_pindex_t,
    vm_size_t *);
static void swp_pager_meta_transfer(vm_object_t src, vm_object_t dst,
    vm_pindex_t pindex, vm_pindex_t count);
static void swp_pager_meta_free_all(vm_object_t);
static daddr_t swp_pager_meta_lookup(vm_object_t, vm_pindex_t);

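/*
 * Reset a page_range to the empty state.
 */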
static void
swp_pager_init_freerange(struct page_range *range)
{
	range->start = SWAPBLK_NONE;
	range->num = 0;
}

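/*
 * Grow the range by one block if addr immediately follows it;
 * otherwise free the accumulated range and start a new one at addr.
 */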
static void
swp_pager_update_freerange(struct page_range *range, daddr_t addr)
{
	if (range->start + range->num == addr) {
		range->num++;
	} else {
		swp_pager_freeswapspace(range);
		range->start = addr;
		range->num = 1;
	}
}

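/*
 * Node allocation and freeing routines for the pctries that hold an
 * object's swblks.  The pageout daemon is allowed to dip into the UMA
 * reserve so that swap metadata allocation does not stall pageout.
 */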
static void *
swblk_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(swpctrie_zone, M_NOWAIT | (curproc == pageproc ?
	    M_USE_RESERVE : 0)));
}

static void
swblk_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(swpctrie_zone, node);
}

PCTRIE_DEFINE(SWAP, swblk, p, swblk_trie_alloc, swblk_trie_free);

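/*
 * Lookup helpers for an object's collection of swblk structures.
 * Indices are rounded down to SWAP_META_PAGES boundaries, matching the
 * alignment of the ranges that the swblks describe.
 */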
static struct swblk *
swblk_lookup(vm_object_t object, vm_pindex_t pindex)
{
	return (SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
	    rounddown(pindex, SWAP_META_PAGES)));
}

static struct swblk *
swblk_start(vm_object_t object, vm_pindex_t pindex)
{
	return (SWAP_PCTRIE_LOOKUP_GE(&object->un_pager.swp.swp_blks,
	    rounddown(pindex, SWAP_META_PAGES)));
}

static struct swblk *
swblk_next(vm_object_t object, struct swblk *sb)
{
	return (swblk_start(object, sb->p + SWAP_META_PAGES));
}

static struct swblk *
swblk_start_limit(vm_object_t object, vm_pindex_t pindex, vm_pindex_t limit)
{
	struct swblk *sb = swblk_start(object, pindex);
	if (sb != NULL && sb->p < limit)
		return (sb);
	return (NULL);
}

static struct swblk *
swblk_next_limit(vm_object_t object, struct swblk *sb, vm_pindex_t limit)
{
	return (swblk_start_limit(object, sb->p + SWAP_META_PAGES, limit));
}

static void
swblk_lookup_remove(vm_object_t object, struct swblk *sb)
{
	SWAP_PCTRIE_REMOVE(&object->un_pager.swp.swp_blks, sb->p);
}

static int
swblk_lookup_insert(vm_object_t object, struct swblk *sb)
{
	return (SWAP_PCTRIE_INSERT(&object->un_pager.swp.swp_blks, sb));
}

static bool
swblk_is_empty(vm_object_t object)
{
	return (pctrie_is_empty(&object->un_pager.swp.swp_blks));
}

/*
 * SWP_SIZECHECK() -	update swap_pager_full indication
 *
 *	update the swap_pager_almost_full indication and warn when we are
 *	about to run out of swap space, using lowat/hiwat hysteresis.
 *
 *	Clear swap_pager_full ( task killing ) indication when lowat is met.
 *
 *	No restrictions on call
 *	This routine may not block.
 */
static void
swp_sizecheck(void)
{

	if (swap_pager_avail < nswap_lowat) {
		if (swap_pager_almost_full == 0) {
			printf("swap_pager: out of swap space\n");
			swap_pager_almost_full = 1;
		}
	} else {
		swap_pager_full = 0;
		if (swap_pager_avail > nswap_hiwat)
			swap_pager_almost_full = 0;
	}
}

/*
 * SWAP_PAGER_INIT() -	initialize the swap pager!
 *
 *	Expected to be started from system init.  NOTE:  This code is run
 *	before much else so be careful what you depend on.  Most of the VM
 *	system has yet to be initialized at this point.
 */
static void
swap_pager_init(void)
{
	/*
	 * Initialize object lists
	 */
	int i;

	for (i = 0; i < NOBJLISTS; ++i)
		TAILQ_INIT(&swap_pager_object_list[i]);
	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
	sx_init(&sw_alloc_sx, "swspsx");
	sx_init(&swdev_syscall_lock, "swsysc");

	/*
	 * The nsw_cluster_max is constrained by the bp->b_pages[]
	 * array, which has maxphys / PAGE_SIZE entries, and our locally
	 * defined MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
	 * constrained by the swap device interleave stripe size.
	 *
	 * Initialized early so that GEOM_ELI can see it.
	 */
	nsw_cluster_max = min(maxphys / PAGE_SIZE, MAX_PAGEOUT_CLUSTER);
}

/*
 * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
 *
 *	Expected to be started from pageout process once, prior to entering
 *	its main loop.
 */
void
swap_pager_swap_init(void)
{
	unsigned long n, n2;

	/*
	 * Number of in-transit swap bp operations.  Don't
	 * exhaust the pbufs completely.  Make sure we
	 * initialize workable values (0 will work for hysteresis
	 * but it isn't very efficient).
	 *
	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
	 * designed to prevent other I/O from having high latencies due to
	 * our pageout I/O.  The value 4 works well for one or two active swap
	 * devices but is probably a little low if you have more.  Even so,
	 * a higher value would probably generate only a limited improvement
	 * with three or four active swap devices since the system does not
	 * typically have to pageout at extreme bandwidths.   We will want
	 * at least 2 per swap device, and 4 is a pretty good value if you
	 * have one NFS swap device due to the command/ack latency over NFS.
	 * So it all works out pretty well.
	 *
	 * nsw_cluster_max is initialized in swap_pager_init().
	 */

	nsw_wcount_async = 4;
	nsw_wcount_async_max = nsw_wcount_async;
	mtx_init(&swbuf_mtx, "async swbuf mutex", NULL, MTX_DEF);

	swwbuf_zone = pbuf_zsecond_create("swwbuf", nswbuf / 4);
	swrbuf_zone = pbuf_zsecond_create("swrbuf", nswbuf / 2);

	/*
	 * Initialize our zone, taking the user's requested size or
	 * estimating the number we need based on the number of pages
	 * in the system.
	 */
	n = maxswzone != 0 ? maxswzone / sizeof(struct swblk) :
	    vm_cnt.v_page_count / 2;
	swpctrie_zone = uma_zcreate("swpctrie", pctrie_node_size(), NULL, NULL,
	    pctrie_zone_init, NULL, UMA_ALIGN_PTR, 0);
	swblk_zone = uma_zcreate("swblk", sizeof(struct swblk), NULL, NULL,
	    NULL, NULL, _Alignof(struct swblk) - 1, 0);
	n2 = n;
	do {
		if (uma_zone_reserve_kva(swblk_zone, n))
			break;
		/*
		 * if the allocation failed, try a zone two thirds the
		 * size of the previous attempt.
		 */
		n -= ((n + 2) / 3);
	} while (n > 0);

	/*
	 * Often uma_zone_reserve_kva() cannot reserve exactly the
	 * requested size.  Account for the difference when
	 * calculating swap_maxpages.
	 */
	n = uma_zone_get_max(swblk_zone);

	if (n < n2)
		printf("Swap blk zone entries changed from %lu to %lu.\n",
		    n2, n);
	/* absolute maximum we can handle assuming 100% efficiency */
	swap_maxpages = n * SWAP_META_PAGES;
	swzone = n * sizeof(struct swblk);
	if (!uma_zone_reserve_kva(swpctrie_zone, n))
		printf("Cannot reserve swap pctrie zone, "
		    "reduce kern.maxswzone.\n");
}

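/*
 * Reserve swap space for the object if a credential was supplied, and
 * initialize the swap-pager-private portion of the object.
 */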
bool
swap_pager_init_object(vm_object_t object, void *handle, struct ucred *cred,
    vm_ooffset_t size, vm_ooffset_t offset)
{
	if (cred != NULL) {
		if (!swap_reserve_by_cred(size, cred))
			return (false);
		crhold(cred);
	}

	object->un_pager.swp.writemappings = 0;
	object->handle = handle;
	if (cred != NULL) {
		object->cred = cred;
		object->charge = size;
	}
	return (true);
}

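/*
 * Allocate a pager object of the given type and initialize it as
 * swap-backed, deallocating the object if initialization fails.
 */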
static vm_object_t
swap_pager_alloc_init(objtype_t otype, void *handle, struct ucred *cred,
    vm_ooffset_t size, vm_ooffset_t offset)
{
	vm_object_t object;

	/*
	 * The un_pager.swp.swp_blks trie is initialized by
	 * vm_object_allocate() to ensure the correct order of
	 * visibility to other threads.
	 */
	object = vm_object_allocate(otype, OFF_TO_IDX(offset +
	    PAGE_MASK + size));

	if (!swap_pager_init_object(object, handle, cred, size, offset)) {
		vm_object_deallocate(object);
		return (NULL);
	}
	return (object);
}

/*
 * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
 *			its metadata structures.
 *
 *	This routine is called from the mmap and fork code to create a new
 *	OBJT_SWAP object.
 *
 *	This routine must ensure that no live duplicate is created for
 *	the named object request, which is prevented by holding the
 *	sw_alloc_sx lock when handle != NULL.
 */
static vm_object_t
swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;

	if (handle != NULL) {
		/*
		 * Reference existing named region or allocate new one.  There
		 * should not be a race here against swp_pager_meta_build()
		 * as called from vm_page_remove() with regard to the lookup
		 * of the handle.
		 */
		sx_xlock(&sw_alloc_sx);
		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
		if (object == NULL) {
			object = swap_pager_alloc_init(OBJT_SWAP, handle, cred,
			    size, offset);
			if (object != NULL) {
				TAILQ_INSERT_TAIL(NOBJLIST(object->handle),
				    object, pager_object_list);
			}
		}
		sx_xunlock(&sw_alloc_sx);
	} else {
		object = swap_pager_alloc_init(OBJT_SWAP, handle, cred,
		    size, offset);
	}
	return (object);
}

/*
 * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
 *
 *	The swap backing for the object is destroyed.  The code is
 *	designed such that we can reinstantiate it later, but this
 *	routine is typically called only when the entire object is
 *	about to be destroyed.
 *
 *	The object must be locked.
 */
static void
swap_pager_dealloc(vm_object_t object)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT((object->flags & OBJ_DEAD) != 0, ("dealloc of reachable obj"));

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if ((object->flags & OBJ_ANON) == 0 && object->handle != NULL) {
		VM_OBJECT_WUNLOCK(object);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(object->handle), object,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(object);
	}

	vm_object_pip_wait(object, "swpdea");

	/*
	 * Free all remaining metadata.  We only bother to free it from
	 * the swap meta data.  We do not attempt to free swapblk's still
	 * associated with vm_page_t's for this object.  We do not care
	 * if paging is still in progress on some objects.
	 */
	swp_pager_meta_free_all(object);
	object->handle = NULL;
	object->type = OBJT_DEAD;

	/*
	 * Release the allocation charge.
	 */
	if (object->cred != NULL) {
		swap_release_by_cred(object->charge, object->cred);
		object->charge = 0;
		crfree(object->cred);
		object->cred = NULL;
	}

	/*
	 * Hide the object from swap_pager_swapoff().
	 */
	vm_object_clear_flag(object, OBJ_SWAP);
}

/************************************************************************
 *			SWAP PAGER BITMAP ROUTINES			*
 ************************************************************************/

/*
 * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
 *
 *	Allocate swap for up to the requested number of pages.  The
 *	starting swap block number (a page index) is returned or
 *	SWAPBLK_NONE if the allocation failed.
 *
 *	Also has the side effect of advising that somebody made a mistake
 *	when they configured swap and didn't configure enough.
 *
 *	This routine may not sleep.
 *
 *	We allocate in round-robin fashion from the configured devices.
 */
static daddr_t
swp_pager_getswapspace(int *io_npages)
{
	daddr_t blk;
	struct swdevt *sp;
	int mpages, npages;

	KASSERT(*io_npages >= 1,
	    ("%s: npages not positive", __func__));
	blk = SWAPBLK_NONE;
	mpages = *io_npages;
	npages = imin(BLIST_MAX_ALLOC, mpages);
	mtx_lock(&sw_dev_mtx);
	sp = swdevhd;
	while (!TAILQ_EMPTY(&swtailq)) {
		if (sp == NULL)
			sp = TAILQ_FIRST(&swtailq);
		if ((sp->sw_flags & SW_CLOSING) == 0)
			blk = blist_alloc(sp->sw_blist, &npages, mpages);
		if (blk != SWAPBLK_NONE)
			break;
		sp = TAILQ_NEXT(sp, sw_list);
		if (swdevhd == sp) {
			if (npages == 1)
				break;
			mpages = npages - 1;
			npages >>= 1;
		}
	}
	if (blk != SWAPBLK_NONE) {
		*io_npages = npages;
		blk += sp->sw_first;
		sp->sw_used += npages;
		swap_pager_avail -= npages;
		swp_sizecheck();
		swdevhd = TAILQ_NEXT(sp, sw_list);
	} else {
		if (swap_pager_full != 2) {
			printf("swp_pager_getswapspace(%d): failed\n",
			    *io_npages);
			swap_pager_full = 2;
			swap_pager_almost_full = 1;
		}
		swdevhd = NULL;
	}
	mtx_unlock(&sw_dev_mtx);
	return (blk);
}

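/*
 * Does the swap block address blk fall within the range assigned to
 * the given swap device?
 */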
static bool
swp_pager_isondev(daddr_t blk, struct swdevt *sp)
{

	return (blk >= sp->sw_first && blk < sp->sw_end);
}

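/*
 * Locate the swap device backing bp->b_blkno, set up the buffer as
 * mapped or unmapped according to the device's capabilities, and hand
 * the buffer to the device's strategy routine.
 */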
static void
swp_pager_strategy(struct buf *bp)
{
	struct swdevt *sp;

	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (swp_pager_isondev(bp->b_blkno, sp)) {
			mtx_unlock(&sw_dev_mtx);
			if ((sp->sw_flags & SW_UNMAPPED) != 0 &&
			    unmapped_buf_allowed) {
				bp->b_data = unmapped_buf;
				bp->b_offset = 0;
			} else {
				pmap_qenter((vm_offset_t)bp->b_data,
				    &bp->b_pages[0], bp->b_bcount / PAGE_SIZE);
			}
			sp->sw_strategy(bp, sp);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
 *
 *	This routine returns the specified swap blocks back to the bitmap.
 *
 *	This routine may not sleep.
 */
static void
swp_pager_freeswapspace(const struct page_range *range)
{
	daddr_t blk, npages;
	struct swdevt *sp;

	blk = range->start;
	npages = range->num;
	if (npages == 0)
		return;
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (swp_pager_isondev(blk, sp)) {
			sp->sw_used -= npages;
			/*
			 * If we are attempting to stop swapping on
			 * this device, we don't want to mark any
			 * blocks free lest they be reused.
			 */
			if ((sp->sw_flags & SW_CLOSING) == 0) {
				blist_free(sp->sw_blist, blk - sp->sw_first,
				    npages);
				swap_pager_avail += npages;
				swp_sizecheck();
			}
			mtx_unlock(&sw_dev_mtx);
			return;
		}
	}
	panic("Swapdev not found");
}

/*
 * SYSCTL_SWAP_FRAGMENTATION() -	produce raw swap space stats
 */
static int
sysctl_swap_fragmentation(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct swdevt *sp;
	const char *devname;
	int error;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&sw_dev_mtx);
	TAILQ_FOREACH(sp, &swtailq, sw_list) {
		if (vn_isdisk(sp->sw_vp))
			devname = devtoname(sp->sw_vp->v_rdev);
		else
			devname = "[file]";
		sbuf_printf(&sbuf, "\nFree space on device %s:\n", devname);
		blist_stats(sp->sw_blist, &sbuf);
	}
	mtx_unlock(&sw_dev_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
 *				range within an object.
 *
 *	This routine removes swapblk assignments from swap metadata.
 *
 *	The external callers of this routine typically have already destroyed
 *	or renamed vm_page_t's associated with this range in the object so
 *	we should be ok.
 *
 *	The object must be locked.
 */
void
swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size,
    vm_size_t *freed)
{
	MPASS((object->flags & OBJ_SWAP) != 0);

	swp_pager_meta_free(object, start, size, freed);
}

static void
swap_pager_freespace_pgo(vm_object_t object, vm_pindex_t start, vm_size_t size)
{
	MPASS((object->flags & OBJ_SWAP) != 0);

	swp_pager_meta_free(object, start, size, NULL);
}

/*
 * SWAP_PAGER_RESERVE() - reserve swap blocks in object
 *
 *	Assigns swap blocks to the specified range within the object.  The
 *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
 *
 *	Returns 0 on success, -1 on failure.
 */
int
swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_pindex_t size)
{
	struct page_range range;
	daddr_t addr, blk;
	vm_pindex_t i, j;
	int n;

	swp_pager_init_freerange(&range);
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < size; i += n) {
		n = MIN(size - i, INT_MAX);
		blk = swp_pager_getswapspace(&n);
		if (blk == SWAPBLK_NONE) {
			swp_pager_meta_free(object, start, i, NULL);
			VM_OBJECT_WUNLOCK(object);
			return (-1);
		}
		for (j = 0; j < n; ++j) {
			addr = swp_pager_meta_build(object,
			    start + i + j, blk + j, false);
			if (addr != SWAPBLK_NONE)
				swp_pager_update_freerange(&range, addr);
		}
	}
	swp_pager_freeswapspace(&range);
	VM_OBJECT_WUNLOCK(object);
	return (0);
}

/*
 * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
 *			and destroy the source.
 *
 *	Copy any valid swapblks from the source to the destination.  In
 *	cases where both the source and destination have a valid swapblk,
 *	we keep the destination's.
 *
 *	This routine is allowed to sleep.  It may sleep allocating metadata
 *	indirectly through swp_pager_meta_build().
 *
 *	The source object contains no vm_page_t's (which is just as well).
 *
 *	The source and destination objects must be locked.
 *	Both object locks may temporarily be released.
 */
void
swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
    vm_pindex_t offset, int destroysource)
{
	VM_OBJECT_ASSERT_WLOCKED(srcobject);
	VM_OBJECT_ASSERT_WLOCKED(dstobject);

	/*
	 * If destroysource is set, we remove the source object from the
	 * swap_pager internal queue now.
	 */
	if (destroysource && (srcobject->flags & OBJ_ANON) == 0 &&
	    srcobject->handle != NULL) {
		VM_OBJECT_WUNLOCK(srcobject);
		VM_OBJECT_WUNLOCK(dstobject);
		sx_xlock(&sw_alloc_sx);
		TAILQ_REMOVE(NOBJLIST(srcobject->handle), srcobject,
		    pager_object_list);
		sx_xunlock(&sw_alloc_sx);
		VM_OBJECT_WLOCK(dstobject);
		VM_OBJECT_WLOCK(srcobject);
	}

	/*
	 * Transfer source to destination.
	 */
	swp_pager_meta_transfer(srcobject, dstobject, offset, dstobject->size);

	/*
	 * Free left over swap blocks in source.
	 */
	if (destroysource)
		swp_pager_meta_free_all(srcobject);
}

/*
 * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
 *				the requested page.
 *
 *	We determine whether good backing store exists for the requested
 *	page and return TRUE if it does, FALSE if it doesn't.
 *
 *	If TRUE, we also try to determine how much valid, contiguous backing
 *	store exists before and after the requested page.
 */
static boolean_t
swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	daddr_t blk, blk0;
	int i;

	VM_OBJECT_ASSERT_LOCKED(object);
	KASSERT((object->flags & OBJ_SWAP) != 0,
	    ("%s: object not swappable", __func__));

	/*
	 * do we have good backing store at the requested index ?
	 */
	blk0 = swp_pager_meta_lookup(object, pindex);
	if (blk0 == SWAPBLK_NONE) {
		if (before)
			*before = 0;
		if (after)
			*after = 0;
		return (FALSE);
	}

	/*
	 * find backwards-looking contiguous good backing store
	 */
	if (before != NULL) {
		for (i = 1; i < SWB_NPAGES; i++) {
			if (i > pindex)
				break;
			blk = swp_pager_meta_lookup(object, pindex - i);
			if (blk != blk0 - i)
				break;
		}
		*before = i - 1;
	}

	/*
	 * find forward-looking contiguous good backing store
	 */
	if (after != NULL) {
		for (i = 1; i < SWB_NPAGES; i++) {
			blk = swp_pager_meta_lookup(object, pindex + i);
			if (blk != blk0 + i)
				break;
		}
		*after = i - 1;
	}
	return (TRUE);
}

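/*
 * If a deferred free was pending on the page, count it as completed,
 * and clear the page's swap-related aflags.
 */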
static void
swap_pager_unswapped_acct(vm_page_t m)
{
	KASSERT((m->object->flags & OBJ_SWAP) != 0,
	    ("Free object not swappable"));
	if ((m->a.flags & PGA_SWAP_FREE) != 0)
		counter_u64_add(swap_free_completed, 1);
	vm_page_aflag_clear(m, PGA_SWAP_FREE | PGA_SWAP_SPACE);

	/*
	 * The meta data only exists if the object is OBJT_SWAP
	 * and even then might not be allocated yet.
	 */
}

/*
 * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
 *
 *	This removes any associated swap backing store, whether valid or
 *	not, from the page.
 *
 *	This routine is typically called when a page is made dirty, at
 *	which point any associated swap can be freed.  MADV_FREE also
 *	calls us in a special-case situation.
 *
 *	NOTE!!!  If the page is clean and the swap was valid, the caller
 *	should make the page dirty before calling this routine.  This routine
 *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
 *	depends on it.
 *
 *	This routine may not sleep.
 *
 *	The object containing the page may be locked.
 */
static void
swap_pager_unswapped(vm_page_t m)
{
	struct page_range range;
	struct swblk *sb;
	vm_object_t obj;

	/*
	 * Handle enqueuing deferred frees first.  If we do not have the
	 * object lock we wait for the page daemon to clear the space.
	 */
	obj = m->object;
	if (!VM_OBJECT_WOWNED(obj)) {
		VM_PAGE_OBJECT_BUSY_ASSERT(m);
		/*
		 * The caller is responsible for synchronization but we
		 * will harmlessly handle races.  This is typically provided
		 * by only calling unswapped() when a page transitions from
		 * clean to dirty.
		 */
		if ((m->a.flags & (PGA_SWAP_SPACE | PGA_SWAP_FREE)) ==
		    PGA_SWAP_SPACE) {
			vm_page_aflag_set(m, PGA_SWAP_FREE);
			counter_u64_add(swap_free_deferred, 1);
		}
		return;
	}
	swap_pager_unswapped_acct(m);

	sb = swblk_lookup(m->object, m->pindex);
	if (sb == NULL)
		return;
	range.start = sb->d[m->pindex % SWAP_META_PAGES];
	if (range.start == SWAPBLK_NONE)
		return;
	range.num = 1;
	swp_pager_freeswapspace(&range);
	sb->d[m->pindex % SWAP_META_PAGES] = SWAPBLK_NONE;
	swp_pager_free_empty_swblk(m->object, sb);
}

/*
 * swap_pager_getpages() - bring pages in from swap
 *
 *	Attempt to page in the pages in array "ma" of length "count".  The
 *	caller may optionally specify that additional pages preceding and
 *	succeeding the specified range be paged in.  The number of such pages
 *	is returned in the "rbehind" and "rahead" parameters, and they will
 *	be in the inactive queue upon return.
 *
 *	The pages in "ma" must be busied and will remain busied upon return.
 */
static int
swap_pager_getpages_locked(vm_object_t object, vm_page_t *ma, int count,
    int *rbehind, int *rahead)
{
	struct buf *bp;
	vm_page_t bm, mpred, msucc, p;
	vm_pindex_t pindex;
	daddr_t blk;
	int i, maxahead, maxbehind, reqcount;

	VM_OBJECT_ASSERT_WLOCKED(object);
	reqcount = count;

	KASSERT((object->flags & OBJ_SWAP) != 0,
	    ("%s: object not swappable", __func__));
	if (!swap_pager_haspage(object, ma[0]->pindex, &maxbehind, &maxahead)) {
		VM_OBJECT_WUNLOCK(object);
		return (VM_PAGER_FAIL);
	}

	KASSERT(reqcount - 1 <= maxahead,
	    ("page count %d extends beyond swap block", reqcount));

	/*
	 * Do not transfer any pages other than those that are xbusied
	 * when running during a split or collapse operation.  This
	 * prevents clustering from re-creating pages which are being
	 * moved into another object.
	 */
	if ((object->flags & (OBJ_SPLIT | OBJ_DEAD)) != 0) {
		maxahead = reqcount - 1;
		maxbehind = 0;
	}

	/*
	 * Clip the readahead and readbehind ranges to exclude resident pages.
	 */
	if (rahead != NULL) {
		*rahead = imin(*rahead, maxahead - (reqcount - 1));
		pindex = ma[reqcount - 1]->pindex;
		msucc = TAILQ_NEXT(ma[reqcount - 1], listq);
		if (msucc != NULL && msucc->pindex - pindex - 1 < *rahead)
			*rahead = msucc->pindex - pindex - 1;
	}
	if (rbehind != NULL) {
		*rbehind = imin(*rbehind, maxbehind);
		pindex = ma[0]->pindex;
		mpred = TAILQ_PREV(ma[0], pglist, listq);
		if (mpred != NULL && pindex - mpred->pindex - 1 < *rbehind)
			*rbehind = pindex - mpred->pindex - 1;
	}

	bm = ma[0];
	for (i = 0; i < count; i++)
		ma[i]->oflags |= VPO_SWAPINPROG;

	/*
	 * Allocate readahead and readbehind pages.
	 */
	if (rbehind != NULL) {
		for (i = 1; i <= *rbehind; i++) {
			p = vm_page_alloc(object, ma[0]->pindex - i,
			    VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			p->oflags |= VPO_SWAPINPROG;
			bm = p;
		}
		*rbehind = i - 1;
	}
	if (rahead != NULL) {
		for (i = 0; i < *rahead; i++) {
			p = vm_page_alloc(object,
			    ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			p->oflags |= VPO_SWAPINPROG;
		}
		*rahead = i;
	}
	if (rbehind != NULL)
		count += *rbehind;
	if (rahead != NULL)
		count += *rahead;

	vm_object_pip_add(object, count);

	pindex = bm->pindex;
	blk = swp_pager_meta_lookup(object, pindex);
	KASSERT(blk != SWAPBLK_NONE,
	    ("no swap block containing %p(%jx)", object, (uintmax_t)pindex));

	VM_OBJECT_WUNLOCK(object);
	bp = uma_zalloc(swrbuf_zone, M_WAITOK);
	MPASS((bp->b_flags & B_MAXPHYS) != 0);
	/* Pages cannot leave the object while busy. */
	for (i = 0, p = bm; i < count; i++, p = TAILQ_NEXT(p, listq)) {
		MPASS(p->pindex == bm->pindex + i);
		bp->b_pages[i] = p;
	}

	bp->b_flags |= B_PAGING;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = swp_pager_async_iodone;
	bp->b_rcred = crhold(thread0.td_ucred);
	bp->b_wcred = crhold(thread0.td_ucred);
	bp->b_blkno = blk;
	bp->b_bcount = PAGE_SIZE * count;
	bp->b_bufsize = PAGE_SIZE * count;
	bp->b_npages = count;
	bp->b_pgbefore = rbehind != NULL ? *rbehind : 0;
	bp->b_pgafter = rahead != NULL ? *rahead : 0;

	VM_CNT_INC(v_swapin);
	VM_CNT_ADD(v_swappgsin, count);

	/*
	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
	 * this point because we automatically release it on completion.
	 * Instead, we look at the one page we are interested in which we
	 * still hold a lock on even through the I/O completion.
	 *
	 * The other pages in our ma[] array are also released on completion,
	 * so we cannot assume they are valid anymore either.
	 *
	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
	 */
	BUF_KERNPROC(bp);
	swp_pager_strategy(bp);

	/*
	 * Wait for the pages we want to complete.  VPO_SWAPINPROG is always
	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
	 * is set in the metadata for each page in the request.
	 */
	VM_OBJECT_WLOCK(object);
	/* This could be implemented more efficiently with aflags */
	while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
		ma[0]->oflags |= VPO_SWAPSLEEP;
		VM_CNT_INC(v_intrans);
		if (VM_OBJECT_SLEEP(object, &object->handle, PSWP,
		    "swread", hz * 20)) {
			printf(
"swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
			    bp->b_bufobj, (intmax_t)bp->b_blkno, bp->b_bcount);
		}
	}
	VM_OBJECT_WUNLOCK(object);

	/*
	 * If we had an unrecoverable read error, pages will not be valid.
	 */
	for (i = 0; i < reqcount; i++)
		if (ma[i]->valid != VM_PAGE_BITS_ALL)
			return (VM_PAGER_ERROR);

	return (VM_PAGER_OK);

	/*
	 * A final note: in a low swap situation, we cannot deallocate swap
	 * and mark a page dirty here because the caller is likely to mark
	 * the page clean when we return, causing the page to possibly revert
	 * to all-zero's later.
	 */
}

static int
swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count,
    int *rbehind, int *rahead)
{

	VM_OBJECT_WLOCK(object);
	return (swap_pager_getpages_locked(object, ma, count, rbehind, rahead));
}

/*
 * 	swap_pager_getpages_async():
 *
 *	Right now this is emulation of asynchronous operation on top of
 *	swap_pager_getpages().
 */
static int
swap_pager_getpages_async(vm_object_t object, vm_page_t *ma, int count,
    int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
{
	int r, error;

	r = swap_pager_getpages(object, ma, count, rbehind, rahead);
	switch (r) {
	case VM_PAGER_OK:
		error = 0;
		break;
	case VM_PAGER_ERROR:
		error = EIO;
		break;
	case VM_PAGER_FAIL:
		error = EINVAL;
		break;
	default:
		panic("unhandled swap_pager_getpages() error %d", r);
	}
	(iodone)(arg, ma, count, error);

	return (r);
}

/*
 *	swap_pager_putpages:
 *
 *	Assign swap (if necessary) and initiate I/O on the specified pages.
 *
 *	In a low memory situation we may block in VOP_STRATEGY(), but the new
 *	vm_page reservation system coupled with properly written VFS devices
 *	should ensure that no low-memory deadlock occurs.  This is an area
 *	which needs work.
 *
 *	The parent has N vm_object_pip_add() references prior to
 *	calling us and will remove references for rtvals[] that are
 *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
 *	completion.
 *
 *	The parent has soft-busy'd the pages it passes us and will unbusy
 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
 *	We need to unbusy the rest on I/O completion.
 */
static void
swap_pager_putpages(vm_object_t object, vm_page_t *ma, int count,
    int flags, int *rtvals)
{
	struct page_range range;
	struct buf *bp;
	daddr_t addr, blk;
	vm_page_t mreq;
	int i, j, n;
	bool async;

	KASSERT(count == 0 || ma[0]->object == object,
	    ("%s: object mismatch %p/%p",
	    __func__, object, ma[0]->object));

	VM_OBJECT_WUNLOCK(object);
	async = curproc == pageproc && (flags & VM_PAGER_PUT_SYNC) == 0;
	swp_pager_init_freerange(&range);

	/*
	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
	 * The page is left dirty until the pageout operation completes
	 * successfully.
	 */
	for (i = 0; i < count; i += n) {
		/* Maximum I/O size is limited by maximum swap block size. */
		n = min(count - i, nsw_cluster_max);

		if (async) {
			mtx_lock(&swbuf_mtx);
			while (nsw_wcount_async == 0)
				msleep(&nsw_wcount_async, &swbuf_mtx, PVM,
				    "swbufa", 0);
			nsw_wcount_async--;
			mtx_unlock(&swbuf_mtx);
		}

		/* Get a block of swap of size up to size n. */
		blk = swp_pager_getswapspace(&n);
		if (blk == SWAPBLK_NONE) {
			mtx_lock(&swbuf_mtx);
			if (++nsw_wcount_async == 1)
				wakeup(&nsw_wcount_async);
			mtx_unlock(&swbuf_mtx);
			for (j = 0; j < n; ++j)
				rtvals[i + j] = VM_PAGER_FAIL;
			continue;
		}
		VM_OBJECT_WLOCK(object);
		for (j = 0; j < n; ++j) {
			mreq = ma[i + j];
			vm_page_aflag_clear(mreq, PGA_SWAP_FREE);
			addr = swp_pager_meta_build(mreq->object, mreq->pindex,
			    blk + j, false);
			if (addr != SWAPBLK_NONE)
				swp_pager_update_freerange(&range, addr);
			MPASS(mreq->dirty == VM_PAGE_BITS_ALL);
			mreq->oflags |= VPO_SWAPINPROG;
		}
		VM_OBJECT_WUNLOCK(object);

		bp = uma_zalloc(swwbuf_zone, M_WAITOK);
		MPASS((bp->b_flags & B_MAXPHYS) != 0);
		if (async)
			bp->b_flags |= B_ASYNC;
		bp->b_flags |= B_PAGING;
		bp->b_iocmd = BIO_WRITE;

		bp->b_rcred = crhold(thread0.td_ucred);
		bp->b_wcred = crhold(thread0.td_ucred);
		bp->b_bcount = PAGE_SIZE * n;
		bp->b_bufsize = PAGE_SIZE * n;
		bp->b_blkno = blk;
		for (j = 0; j < n; j++)
			bp->b_pages[j] = ma[i + j];
		bp->b_npages = n;

		/*
		 * Must set dirty range for NFS to work.
		 */
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;

		VM_CNT_INC(v_swapout);
		VM_CNT_ADD(v_swappgsout, bp->b_npages);

		/*
		 * We unconditionally set rtvals[] to VM_PAGER_PEND so that we
		 * can call the async completion routine at the end of a
		 * synchronous I/O operation.  Otherwise, our caller would
		 * perform duplicate unbusy and wakeup operations on the page
		 * and object, respectively.
		 */
		for (j = 0; j < n; j++)
			rtvals[i + j] = VM_PAGER_PEND;

		/*
		 * asynchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy.
		 */
		if (async) {
			bp->b_iodone = swp_pager_async_iodone;
			BUF_KERNPROC(bp);
			swp_pager_strategy(bp);
			continue;
		}

		/*
		 * synchronous
		 *
		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy.
		 */
		bp->b_iodone = bdone;
		swp_pager_strategy(bp);

		/*
		 * Wait for the sync I/O to complete.
		 */
		bwait(bp, PVM, "swwrt");

		/*
		 * Now that we are through with the bp, we can call the
		 * normal async completion, which frees everything up.
		 */
		swp_pager_async_iodone(bp);
	}
	swp_pager_freeswapspace(&range);
	VM_OBJECT_WLOCK(object);
}

/*
 *	swp_pager_async_iodone:
 *
 *	Completion routine for asynchronous reads and writes from/to swap.
 *	Also called manually by synchronous code to finish up a bp.
 *
 *	This routine may not sleep.
 */
static void
swp_pager_async_iodone(struct buf *bp)
{
	int i;
	vm_object_t object = NULL;

	/*
	 * Report error - unless we ran out of memory, in which case
	 * we've already logged it in swapgeom_strategy().
	 */
	if (bp->b_ioflags & BIO_ERROR && bp->b_error != ENOMEM) {
		printf(
		    "swap_pager: I/O error - %s failed; blkno %ld, "
			"size %ld, error %d\n",
		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
		    (long)bp->b_blkno,
		    (long)bp->b_bcount,
		    bp->b_error
		);
	}

	/*
	 * remove the mapping for kernel virtual
	 */
	if (buf_mapped(bp))
		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
	else
		bp->b_data = bp->b_kvabase;

	if (bp->b_npages) {
		object = bp->b_pages[0]->object;
		VM_OBJECT_WLOCK(object);
	}

	/*
	 * cleanup pages.  If an error occurs writing to swap, we are in
	 * very serious trouble.  If it happens to be a disk error, though,
	 * we may be able to recover by reassigning the swap later on.  So
	 * in this case we remove the m->swapblk assignment for the page
	 * but do not free it in the rlist.  The erroneous block(s) are thus
1700 	 * never reallocated as swap.  Redirty the page and continue.
1701 	 */
1702 	for (i = 0; i < bp->b_npages; ++i) {
1703 		vm_page_t m = bp->b_pages[i];
1704 
1705 		m->oflags &= ~VPO_SWAPINPROG;
1706 		if (m->oflags & VPO_SWAPSLEEP) {
1707 			m->oflags &= ~VPO_SWAPSLEEP;
1708 			wakeup(&object->handle);
1709 		}
1710 
1711 		/* We always have space after I/O, successful or not. */
1712 		vm_page_aflag_set(m, PGA_SWAP_SPACE);
1713 
1714 		if (bp->b_ioflags & BIO_ERROR) {
1715 			/*
1716 			 * If an error occurs I'd love to throw the swapblk
1717 			 * away without freeing it back to swapspace, so it
1718 			 * can never be used again.  But I can't from an
1719 			 * interrupt.
1720 			 */
1721 			if (bp->b_iocmd == BIO_READ) {
1722 				/*
1723 				 * NOTE: for reads, m->dirty will probably
1724 				 * be overridden by the original caller of
1725 				 * getpages so don't play cute tricks here.
1726 				 */
1727 				vm_page_invalid(m);
1728 				if (i < bp->b_pgbefore ||
1729 				    i >= bp->b_npages - bp->b_pgafter)
1730 					vm_page_free_invalid(m);
1731 			} else {
1732 				/*
1733 				 * If a write error occurs, reactivate page
1734 				 * so it doesn't clog the inactive list,
1735 				 * then finish the I/O.
1736 				 */
1737 				MPASS(m->dirty == VM_PAGE_BITS_ALL);
1738 
1739 				/* PQ_UNSWAPPABLE? */
1740 				vm_page_activate(m);
1741 				vm_page_sunbusy(m);
1742 			}
1743 		} else if (bp->b_iocmd == BIO_READ) {
1744 			/*
1745 			 * NOTE: for reads, m->dirty will probably be
1746 			 * overridden by the original caller of getpages so
1747 			 * we cannot set them in order to free the underlying
1748 			 * swap in a low-swap situation.  I don't think we'd
1749 			 * want to do that anyway, but it was an optimization
1750 			 * that existed in the old swapper for a time before
1751 			 * it got ripped out due to precisely this problem.
1752 			 */
1753 			KASSERT(!pmap_page_is_mapped(m),
1754 			    ("swp_pager_async_iodone: page %p is mapped", m));
1755 			KASSERT(m->dirty == 0,
1756 			    ("swp_pager_async_iodone: page %p is dirty", m));
1757 
1758 			vm_page_valid(m);
1759 			if (i < bp->b_pgbefore ||
1760 			    i >= bp->b_npages - bp->b_pgafter)
1761 				vm_page_readahead_finish(m);
1762 		} else {
1763 			/*
1764 			 * For write success, clear the dirty
1765 			 * For write success, clear the dirty status, then
1766 			 * finish the I/O (which decrements the busy count and
1767 			 * possibly wakes waiters up).
1768 			 * A page is only written to swap after a period of
1769 			 * inactivity.  Therefore, we do not expect it to be
1770 			 * reused.
1771 			 */
1772 			    ("swp_pager_async_iodone: page %p is not write"
1773 			    " protected", m));
1774 			vm_page_undirty(m);
1775 			vm_page_deactivate_noreuse(m);
1776 			vm_page_sunbusy(m);
1777 		}
1778 	}
1779 
1780 	/*
1781 	 * adjust pip.  NOTE: the original parent may still have its own
1782 	 * pip refs on the object.
1783 	 */
1784 	if (object != NULL) {
1785 		vm_object_pip_wakeupn(object, bp->b_npages);
1786 		VM_OBJECT_WUNLOCK(object);
1787 	}
1788 
1789 	/*
1790 	 * swapdev_strategy() manually sets b_vp and b_bufobj before calling
1791 	 * bstrategy(). Set them back to NULL now we're done with it, or we'll
1792 	 * trigger a KASSERT in relpbuf().
1793 	 */
1794 	if (bp->b_vp) {
1795 		bp->b_vp = NULL;
1796 		bp->b_bufobj = NULL;
1797 	}
1798 	/*
1799 	 * release the physical I/O buffer
1800 	 */
1801 	if (bp->b_flags & B_ASYNC) {
1802 		mtx_lock(&swbuf_mtx);
1803 		if (++nsw_wcount_async == 1)
1804 			wakeup(&nsw_wcount_async);
1805 		mtx_unlock(&swbuf_mtx);
1806 	}
1807 	uma_zfree((bp->b_iocmd == BIO_READ) ? swrbuf_zone : swwbuf_zone, bp);
1808 }
1809 
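/*
 * Illustrative sketch (not verbatim code from this file): a synchronous
 * caller reaches the completion routine above by waiting on the buffer
 * itself and then invoking it manually, along these lines:
 *
 *	bp->b_iodone = bufdone;
 *	swp_pager_strategy(bp);
 *	bwait(bp, PVM, "swwrt");
 *	swp_pager_async_iodone(bp);
 */
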
1810 int
1811 swap_pager_nswapdev(void)
1812 {
1813 
1814 	return (nswapdev);
1815 }
1816 
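/*
 * Mark the page dirty and queue it for laundering, so that its contents
 * are written out again to another location, and release its old swap
 * block into the caller's free range.
 */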
1817 static void
1818 swp_pager_force_dirty(struct page_range *range, vm_page_t m, daddr_t *blk)
1819 {
1820 	vm_page_dirty(m);
1821 	swap_pager_unswapped_acct(m);
1822 	swp_pager_update_freerange(range, *blk);
1823 	*blk = SWAPBLK_NONE;
1824 	vm_page_launder(m);
1825 }
1826 
1827 u_long
1828 swap_pager_swapped_pages(vm_object_t object)
1829 {
1830 	struct swblk *sb;
1831 	u_long res;
1832 	int i;
1833 
1834 	VM_OBJECT_ASSERT_LOCKED(object);
1835 
1836 	if (swblk_is_empty(object))
1837 		return (0);
1838 
1839 	res = 0;
1840 	for (sb = swblk_start(object, 0); sb != NULL;
1841 	    sb = swblk_next(object, sb)) {
1842 		for (i = 0; i < SWAP_META_PAGES; i++) {
1843 			if (sb->d[i] != SWAPBLK_NONE)
1844 				res++;
1845 		}
1846 	}
1847 	return (res);
1848 }
1849 
1850 /*
1851  *	swap_pager_swapoff_object:
1852  *
1853  *	Page in all of the pages that have been paged out for an object
1854  *	to a swap device.
1855  */
1856 static void
1857 swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
1858 {
1859 	struct page_range range;
1860 	struct swblk *sb;
1861 	vm_page_t m;
1862 	vm_pindex_t pi;
1863 	daddr_t blk;
1864 	int i, rahead, rv;
1865 	bool sb_empty;
1866 
1867 	VM_OBJECT_ASSERT_WLOCKED(object);
1868 	KASSERT((object->flags & OBJ_SWAP) != 0,
1869 	    ("%s: Object not swappable", __func__));
1870 
1871 	pi = 0;
1872 	i = 0;
1873 	swp_pager_init_freerange(&range);
1874 	for (;;) {
1875 		if (i == 0 && (object->flags & OBJ_DEAD) != 0) {
1876 			/*
1877 			 * Make sure that pending writes finish before
1878 			 * returning.
1879 			 */
1880 			vm_object_pip_wait(object, "swpoff");
1881 			swp_pager_meta_free_all(object);
1882 			break;
1883 		}
1884 
1885 		if (i == SWAP_META_PAGES) {
1886 			pi = sb->p + SWAP_META_PAGES;
1887 			if (sb_empty) {
1888 				swblk_lookup_remove(object, sb);
1889 				uma_zfree(swblk_zone, sb);
1890 			}
1891 			i = 0;
1892 		}
1893 
1894 		if (i == 0) {
1895 			sb = swblk_start(object, pi);
1896 			if (sb == NULL)
1897 				break;
1898 			sb_empty = true;
1899 			m = NULL;
1900 		}
1901 
1902 		/* Skip an invalid block. */
1903 		blk = sb->d[i];
1904 		if (blk == SWAPBLK_NONE || !swp_pager_isondev(blk, sp)) {
1905 			if (blk != SWAPBLK_NONE)
1906 				sb_empty = false;
1907 			m = NULL;
1908 			i++;
1909 			continue;
1910 		}
1911 
1912 		/*
1913 		 * Look for a page corresponding to this block.  If the found
1914 		 * page has pending operations, sleep and restart the scan.
1915 		 */
1916 		m = m != NULL ? vm_page_next(m) :
1917 		    vm_page_lookup(object, sb->p + i);
1918 		if (m != NULL && (m->oflags & VPO_SWAPINPROG) != 0) {
1919 			m->oflags |= VPO_SWAPSLEEP;
1920 			VM_OBJECT_SLEEP(object, &object->handle, PSWP, "swpoff",
1921 			    0);
1922 			i = 0;	/* Restart scan after object lock dropped. */
1923 			continue;
1924 		}
1925 
1926 		/*
1927 		 * If the found page is valid, mark it dirty and free the swap
1928 		 * block.
1929 		 */
1930 		if (m != NULL && vm_page_all_valid(m)) {
1931 			swp_pager_force_dirty(&range, m, &sb->d[i]);
1932 			i++;
1933 			continue;
1934 		}
1935 
1936 		/* Is there a page we can acquire or allocate? */
1937 		if (m == NULL) {
1938 			m = vm_page_alloc(object, sb->p + i,
1939 			    VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
1940 		} else if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
1941 			m = NULL;
1942 
1943 		/* If no page available, repeat this iteration. */
1944 		if (m == NULL)
1945 			continue;
1946 
1947 		/* Get the page from swap, mark it dirty, restart the scan. */
1948 		vm_object_pip_add(object, 1);
1949 		rahead = SWAP_META_PAGES;
1950 		rv = swap_pager_getpages_locked(object, &m, 1, NULL, &rahead);
1951 		if (rv != VM_PAGER_OK)
1952 			panic("%s: read from swap failed: %d", __func__, rv);
1953 		VM_OBJECT_WLOCK(object);
1954 		vm_object_pip_wakeupn(object, 1);
1955 		KASSERT(vm_page_all_valid(m),
1956 		    ("%s: Page %p not all valid", __func__, m));
1957 		vm_page_xunbusy(m);
1958 		i = 0;	/* Restart scan after object lock dropped. */
1959 	}
1960 	swp_pager_freeswapspace(&range);
1961 }
1962 
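/*
 * A note on the scan above: 'i' is reset to zero whenever the object
 * lock may have been dropped, forcing a fresh swblk_start() lookup,
 * because both the swblk tree and the resident pages can change while
 * the lock is released.
 */
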
1963 /*
1964  *	swap_pager_swapoff:
1965  *
1966  *	Page in all of the pages that have been paged out to the
1967  *	given device.  The corresponding blocks in the bitmap must be
1968  *	marked as allocated and the device must be flagged SW_CLOSING.
1969  *	There may be no processes swapped out to the device.
1970  *
1971  *	This routine may block.
1972  */
1973 static void
1974 swap_pager_swapoff(struct swdevt *sp)
1975 {
1976 	vm_object_t object;
1977 	int retries;
1978 
1979 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
1980 
1981 	retries = 0;
1982 full_rescan:
1983 	mtx_lock(&vm_object_list_mtx);
1984 	TAILQ_FOREACH(object, &vm_object_list, object_list) {
1985 		if ((object->flags & OBJ_SWAP) == 0)
1986 			continue;
1987 		mtx_unlock(&vm_object_list_mtx);
1988 		/* Depends on type-stability. */
1989 		VM_OBJECT_WLOCK(object);
1990 
1991 		/*
1992 		 * Dead objects are eventually terminated on their own.
1993 		 */
1994 		if ((object->flags & OBJ_DEAD) != 0)
1995 			goto next_obj;
1996 
1997 		/*
1998 		 * Sync with fences placed after pctrie
1999 		 * initialization.  We must not access pctrie below
2000 		 * unless we checked that our object is swap and not
2001 		 * dead.
2002 		 */
2003 		atomic_thread_fence_acq();
2004 		if ((object->flags & OBJ_SWAP) == 0)
2005 			goto next_obj;
2006 
2007 		swap_pager_swapoff_object(sp, object);
2008 next_obj:
2009 		VM_OBJECT_WUNLOCK(object);
2010 		mtx_lock(&vm_object_list_mtx);
2011 	}
2012 	mtx_unlock(&vm_object_list_mtx);
2013 
2014 	if (sp->sw_used) {
2015 		/*
2016 		 * Objects may be locked or paging to the device being
2017 		 * removed, so we will miss their pages and need to
2018 		 * make another pass.  We have marked this device as
2019 		 * SW_CLOSING, so the activity should finish soon.
2020 		 */
2021 		retries++;
2022 		if (retries > 100) {
2023 			panic("swapoff: failed to locate %d swap blocks",
2024 			    sp->sw_used);
2025 		}
2026 		pause("swpoff", hz / 20);
2027 		goto full_rescan;
2028 	}
2029 	EVENTHANDLER_INVOKE(swapoff, sp);
2030 }
2031 
2032 /************************************************************************
2033  *				SWAP META DATA 				*
2034  ************************************************************************
2035  *
2036  *	These routines manipulate the swap metadata stored in the
2037  *	OBJT_SWAP object.
2038  *
2039  *	Swap metadata is kept in a per-object radix tree (pctrie) rooted
2040  *	at object->un_pager.swp.swp_blks; each struct swblk node maps
2041  *	SWAP_META_PAGES consecutive page indices to swap block addresses.
2042  */
2043 
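/*
 * A sketch of the lookup scheme implemented below: a page index selects a
 * struct swblk by its rounded-down key and a slot within it, roughly
 *
 *	sb = swblk_lookup(object, rounddown(pindex, SWAP_META_PAGES));
 *	blk = (sb != NULL) ? sb->d[pindex % SWAP_META_PAGES] : SWAPBLK_NONE;
 *
 * so each swblk covers SWAP_META_PAGES consecutive page indices starting
 * at its sb->p key.
 */
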
2044 /*
2045  * SWP_PAGER_SWBLK_EMPTY() - is a range of blocks free?
2046  */
2047 static bool
2048 swp_pager_swblk_empty(struct swblk *sb, int start, int limit)
2049 {
2050 	int i;
2051 
2052 	MPASS(0 <= start && start <= limit && limit <= SWAP_META_PAGES);
2053 	for (i = start; i < limit; i++) {
2054 		if (sb->d[i] != SWAPBLK_NONE)
2055 			return (false);
2056 	}
2057 	return (true);
2058 }
2059 
2060 /*
2061  * SWP_PAGER_FREE_EMPTY_SWBLK() - free the swblk if all its slots are empty
2062  *
2063  *  Nothing is done if any slot in the swblk is still in use.
2064  */
2065 static void
2066 swp_pager_free_empty_swblk(vm_object_t object, struct swblk *sb)
2067 {
2068 
2069 	if (swp_pager_swblk_empty(sb, 0, SWAP_META_PAGES)) {
2070 		swblk_lookup_remove(object, sb);
2071 		uma_zfree(swblk_zone, sb);
2072 	}
2073 }
2074 
2075 /*
2076  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
2077  *
2078  *	Try to add the specified swapblk to the object's swap metadata.  If
2079  *	nowait_noreplace is set, add the specified swapblk only if there is no
2080  *	previously assigned swapblk at pindex.  If the swapblk is invalid and
2081  *	replaces a valid swapblk, the now-empty swap metadata is freed.  If
2082  *	memory allocation fails and nowait_noreplace is set, return the
2083  *	specified swapblk immediately to indicate failure; otherwise, wait and
2084  *	retry until memory allocation succeeds.  Return the previously
2085  *	assigned swapblk, if any.
2086  */
2087 static daddr_t
2088 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk,
2089     bool nowait_noreplace)
2090 {
2091 	static volatile int swblk_zone_exhausted, swpctrie_zone_exhausted;
2092 	struct swblk *sb, *sb1;
2093 	vm_pindex_t modpi, rdpi;
2094 	daddr_t prev_swapblk;
2095 	int error, i;
2096 
2097 	VM_OBJECT_ASSERT_WLOCKED(object);
2098 
2099 	rdpi = rounddown(pindex, SWAP_META_PAGES);
2100 	sb = swblk_lookup(object, rdpi);
2101 	if (sb == NULL) {
2102 		if (swapblk == SWAPBLK_NONE)
2103 			return (SWAPBLK_NONE);
2104 		for (;;) {
2105 			sb = uma_zalloc(swblk_zone, M_NOWAIT | (curproc ==
2106 			    pageproc ? M_USE_RESERVE : 0));
2107 			if (sb != NULL) {
2108 				sb->p = rdpi;
2109 				for (i = 0; i < SWAP_META_PAGES; i++)
2110 					sb->d[i] = SWAPBLK_NONE;
2111 				if (atomic_cmpset_int(&swblk_zone_exhausted,
2112 				    1, 0))
2113 					printf("swblk zone ok\n");
2114 				break;
2115 			}
2116 			if (nowait_noreplace)
2117 				return (swapblk);
2118 			VM_OBJECT_WUNLOCK(object);
2119 			if (uma_zone_exhausted(swblk_zone)) {
2120 				if (atomic_cmpset_int(&swblk_zone_exhausted,
2121 				    0, 1))
2122 					printf("swap blk zone exhausted, "
2123 					    "increase kern.maxswzone\n");
2124 				vm_pageout_oom(VM_OOM_SWAPZ);
2125 				pause("swzonxb", 10);
2126 			} else
2127 				uma_zwait(swblk_zone);
2128 			VM_OBJECT_WLOCK(object);
2129 			sb = swblk_lookup(object, rdpi);
2130 			if (sb != NULL)
2131 				/*
2132 				 * Somebody swapped out a nearby page,
2133 				 * allocating swblk at the rdpi index,
2134 				 * while we dropped the object lock.
2135 				 */
2136 				goto allocated;
2137 		}
2138 		for (;;) {
2139 			error = swblk_lookup_insert(object, sb);
2140 			if (error == 0) {
2141 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
2142 				    1, 0))
2143 					printf("swpctrie zone ok\n");
2144 				break;
2145 			}
2146 			if (nowait_noreplace) {
2147 				uma_zfree(swblk_zone, sb);
2148 				return (swapblk);
2149 			}
2150 			VM_OBJECT_WUNLOCK(object);
2151 			if (uma_zone_exhausted(swpctrie_zone)) {
2152 				if (atomic_cmpset_int(&swpctrie_zone_exhausted,
2153 				    0, 1))
2154 					printf("swap pctrie zone exhausted, "
2155 					    "increase kern.maxswzone\n");
2156 				vm_pageout_oom(VM_OOM_SWAPZ);
2157 				pause("swzonxp", 10);
2158 			} else
2159 				uma_zwait(swpctrie_zone);
2160 			VM_OBJECT_WLOCK(object);
2161 			sb1 = swblk_lookup(object, rdpi);
2162 			if (sb1 != NULL) {
2163 				uma_zfree(swblk_zone, sb);
2164 				sb = sb1;
2165 				goto allocated;
2166 			}
2167 		}
2168 	}
2169 allocated:
2170 	MPASS(sb->p == rdpi);
2171 
2172 	modpi = pindex % SWAP_META_PAGES;
2173 	/* Return prior contents of metadata. */
2174 	prev_swapblk = sb->d[modpi];
2175 	if (!nowait_noreplace || prev_swapblk == SWAPBLK_NONE) {
2176 		/* Enter block into metadata. */
2177 		sb->d[modpi] = swapblk;
2178 
2179 		/*
2180 		 * Free the swblk if we end up with the empty page run.
2181 		 */
2182 		if (swapblk == SWAPBLK_NONE)
2183 			swp_pager_free_empty_swblk(object, sb);
2184 	}
2185 	return (prev_swapblk);
2186 }
2187 
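/*
 * Usage note: passing SWAPBLK_NONE as the new swapblk releases any
 * existing assignment at pindex and reclaims the swblk if it becomes
 * empty.  A sketch of dropping a single mapping:
 *
 *	blk = swp_pager_meta_build(object, pindex, SWAPBLK_NONE, false);
 *	if (blk != SWAPBLK_NONE)
 *		swp_pager_update_freerange(&range, blk);
 */
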
2188 /*
2189  * SWP_PAGER_META_TRANSFER() - transfer a range of blocks in the srcobject's
2190  * swap metadata into dstobject.
2191  *
2192  *	Blocks in src that correspond to holes in dst are transferred.  Blocks
2193  *	in src that correspond to blocks in dst are freed.
2194  */
2195 static void
2196 swp_pager_meta_transfer(vm_object_t srcobject, vm_object_t dstobject,
2197     vm_pindex_t pindex, vm_pindex_t count)
2198 {
2199 	struct page_range range;
2200 	struct swblk *sb;
2201 	daddr_t blk, d[SWAP_META_PAGES];
2202 	vm_pindex_t offset, last;
2203 	int d_mask, i, limit, start;
2204 	_Static_assert(8 * sizeof(d_mask) >= SWAP_META_PAGES,
2205 	    "d_mask not big enough");
2206 
2207 	VM_OBJECT_ASSERT_WLOCKED(srcobject);
2208 	VM_OBJECT_ASSERT_WLOCKED(dstobject);
2209 
2210 	if (count == 0 || swblk_is_empty(srcobject))
2211 		return;
2212 
2213 	swp_pager_init_freerange(&range);
2214 	d_mask = 0;
2215 	offset = pindex;
2216 	last = pindex + count;
2217 	sb = swblk_start_limit(srcobject, pindex, last);
2218 	start = (sb != NULL && sb->p < pindex) ? pindex - sb->p : 0;
2219 	for (; sb != NULL;
2220 	    sb = swblk_start_limit(srcobject, pindex, last), start = 0) {
2221 		pindex = sb->p;
2222 		MPASS(d_mask == 0);
2223 		limit = MIN(last - pindex, SWAP_META_PAGES);
2224 		for (i = start; i < limit; i++) {
2225 			if (sb->d[i] == SWAPBLK_NONE)
2226 				continue;
2227 			blk = swp_pager_meta_build(dstobject,
2228 			    pindex + i - offset, sb->d[i], true);
2229 			if (blk == sb->d[i]) {
2230 				/*
2231 				 * Failed memory allocation stopped transfer;
2232 				 * save this block for transfer with lock
2233 				 * released.
2234 				 */
2235 				d[i] = blk;
2236 				d_mask |= 1 << i;
2237 			} else if (blk != SWAPBLK_NONE) {
2238 				/* Dst has a block at pindex, so free block. */
2239 				swp_pager_update_freerange(&range, sb->d[i]);
2240 			}
2241 			sb->d[i] = SWAPBLK_NONE;
2242 		}
2243 		if (swp_pager_swblk_empty(sb, 0, start) &&
2244 		    swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) {
2245 			swblk_lookup_remove(srcobject, sb);
2246 			uma_zfree(swblk_zone, sb);
2247 		}
2248 		if (d_mask != 0) {
2249 			/* Finish block transfer, with the lock released. */
2250 			VM_OBJECT_WUNLOCK(srcobject);
2251 			do {
2252 				i = ffs(d_mask) - 1;
2253 				swp_pager_meta_build(dstobject,
2254 				    pindex + i - offset, d[i], false);
2255 				d_mask &= ~(1 << i);
2256 			} while (d_mask != 0);
2257 			VM_OBJECT_WLOCK(srcobject);
2258 		}
2259 		pindex += SWAP_META_PAGES;
2260 	}
2261 	swp_pager_freeswapspace(&range);
2262 }
2263 
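/*
 * A note on the deferred-transfer scheme above: swp_pager_meta_build() is
 * first attempted in nowait_noreplace mode so that both object locks stay
 * held.  Slots whose metadata allocation fails are remembered in the
 * d[]/d_mask scratch area and retried with the source object unlocked,
 * where the allocation may sleep; the _Static_assert ensures d_mask has a
 * bit for every SWAP_META_PAGES slot.
 */
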
2264 /*
2265  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
2266  *
2267  *	Return freed swap blocks to the swap bitmap, and free emptied swblk
2268  *	metadata.  With 'freed' set, provide a count of freed blocks that were
2269  *	not associated with valid resident pages.
2270  */
2271 static void
2272 swp_pager_meta_free(vm_object_t object, vm_pindex_t pindex, vm_pindex_t count,
2273     vm_size_t *freed)
2274 {
2275 	struct page_range range;
2276 	struct swblk *sb;
2277 	vm_page_t m;
2278 	vm_pindex_t last;
2279 	vm_size_t fc;
2280 	int i, limit, start;
2281 
2282 	VM_OBJECT_ASSERT_WLOCKED(object);
2283 
2284 	fc = 0;
2285 	m = NULL;
2286 	if (count == 0 || swblk_is_empty(object))
2287 		goto out;
2288 
2289 	swp_pager_init_freerange(&range);
2290 	last = pindex + count;
2291 	sb = swblk_start_limit(object, pindex, last);
2292 	start = (sb != NULL && sb->p < pindex) ? pindex - sb->p : 0;
2293 	for (; sb != NULL;
2294 	    sb = swblk_start_limit(object, pindex, last), start = 0) {
2295 		limit = MIN(last - sb->p, SWAP_META_PAGES);
2296 		for (i = start; i < limit; i++) {
2297 			if (sb->d[i] == SWAPBLK_NONE)
2298 				continue;
2299 			swp_pager_update_freerange(&range, sb->d[i]);
2300 			if (freed != NULL) {
2301 				m = (m != NULL && m->pindex == sb->p + i - 1) ?
2302 				    vm_page_next(m) :
2303 				    vm_page_lookup(object, sb->p + i);
2304 				if (m == NULL || vm_page_none_valid(m))
2305 					fc++;
2306 			}
2307 			sb->d[i] = SWAPBLK_NONE;
2308 		}
2309 		pindex = sb->p + SWAP_META_PAGES;
2310 		if (swp_pager_swblk_empty(sb, 0, start) &&
2311 		    swp_pager_swblk_empty(sb, limit, SWAP_META_PAGES)) {
2312 			swblk_lookup_remove(object, sb);
2313 			uma_zfree(swblk_zone, sb);
2314 		}
2315 	}
2316 	swp_pager_freeswapspace(&range);
2317 out:
2318 	if (freed != NULL)
2319 		*freed = fc;
2320 }
2321 
2322 static void
2323 swp_pager_meta_free_block(struct swblk *sb, void *rangev)
2324 {
2325 	struct page_range *range = rangev;
2326 
2327 	for (int i = 0; i < SWAP_META_PAGES; i++) {
2328 		if (sb->d[i] != SWAPBLK_NONE)
2329 			swp_pager_update_freerange(range, sb->d[i]);
2330 	}
2331 	uma_zfree(swblk_zone, sb);
2332 }
2333 
2334 /*
2335  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
2336  *
2337  *	This routine locates and destroys all swap metadata associated with
2338  *	an object.
2339  */
2340 static void
2341 swp_pager_meta_free_all(vm_object_t object)
2342 {
2343 	struct page_range range;
2344 
2345 	VM_OBJECT_ASSERT_WLOCKED(object);
2346 
2347 	swp_pager_init_freerange(&range);
2348 	SWAP_PCTRIE_RECLAIM_CALLBACK(&object->un_pager.swp.swp_blks,
2349 	    swp_pager_meta_free_block, &range);
2350 	swp_pager_freeswapspace(&range);
2351 }
2352 
2353 /*
2354  * SWP_PAGER_META_LOOKUP() - look up the swapblk for a page index
2355  *
2356  *	This routine looks up the swapblk assignment for a pindex in the
2357  *	swap meta data.  It returns the swapblk being looked up, or
2358  *	SWAPBLK_NONE if no block is assigned at that index.
2359  *
2360  *	When acting on a busy resident page and paging is in progress, we
2361  *	have to wait until paging is complete but otherwise can act on the
2362  *	busy page.
2363  */
2364 static daddr_t
2365 swp_pager_meta_lookup(vm_object_t object, vm_pindex_t pindex)
2366 {
2367 	struct swblk *sb;
2368 
2369 	VM_OBJECT_ASSERT_LOCKED(object);
2370 
2371 	/*
2372 	 * The meta data only exists if the object is OBJT_SWAP
2373 	 * and even then might not be allocated yet.
2374 	 */
2375 	KASSERT((object->flags & OBJ_SWAP) != 0,
2376 	    ("Lookup object not swappable"));
2377 
2378 	sb = swblk_lookup(object, pindex);
2379 	if (sb == NULL)
2380 		return (SWAPBLK_NONE);
2381 	return (sb->d[pindex % SWAP_META_PAGES]);
2382 }
2383 
2384 /*
2385  * Returns the least page index which is greater than or equal to the parameter
2386  * pindex and for which there is a swap block allocated.  Returns OBJ_MAX_SIZE
2387  * if there are no allocated swap blocks for the object after pindex.
2388  */
2389 vm_pindex_t
2390 swap_pager_find_least(vm_object_t object, vm_pindex_t pindex)
2391 {
2392 	struct swblk *sb;
2393 	int i;
2394 
2395 	if ((sb = swblk_start(object, pindex)) == NULL)
2396 		return (OBJ_MAX_SIZE);
2397 	if (sb->p < pindex) {
2398 		for (i = pindex % SWAP_META_PAGES; i < SWAP_META_PAGES; i++) {
2399 			if (sb->d[i] != SWAPBLK_NONE)
2400 				return (sb->p + i);
2401 		}
2402 		if ((sb = swblk_next(object, sb)) == NULL)
2403 			return (OBJ_MAX_SIZE);
2404 	}
2405 	for (i = 0; i < SWAP_META_PAGES; i++) {
2406 		if (sb->d[i] != SWAPBLK_NONE)
2407 			return (sb->p + i);
2408 	}
2409 
2410 	/*
2411 	 * We get here if a swblk is present in the trie but it
2412 	 * doesn't map any blocks.
2413 	 */
2414 	MPASS(0);
2415 	return (OBJ_MAX_SIZE);
2416 }
2417 
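/*
 * Usage sketch (hypothetical caller): every page index of an object that
 * has swap allocated can be visited with
 *
 *	for (pi = swap_pager_find_least(object, 0); pi != OBJ_MAX_SIZE;
 *	    pi = swap_pager_find_least(object, pi + 1))
 *		...;
 */
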
2418 /*
2419  * System call swapon(name) enables swapping on device name,
2420  * which must be in the swdevsw.  Return EBUSY
2421  * if already swapping on this device.
2422  */
2423 #ifndef _SYS_SYSPROTO_H_
2424 struct swapon_args {
2425 	char *name;
2426 };
2427 #endif
2428 
2429 int
2430 sys_swapon(struct thread *td, struct swapon_args *uap)
2431 {
2432 	struct vattr attr;
2433 	struct vnode *vp;
2434 	struct nameidata nd;
2435 	int error;
2436 
2437 	error = priv_check(td, PRIV_SWAPON);
2438 	if (error)
2439 		return (error);
2440 
2441 	sx_xlock(&swdev_syscall_lock);
2442 
2443 	/*
2444 	 * Swap metadata may not fit in the KVM if we have physical
2445 	 * memory of >1GB.
2446 	 */
2447 	if (swblk_zone == NULL) {
2448 		error = ENOMEM;
2449 		goto done;
2450 	}
2451 
2452 	NDINIT(&nd, LOOKUP, ISOPEN | FOLLOW | LOCKLEAF | AUDITVNODE1,
2453 	    UIO_USERSPACE, uap->name);
2454 	error = namei(&nd);
2455 	if (error)
2456 		goto done;
2457 
2458 	NDFREE_PNBUF(&nd);
2459 	vp = nd.ni_vp;
2460 
2461 	if (vn_isdisk_error(vp, &error)) {
2462 		error = swapongeom(vp);
2463 	} else if (vp->v_type == VREG &&
2464 	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2465 	    (error = VOP_GETATTR(vp, &attr, td->td_ucred)) == 0) {
2466 		/*
2467 		 * Allow direct swapping to NFS regular files in the same
2468 		 * way that nfs_mountroot() sets up diskless swapping.
2469 		 */
2470 		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2471 	}
2472 
2473 	if (error != 0)
2474 		vput(vp);
2475 	else
2476 		VOP_UNLOCK(vp);
2477 done:
2478 	sx_xunlock(&swdev_syscall_lock);
2479 	return (error);
2480 }
2481 
2482 /*
2483  * Check that the total amount of swap currently configured does not
2484  * exceed half the theoretical maximum.  If it does, print a warning
2485  * message.
2486  */
2487 static void
2488 swapon_check_swzone(void)
2489 {
2490 
2491 	/* recommend using no more than half that amount */
2492 	if (swap_total > swap_maxpages / 2) {
2493 		printf("warning: total configured swap (%lu pages) "
2494 		    "exceeds maximum recommended amount (%lu pages).\n",
2495 		    swap_total, swap_maxpages / 2);
2496 		printf("warning: increase kern.maxswzone "
2497 		    "or reduce amount of swap.\n");
2498 	}
2499 }
2500 
2501 static void
2502 swaponsomething(struct vnode *vp, void *id, u_long nblks,
2503     sw_strategy_t *strategy, sw_close_t *close, dev_t dev, int flags)
2504 {
2505 	struct swdevt *sp, *tsp;
2506 	daddr_t dvbase;
2507 
2508 	/*
2509 	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2510 	 * First round nblks down to a page-aligned value, then convert.
2511 	 *
2512 	 * sp->sw_nblks is in page-sized chunks now too.
2513 	 */
2514 	nblks &= ~(ctodb(1) - 1);
2515 	nblks = dbtoc(nblks);
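	/*
	 * Worked example, assuming PAGE_SIZE is 4096 and DEV_BSIZE is 512
	 * (so ctodb(1) == 8): nblks == 1000003 sectors is first rounded
	 * down to 1000000 by the mask above, and dbtoc() then yields
	 * 125000 page-sized swap blocks.
	 */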
2516 
2517 	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2518 	sp->sw_blist = blist_create(nblks, M_WAITOK);
2519 	sp->sw_vp = vp;
2520 	sp->sw_id = id;
2521 	sp->sw_dev = dev;
2522 	sp->sw_nblks = nblks;
2523 	sp->sw_used = 0;
2524 	sp->sw_strategy = strategy;
2525 	sp->sw_close = close;
2526 	sp->sw_flags = flags;
2527 
2528 	/*
2529 	 * Do not free the first blocks in order to avoid overwriting
2530 	 * any BSD label at the front of the partition.
2531 	 */
2532 	blist_free(sp->sw_blist, howmany(BBSIZE, PAGE_SIZE),
2533 	    nblks - howmany(BBSIZE, PAGE_SIZE));
2534 
2535 	dvbase = 0;
2536 	mtx_lock(&sw_dev_mtx);
2537 	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
2538 		if (tsp->sw_end >= dvbase) {
2539 			/*
2540 			 * We put one uncovered page between the devices
2541 			 * in order to definitively prevent any cross-device
2542 			 * I/O requests.
2543 			 */
2544 			dvbase = tsp->sw_end + 1;
2545 		}
2546 	}
2547 	sp->sw_first = dvbase;
2548 	sp->sw_end = dvbase + nblks;
2549 	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
2550 	nswapdev++;
2551 	swap_pager_avail += nblks - howmany(BBSIZE, PAGE_SIZE);
2552 	swap_total += nblks;
2553 	swapon_check_swzone();
2554 	swp_sizecheck();
2555 	mtx_unlock(&sw_dev_mtx);
2556 	EVENTHANDLER_INVOKE(swapon, sp);
2557 }
2558 
2559 /*
2560  * SYSCALL: swapoff(devname)
2561  *
2562  * Disable swapping on the given device.
2563  *
2564  * XXX: Badly designed system call: it should use a device index
2565  * rather than filename as specification.  We keep sw_vp around
2566  * only to make this work.
2567  */
2568 static int
2569 kern_swapoff(struct thread *td, const char *name, enum uio_seg name_seg,
2570     u_int flags)
2571 {
2572 	struct vnode *vp;
2573 	struct nameidata nd;
2574 	struct swdevt *sp;
2575 	int error;
2576 
2577 	error = priv_check(td, PRIV_SWAPOFF);
2578 	if (error != 0)
2579 		return (error);
2580 	if ((flags & ~(SWAPOFF_FORCE)) != 0)
2581 		return (EINVAL);
2582 
2583 	sx_xlock(&swdev_syscall_lock);
2584 
2585 	NDINIT(&nd, LOOKUP, FOLLOW | AUDITVNODE1, name_seg, name);
2586 	error = namei(&nd);
2587 	if (error)
2588 		goto done;
2589 	NDFREE_PNBUF(&nd);
2590 	vp = nd.ni_vp;
2591 
2592 	mtx_lock(&sw_dev_mtx);
2593 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2594 		if (sp->sw_vp == vp)
2595 			break;
2596 	}
2597 	mtx_unlock(&sw_dev_mtx);
2598 	if (sp == NULL) {
2599 		error = EINVAL;
2600 		goto done;
2601 	}
2602 	error = swapoff_one(sp, td->td_ucred, flags);
2603 done:
2604 	sx_xunlock(&swdev_syscall_lock);
2605 	return (error);
2606 }
2607 
2609 #ifdef COMPAT_FREEBSD13
2610 int
2611 freebsd13_swapoff(struct thread *td, struct freebsd13_swapoff_args *uap)
2612 {
2613 	return (kern_swapoff(td, uap->name, UIO_USERSPACE, 0));
2614 }
2615 #endif
2616 
2617 int
2618 sys_swapoff(struct thread *td, struct swapoff_args *uap)
2619 {
2620 	return (kern_swapoff(td, uap->name, UIO_USERSPACE, uap->flags));
2621 }
2622 
2623 static int
2624 swapoff_one(struct swdevt *sp, struct ucred *cred, u_int flags)
2625 {
2626 	u_long nblks;
2627 #ifdef MAC
2628 	int error;
2629 #endif
2630 
2631 	sx_assert(&swdev_syscall_lock, SA_XLOCKED);
2632 #ifdef MAC
2633 	(void) vn_lock(sp->sw_vp, LK_EXCLUSIVE | LK_RETRY);
2634 	error = mac_system_check_swapoff(cred, sp->sw_vp);
2635 	(void) VOP_UNLOCK(sp->sw_vp);
2636 	if (error != 0)
2637 		return (error);
2638 #endif
2639 	nblks = sp->sw_nblks;
2640 
2641 	/*
2642 	 * We can turn off this swap device safely only if the
2643 	 * available virtual memory in the system will fit the amount
2644 	 * of data we will have to page back in, plus an epsilon so
2645 	 * the system doesn't become critically low on swap space.
2646 	 * The vm_free_count() part does not account e.g. for clean
2647 	 * pages that can be immediately reclaimed without paging, so
2648 	 * this is a very rough estimation.
2649 	 *
2650 	 * On the other hand, not turning swap off on swapoff_all()
2651 	 * means that we can lose swap data when filesystems go away,
2652 	 * which is arguably worse.
2653 	 */
2654 	if ((flags & SWAPOFF_FORCE) == 0 &&
2655 	    vm_free_count() + swap_pager_avail < nblks + nswap_lowat)
2656 		return (ENOMEM);
2657 
2658 	/*
2659 	 * Prevent further allocations on this device.
2660 	 */
2661 	mtx_lock(&sw_dev_mtx);
2662 	sp->sw_flags |= SW_CLOSING;
2663 	swap_pager_avail -= blist_fill(sp->sw_blist, 0, nblks);
2664 	swap_total -= nblks;
2665 	mtx_unlock(&sw_dev_mtx);
2666 
2667 	/*
2668 	 * Page in the contents of the device and close it.
2669 	 */
2670 	swap_pager_swapoff(sp);
2671 
2672 	sp->sw_close(curthread, sp);
2673 	mtx_lock(&sw_dev_mtx);
2674 	sp->sw_id = NULL;
2675 	TAILQ_REMOVE(&swtailq, sp, sw_list);
2676 	nswapdev--;
2677 	if (nswapdev == 0) {
2678 		swap_pager_full = 2;
2679 		swap_pager_almost_full = 1;
2680 	}
2681 	if (swdevhd == sp)
2682 		swdevhd = NULL;
2683 	mtx_unlock(&sw_dev_mtx);
2684 	blist_destroy(sp->sw_blist);
2685 	free(sp, M_VMPGDATA);
2686 	return (0);
2687 }
2688 
2689 void
2690 swapoff_all(void)
2691 {
2692 	struct swdevt *sp, *spt;
2693 	const char *devname;
2694 	int error;
2695 
2696 	sx_xlock(&swdev_syscall_lock);
2697 
2698 	mtx_lock(&sw_dev_mtx);
2699 	TAILQ_FOREACH_SAFE(sp, &swtailq, sw_list, spt) {
2700 		mtx_unlock(&sw_dev_mtx);
2701 		if (vn_isdisk(sp->sw_vp))
2702 			devname = devtoname(sp->sw_vp->v_rdev);
2703 		else
2704 			devname = "[file]";
2705 		error = swapoff_one(sp, thread0.td_ucred, SWAPOFF_FORCE);
2706 		if (error != 0) {
2707 			printf("Cannot remove swap device %s (error=%d), "
2708 			    "skipping.\n", devname, error);
2709 		} else if (bootverbose) {
2710 			printf("Swap device %s removed.\n", devname);
2711 		}
2712 		mtx_lock(&sw_dev_mtx);
2713 	}
2714 	mtx_unlock(&sw_dev_mtx);
2715 
2716 	sx_xunlock(&swdev_syscall_lock);
2717 }
2718 
2719 void
2720 swap_pager_status(int *total, int *used)
2721 {
2722 
2723 	*total = swap_total;
2724 	*used = swap_total - swap_pager_avail -
2725 	    nswapdev * howmany(BBSIZE, PAGE_SIZE);
2726 }
2727 
2728 int
2729 swap_dev_info(int name, struct xswdev *xs, char *devname, size_t len)
2730 {
2731 	struct swdevt *sp;
2732 	const char *tmp_devname;
2733 	int error, n;
2734 
2735 	n = 0;
2736 	error = ENOENT;
2737 	mtx_lock(&sw_dev_mtx);
2738 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2739 		if (n != name) {
2740 			n++;
2741 			continue;
2742 		}
2743 		xs->xsw_version = XSWDEV_VERSION;
2744 		xs->xsw_dev = sp->sw_dev;
2745 		xs->xsw_flags = sp->sw_flags;
2746 		xs->xsw_nblks = sp->sw_nblks;
2747 		xs->xsw_used = sp->sw_used;
2748 		if (devname != NULL) {
2749 			if (vn_isdisk(sp->sw_vp))
2750 				tmp_devname = devtoname(sp->sw_vp->v_rdev);
2751 			else
2752 				tmp_devname = "[file]";
2753 			strncpy(devname, tmp_devname, len);
2754 		}
2755 		error = 0;
2756 		break;
2757 	}
2758 	mtx_unlock(&sw_dev_mtx);
2759 	return (error);
2760 }
2761 
2762 #if defined(COMPAT_FREEBSD11)
2763 #define XSWDEV_VERSION_11	1
2764 struct xswdev11 {
2765 	u_int	xsw_version;
2766 	uint32_t xsw_dev;
2767 	int	xsw_flags;
2768 	int	xsw_nblks;
2769 	int     xsw_used;
2770 };
2771 #endif
2772 
2773 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2774 struct xswdev32 {
2775 	u_int	xsw_version;
2776 	u_int	xsw_dev1, xsw_dev2;
2777 	int	xsw_flags;
2778 	int	xsw_nblks;
2779 	int     xsw_used;
2780 };
2781 #endif
2782 
2783 static int
2784 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2785 {
2786 	struct xswdev xs;
2787 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2788 	struct xswdev32 xs32;
2789 #endif
2790 #if defined(COMPAT_FREEBSD11)
2791 	struct xswdev11 xs11;
2792 #endif
2793 	int error;
2794 
2795 	if (arg2 != 1)			/* name length */
2796 		return (EINVAL);
2797 
2798 	memset(&xs, 0, sizeof(xs));
2799 	error = swap_dev_info(*(int *)arg1, &xs, NULL, 0);
2800 	if (error != 0)
2801 		return (error);
2802 #if defined(__amd64__) && defined(COMPAT_FREEBSD32)
2803 	if (req->oldlen == sizeof(xs32)) {
2804 		memset(&xs32, 0, sizeof(xs32));
2805 		xs32.xsw_version = XSWDEV_VERSION;
2806 		xs32.xsw_dev1 = xs.xsw_dev;
2807 		xs32.xsw_dev2 = xs.xsw_dev >> 32;
2808 		xs32.xsw_flags = xs.xsw_flags;
2809 		xs32.xsw_nblks = xs.xsw_nblks;
2810 		xs32.xsw_used = xs.xsw_used;
2811 		error = SYSCTL_OUT(req, &xs32, sizeof(xs32));
2812 		return (error);
2813 	}
2814 #endif
2815 #if defined(COMPAT_FREEBSD11)
2816 	if (req->oldlen == sizeof(xs11)) {
2817 		memset(&xs11, 0, sizeof(xs11));
2818 		xs11.xsw_version = XSWDEV_VERSION_11;
2819 		xs11.xsw_dev = xs.xsw_dev; /* truncation */
2820 		xs11.xsw_flags = xs.xsw_flags;
2821 		xs11.xsw_nblks = xs.xsw_nblks;
2822 		xs11.xsw_used = xs.xsw_used;
2823 		error = SYSCTL_OUT(req, &xs11, sizeof(xs11));
2824 		return (error);
2825 	}
2826 #endif
2827 	error = SYSCTL_OUT(req, &xs, sizeof(xs));
2828 	return (error);
2829 }
2830 
2831 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2832     "Number of swap devices");
2833 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD | CTLFLAG_MPSAFE,
2834     sysctl_vm_swap_info,
2835     "Swap statistics by device");
2836 
2837 /*
2838  * Count the approximate swap usage in pages for a vmspace.  Swap blocks
2839  * that are shadowed or not yet copied on write are not counted.
2840  * The map must be locked.
2841  */
2842 long
2843 vmspace_swap_count(struct vmspace *vmspace)
2844 {
2845 	vm_map_t map;
2846 	vm_map_entry_t cur;
2847 	vm_object_t object;
2848 	struct swblk *sb;
2849 	vm_pindex_t e, pi;
2850 	long count;
2851 	int i;
2852 
2853 	map = &vmspace->vm_map;
2854 	count = 0;
2855 
2856 	VM_MAP_ENTRY_FOREACH(cur, map) {
2857 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) != 0)
2858 			continue;
2859 		object = cur->object.vm_object;
2860 		if (object == NULL || (object->flags & OBJ_SWAP) == 0)
2861 			continue;
2862 		VM_OBJECT_RLOCK(object);
2863 		if ((object->flags & OBJ_SWAP) == 0)
2864 			goto unlock;
2865 		pi = OFF_TO_IDX(cur->offset);
2866 		e = pi + OFF_TO_IDX(cur->end - cur->start);
2867 		for (sb = swblk_start_limit(object, pi, e);
2868 		    sb != NULL; sb = swblk_next_limit(object, sb, e)) {
2869 			for (i = 0; i < SWAP_META_PAGES; i++) {
2870 				if (sb->p + i < e &&
2871 				    sb->d[i] != SWAPBLK_NONE)
2872 					count++;
2873 			}
2874 		}
2875 unlock:
2876 		VM_OBJECT_RUNLOCK(object);
2877 	}
2878 	return (count);
2879 }
2880 
2881 /*
2882  * GEOM backend
2883  *
2884  * Swapping onto disk devices.
2885  *
2886  */
2887 
2888 static g_orphan_t swapgeom_orphan;
2889 
2890 static struct g_class g_swap_class = {
2891 	.name = "SWAP",
2892 	.version = G_VERSION,
2893 	.orphan = swapgeom_orphan,
2894 };
2895 
2896 DECLARE_GEOM_CLASS(g_swap_class, g_class);
2897 
2898 static void
2899 swapgeom_close_ev(void *arg, int flags)
2900 {
2901 	struct g_consumer *cp;
2902 
2903 	cp = arg;
2904 	g_access(cp, -1, -1, 0);
2905 	g_detach(cp);
2906 	g_destroy_consumer(cp);
2907 }
2908 
2909 /*
2910  * Add a reference to the g_consumer for an inflight transaction.
2911  */
2912 static void
2913 swapgeom_acquire(struct g_consumer *cp)
2914 {
2915 
2916 	mtx_assert(&sw_dev_mtx, MA_OWNED);
2917 	cp->index++;
2918 }
2919 
2920 /*
2921  * Remove a reference from the g_consumer.  Post a close event if all
2922  * references go away, since the function might be called from the
2923  * biodone context.
2924  */
2925 static void
2926 swapgeom_release(struct g_consumer *cp, struct swdevt *sp)
2927 {
2928 
2929 	mtx_assert(&sw_dev_mtx, MA_OWNED);
2930 	cp->index--;
2931 	if (cp->index == 0) {
2932 		if (g_post_event(swapgeom_close_ev, cp, M_NOWAIT, NULL) == 0)
2933 			sp->sw_id = NULL;
2934 	}
2935 }
2936 
2937 static void
2938 swapgeom_done(struct bio *bp2)
2939 {
2940 	struct swdevt *sp;
2941 	struct buf *bp;
2942 	struct g_consumer *cp;
2943 
2944 	bp = bp2->bio_caller2;
2945 	cp = bp2->bio_from;
2946 	bp->b_ioflags = bp2->bio_flags;
2947 	if (bp2->bio_error)
2948 		bp->b_ioflags |= BIO_ERROR;
2949 	bp->b_resid = bp->b_bcount - bp2->bio_completed;
2950 	bp->b_error = bp2->bio_error;
2951 	bp->b_caller1 = NULL;
2952 	bufdone(bp);
2953 	sp = bp2->bio_caller1;
2954 	mtx_lock(&sw_dev_mtx);
2955 	swapgeom_release(cp, sp);
2956 	mtx_unlock(&sw_dev_mtx);
2957 	g_destroy_bio(bp2);
2958 }
2959 
2960 static void
2961 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2962 {
2963 	struct bio *bio;
2964 	struct g_consumer *cp;
2965 
2966 	mtx_lock(&sw_dev_mtx);
2967 	cp = sp->sw_id;
2968 	if (cp == NULL) {
2969 		mtx_unlock(&sw_dev_mtx);
2970 		bp->b_error = ENXIO;
2971 		bp->b_ioflags |= BIO_ERROR;
2972 		bufdone(bp);
2973 		return;
2974 	}
2975 	swapgeom_acquire(cp);
2976 	mtx_unlock(&sw_dev_mtx);
2977 	if (bp->b_iocmd == BIO_WRITE)
2978 		bio = g_new_bio();
2979 	else
2980 		bio = g_alloc_bio();
2981 	if (bio == NULL) {
2982 		mtx_lock(&sw_dev_mtx);
2983 		swapgeom_release(cp, sp);
2984 		mtx_unlock(&sw_dev_mtx);
2985 		bp->b_error = ENOMEM;
2986 		bp->b_ioflags |= BIO_ERROR;
2987 		printf("swap_pager: cannot allocate bio\n");
2988 		bufdone(bp);
2989 		return;
2990 	}
2991 
2992 	bp->b_caller1 = bio;
2993 	bio->bio_caller1 = sp;
2994 	bio->bio_caller2 = bp;
2995 	bio->bio_cmd = bp->b_iocmd;
2996 	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2997 	bio->bio_length = bp->b_bcount;
2998 	bio->bio_done = swapgeom_done;
2999 	bio->bio_flags |= BIO_SWAP;
3000 	if (!buf_mapped(bp)) {
3001 		bio->bio_ma = bp->b_pages;
3002 		bio->bio_data = unmapped_buf;
3003 		bio->bio_ma_offset = (vm_offset_t)bp->b_offset & PAGE_MASK;
3004 		bio->bio_ma_n = bp->b_npages;
3005 		bio->bio_flags |= BIO_UNMAPPED;
3006 	} else {
3007 		bio->bio_data = bp->b_data;
3008 		bio->bio_ma = NULL;
3009 	}
3010 	g_io_request(bio, cp);
3011 	return;
3012 }
3013 
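/*
 * A note on the offset computation above: swap block numbers are in
 * page-sized, device-relative units, so the byte offset handed to GEOM
 * is (b_blkno - sw_first) * PAGE_SIZE, where sw_first is the base that
 * swaponsomething() assigned to this device in the global block space.
 */
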
3014 static void
3015 swapgeom_orphan(struct g_consumer *cp)
3016 {
3017 	struct swdevt *sp;
3018 	int destroy;
3019 
3020 	mtx_lock(&sw_dev_mtx);
3021 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
3022 		if (sp->sw_id == cp) {
3023 			sp->sw_flags |= SW_CLOSING;
3024 			break;
3025 		}
3026 	}
3027 	/*
3028 	 * Drop reference we were created with. Do directly since we're in a
3029 	 * special context where we don't have to queue the call to
3030 	 * swapgeom_close_ev().
3031 	 */
3032 	cp->index--;
3033 	destroy = ((sp != NULL) && (cp->index == 0));
3034 	if (destroy)
3035 		sp->sw_id = NULL;
3036 	mtx_unlock(&sw_dev_mtx);
3037 	if (destroy)
3038 		swapgeom_close_ev(cp, 0);
3039 }
3040 
3041 static void
3042 swapgeom_close(struct thread *td, struct swdevt *sw)
3043 {
3044 	struct g_consumer *cp;
3045 
3046 	mtx_lock(&sw_dev_mtx);
3047 	cp = sw->sw_id;
3048 	sw->sw_id = NULL;
3049 	mtx_unlock(&sw_dev_mtx);
3050 
3051 	/*
3052 	 * swapgeom_close() may be called from the biodone context,
3053 	 * where we cannot perform topology changes.  Delegate the
3054 	 * work to the events thread.
3055 	 */
3056 	if (cp != NULL)
3057 		g_waitfor_event(swapgeom_close_ev, cp, M_WAITOK, NULL);
3058 }
3059 
3060 static int
3061 swapongeom_locked(struct cdev *dev, struct vnode *vp)
3062 {
3063 	struct g_provider *pp;
3064 	struct g_consumer *cp;
3065 	static struct g_geom *gp;
3066 	struct swdevt *sp;
3067 	u_long nblks;
3068 	int error;
3069 
3070 	pp = g_dev_getprovider(dev);
3071 	if (pp == NULL)
3072 		return (ENODEV);
3073 	mtx_lock(&sw_dev_mtx);
3074 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
3075 		cp = sp->sw_id;
3076 		if (cp != NULL && cp->provider == pp) {
3077 			mtx_unlock(&sw_dev_mtx);
3078 			return (EBUSY);
3079 		}
3080 	}
3081 	mtx_unlock(&sw_dev_mtx);
3082 	if (gp == NULL)
3083 		gp = g_new_geomf(&g_swap_class, "swap");
3084 	cp = g_new_consumer(gp);
3085 	cp->index = 1;	/* Number of active I/Os, plus one for being active. */
3086 	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
3087 	g_attach(cp, pp);
3088 	/*
3089 	 * XXX: Every time you think you can improve the margin for
3090 	 * footshooting, somebody depends on the ability to do so:
3091 	 * savecore(8) wants to write to our swapdev so we cannot
3092 	 * set an exclusive count :-(
3093 	 */
3094 	error = g_access(cp, 1, 1, 0);
3095 	if (error != 0) {
3096 		g_detach(cp);
3097 		g_destroy_consumer(cp);
3098 		return (error);
3099 	}
3100 	nblks = pp->mediasize / DEV_BSIZE;
3101 	swaponsomething(vp, cp, nblks, swapgeom_strategy,
3102 	    swapgeom_close, dev2udev(dev),
3103 	    (pp->flags & G_PF_ACCEPT_UNMAPPED) != 0 ? SW_UNMAPPED : 0);
3104 	return (0);
3105 }
3106 
3107 static int
3108 swapongeom(struct vnode *vp)
3109 {
3110 	int error;
3111 
3112 	ASSERT_VOP_ELOCKED(vp, "swapongeom");
3113 	if (vp->v_type != VCHR || VN_IS_DOOMED(vp)) {
3114 		error = ENOENT;
3115 	} else {
3116 		g_topology_lock();
3117 		error = swapongeom_locked(vp->v_rdev, vp);
3118 		g_topology_unlock();
3119 	}
3120 	return (error);
3121 }
3122 
3123 /*
3124  * VNODE backend
3125  *
3126  * This is used mainly for network filesystem (read: probably only tested
3127  * with NFS) swapfiles.
3128  *
3129  */
3130 
3131 static void
3132 swapdev_strategy(struct buf *bp, struct swdevt *sp)
3133 {
3134 	struct vnode *vp2;
3135 
3136 	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
3137 
3138 	vp2 = sp->sw_id;
3139 	vhold(vp2);
3140 	if (bp->b_iocmd == BIO_WRITE) {
3141 		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY);
3142 		if (bp->b_bufobj)
3143 			bufobj_wdrop(bp->b_bufobj);
3144 		bufobj_wref(&vp2->v_bufobj);
3145 	} else {
3146 		vn_lock(vp2, LK_SHARED | LK_RETRY);
3147 	}
3148 	if (bp->b_bufobj != &vp2->v_bufobj)
3149 		bp->b_bufobj = &vp2->v_bufobj;
3150 	bp->b_vp = vp2;
3151 	bp->b_iooffset = dbtob(bp->b_blkno);
3152 	bstrategy(bp);
3153 	VOP_UNLOCK(vp2);
3154 }
3155 
3156 static void
3157 swapdev_close(struct thread *td, struct swdevt *sp)
3158 {
3159 	struct vnode *vp;
3160 
3161 	vp = sp->sw_vp;
3162 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
3163 	VOP_CLOSE(vp, FREAD | FWRITE, td->td_ucred, td);
3164 	vput(vp);
3165 }
3166 
3167 static int
3168 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
3169 {
3170 	struct swdevt *sp;
3171 	int error;
3172 
3173 	ASSERT_VOP_ELOCKED(vp, "swaponvp");
3174 	if (nblks == 0)
3175 		return (ENXIO);
3176 	mtx_lock(&sw_dev_mtx);
3177 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
3178 		if (sp->sw_id == vp) {
3179 			mtx_unlock(&sw_dev_mtx);
3180 			return (EBUSY);
3181 		}
3182 	}
3183 	mtx_unlock(&sw_dev_mtx);
3184 
3185 #ifdef MAC
3186 	error = mac_system_check_swapon(td->td_ucred, vp);
3187 	if (error == 0)
3188 #endif
3189 		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, NULL);
3190 	if (error != 0)
3191 		return (error);
3192 
3193 	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
3194 	    NODEV, 0);
3195 	return (0);
3196 }
3197 
3198 static int
3199 sysctl_swap_async_max(SYSCTL_HANDLER_ARGS)
3200 {
3201 	int error, new, n;
3202 
3203 	new = nsw_wcount_async_max;
3204 	error = sysctl_handle_int(oidp, &new, 0, req);
3205 	if (error != 0 || req->newptr == NULL)
3206 		return (error);
3207 
3208 	if (new > nswbuf / 2 || new < 1)
3209 		return (EINVAL);
3210 
3211 	mtx_lock(&swbuf_mtx);
3212 	while (nsw_wcount_async_max != new) {
3213 		/*
3214 		 * Adjust difference.  If the current async count is too low,
3215 		 * we will need to squeeze our update slowly in.  Sleep with a
3216 		 * higher priority than getpbuf() to finish faster.
3217 		 */
3218 		n = new - nsw_wcount_async_max;
3219 		if (nsw_wcount_async + n >= 0) {
3220 			nsw_wcount_async += n;
3221 			nsw_wcount_async_max += n;
3222 			wakeup(&nsw_wcount_async);
3223 		} else {
3224 			nsw_wcount_async_max -= nsw_wcount_async;
3225 			nsw_wcount_async = 0;
3226 			msleep(&nsw_wcount_async, &swbuf_mtx, PSWP,
3227 			    "swpsysctl", 0);
3228 		}
3229 	}
3230 	mtx_unlock(&swbuf_mtx);
3231 
3232 	return (0);
3233 }
3234 
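/*
 * Worked example for the loop above (hypothetical numbers): lowering the
 * maximum from 16 to 4 while 14 writes are in flight leaves only 2
 * credits in nsw_wcount_async, so the first pass cannot apply the full
 * difference.  Instead the maximum drops by the available 2, the count
 * is zeroed, and the thread sleeps until completions return credits,
 * repeating until nsw_wcount_async_max reaches 4.
 */
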
3235 static void
3236 swap_pager_update_writecount(vm_object_t object, vm_offset_t start,
3237     vm_offset_t end)
3238 {
3239 
3240 	VM_OBJECT_WLOCK(object);
3241 	KASSERT((object->flags & OBJ_ANON) == 0,
3242 	    ("Splittable object with writecount"));
3243 	object->un_pager.swp.writemappings += (vm_ooffset_t)end - start;
3244 	VM_OBJECT_WUNLOCK(object);
3245 }
3246 
3247 static void
3248 swap_pager_release_writecount(vm_object_t object, vm_offset_t start,
3249     vm_offset_t end)
3250 {
3251 
3252 	VM_OBJECT_WLOCK(object);
3253 	KASSERT((object->flags & OBJ_ANON) == 0,
3254 	    ("Splittable object with writecount"));
3255 	KASSERT(object->un_pager.swp.writemappings >= (vm_ooffset_t)end - start,
3256 	    ("swap obj %p writecount %jx dec %jx", object,
3257 	    (uintmax_t)object->un_pager.swp.writemappings,
3258 	    (uintmax_t)((vm_ooffset_t)end - start)));
3259 	object->un_pager.swp.writemappings -= (vm_ooffset_t)end - start;
3260 	VM_OBJECT_WUNLOCK(object);
3261 }
3262