xref: /freebsd/sys/vm/swap_pager.c (revision 52ec752989b2e6d4e9a59a8ff25d8ff596d85e62)
1 /*
2  * Copyright (c) 1998 Matthew Dillon,
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1990 University of Utah.
5  * Copyright (c) 1982, 1986, 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *				New Swap System
41  *				Matthew Dillon
42  *
43  * Radix Bitmap 'blists'.
44  *
45  *	- The new swapper uses the new radix bitmap code.  This should scale
46  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
47  *	  arbitrary degree of fragmentation.
48  *
49  * Features:
50  *
51  *	- on the fly reallocation of swap during putpages.  The new system
52  *	  does not try to keep previously allocated swap blocks for dirty
53  *	  pages.
54  *
55  *	- on the fly deallocation of swap
56  *
57  *	- No more garbage collection required.  Unnecessarily allocated swap
58  *	  blocks only exist for dirty vm_page_t's now and these are already
59  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
60  *	  removal of invalidated swap blocks when a page is destroyed
61  *	  or renamed.
62  *
63  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64  *
65  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
66  *	@(#)vm_swap.c	8.5 (Berkeley) 2/17/94
67  */
68 
69 #include <sys/cdefs.h>
70 __FBSDID("$FreeBSD$");
71 
72 #include "opt_mac.h"
73 #include "opt_swap.h"
74 #include "opt_vm.h"
75 
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/conf.h>
79 #include <sys/kernel.h>
80 #include <sys/proc.h>
81 #include <sys/bio.h>
82 #include <sys/buf.h>
83 #include <sys/disk.h>
84 #include <sys/fcntl.h>
85 #include <sys/mount.h>
86 #include <sys/namei.h>
87 #include <sys/vnode.h>
88 #include <sys/mac.h>
89 #include <sys/malloc.h>
90 #include <sys/sysctl.h>
91 #include <sys/sysproto.h>
92 #include <sys/blist.h>
93 #include <sys/lock.h>
94 #include <sys/sx.h>
95 #include <sys/vmmeter.h>
96 
97 #include <vm/vm.h>
98 #include <vm/pmap.h>
99 #include <vm/vm_map.h>
100 #include <vm/vm_kern.h>
101 #include <vm/vm_object.h>
102 #include <vm/vm_page.h>
103 #include <vm/vm_pager.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_param.h>
106 #include <vm/swap_pager.h>
107 #include <vm/vm_extern.h>
108 #include <vm/uma.h>
109 
110 #include <geom/geom.h>
111 
112 /*
113  * SWB_NPAGES must be a power of 2.  It may be set to 1, 2, 4, 8, or 16
114  * pages per allocation.  We recommend you stick with the default (16).
115  * The 16-page limit is due to the radix code (kern/subr_blist.c).
116  */
117 #ifndef MAX_PAGEOUT_CLUSTER
118 #define MAX_PAGEOUT_CLUSTER 16
119 #endif
120 
121 #if !defined(SWB_NPAGES)
122 #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
123 #endif
124 
125 /*
126  * Piecemeal swap metadata structure.  Swap is stored in a radix tree.
127  *
128  * If SWB_NPAGES is 8 and sizeof(char *) == sizeof(daddr_t), our radix
129  * is basically 8.  Assuming PAGE_SIZE == 4096, one tree level represents
130  * 32K worth of data, two levels represent 256K, three levels represent
131  * 2 MBytes.   This is acceptable.
132  *
133  * Overall memory utilization is about the same as the old swap structure.
134  */
135 #define SWCORRECT(n) (sizeof(void *) * (n) / sizeof(daddr_t))
136 #define SWAP_META_PAGES		(SWB_NPAGES * 2)
137 #define SWAP_META_MASK		(SWAP_META_PAGES - 1)
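
/*
 * A worked example of the numbers above, assuming SWB_NPAGES == 8 as in
 * the comment (SWB_NPAGES actually defaults to MAX_PAGEOUT_CLUSTER):
 *
 *	SWAP_META_PAGES = 8 * 2 = 16
 *	SWAP_META_MASK  = 16 - 1 = 0x0f
 *
 * Each swblock then tracks 16 consecutive page indices, i.e. 64K of
 * object data with PAGE_SIZE == 4096.
 */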
138 
139 typedef	int32_t	swblk_t;	/*
140 				 * swap offset.  This is the type used to
141 				 * address the "virtual swap device" and
142 				 * therefore the maximum swap space is
143 				 * 2^31 pages (swblk_t is signed).
144 				 */
145 
146 struct swdevt;
147 typedef void sw_strategy_t(struct buf *bp, struct swdevt *sw);
148 typedef void sw_close_t(struct thread *td, struct swdevt *sw);
149 
150 /*
151  * Swap device table
152  */
153 struct swdevt {
154 	int	sw_flags;
155 	int	sw_nblks;
156 	int     sw_used;
157 	udev_t	sw_udev;
158 	struct vnode *sw_vp;
159 	void	*sw_id;
160 	swblk_t	sw_first;
161 	swblk_t	sw_end;
162 	struct blist *sw_blist;
163 	TAILQ_ENTRY(swdevt)	sw_list;
164 	sw_strategy_t		*sw_strategy;
165 	sw_close_t		*sw_close;
166 };
167 
168 #define	SW_CLOSING	0x04
169 
170 struct swblock {
171 	struct swblock	*swb_hnext;
172 	vm_object_t	swb_object;
173 	vm_pindex_t	swb_index;
174 	int		swb_count;
175 	daddr_t		swb_pages[SWAP_META_PAGES];
176 };
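
/*
 * An illustrative mapping, derived from the code below rather than part
 * of the original commentary: a page index selects a swblock and a slot
 * within it as
 *
 *	swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
 *	slot      = pindex & SWAP_META_MASK;
 *
 * e.g. with SWAP_META_MASK == 0x0f, pindex 0x123 is recorded in the
 * swblock whose swb_index is 0x120, at swb_pages[3].
 */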
177 
178 static struct mtx sw_dev_mtx;
179 static TAILQ_HEAD(, swdevt) swtailq = TAILQ_HEAD_INITIALIZER(swtailq);
180 static struct swdevt *swdevhd;	/* Allocate from here next */
181 static int nswapdev;		/* Number of swap devices */
182 int swap_pager_avail;
183 static int swdev_syscall_active = 0; /* serialize swap(on|off) */
184 
185 static void swapdev_strategy(struct buf *, struct swdevt *sw);
186 
187 #define SWM_FREE	0x02	/* free, period			*/
188 #define SWM_POP		0x04	/* pop out			*/
189 
190 int swap_pager_full;		/* swap space exhaustion (task killing) */
191 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
192 static int nsw_rcount;		/* free read buffers			*/
193 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
194 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
195 static int nsw_wcount_async_max;/* assigned maximum			*/
196 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
197 
198 static struct swblock **swhash;
199 static int swhash_mask;
200 static struct mtx swhash_mtx;
201 
202 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
203 static struct sx sw_alloc_sx;
204 
205 
206 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
207         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
208 
209 /*
210  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
211  * of searching a named list by hashing it just a little.
212  */
213 
214 #define NOBJLISTS		8
215 
216 #define NOBJLIST(handle)	\
217 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
218 
219 static struct mtx sw_alloc_mtx;	/* protect list manipulation */
220 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
221 static uma_zone_t	swap_zone;
222 
223 /*
224  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
225  * calls hooked from other parts of the VM system and do not appear here.
226  * (see vm/swap_pager.h).
227  */
228 static vm_object_t
229 		swap_pager_alloc(void *handle, vm_ooffset_t size,
230 				      vm_prot_t prot, vm_ooffset_t offset);
231 static void	swap_pager_dealloc(vm_object_t object);
232 static int	swap_pager_getpages(vm_object_t, vm_page_t *, int, int);
233 static void	swap_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
234 static boolean_t
235 		swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after);
236 static void	swap_pager_init(void);
237 static void	swap_pager_unswapped(vm_page_t);
238 static void	swap_pager_swapoff(struct swdevt *sp, int *sw_used);
239 
240 struct pagerops swappagerops = {
241 	.pgo_init =	swap_pager_init,	/* early system initialization of pager	*/
242 	.pgo_alloc =	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
243 	.pgo_dealloc =	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
244 	.pgo_getpages =	swap_pager_getpages,	/* pagein				*/
245 	.pgo_putpages =	swap_pager_putpages,	/* pageout				*/
246 	.pgo_haspage =	swap_pager_haspage,	/* get backing store status for page	*/
247 	.pgo_pageunswapped = swap_pager_unswapped,	/* remove swap related to page		*/
248 };
249 
250 /*
251  * dmmax is in page-sized chunks with the new swap system.  It was
252  * dev-bsized chunks in the old.  dmmax is always a power of 2.
253  *
254  * swap_*() routines are externally accessible.  swp_*() routines are
255  * internal.
256  */
257 static int dmmax;
258 static int nswap_lowat = 128;	/* in pages, swap_pager_almost_full warn */
259 static int nswap_hiwat = 512;	/* in pages, swap_pager_almost_full warn */
260 
261 SYSCTL_INT(_vm, OID_AUTO, dmmax,
262 	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");
263 
264 static void	swp_sizecheck(void);
265 static void	swp_pager_sync_iodone(struct buf *bp);
266 static void	swp_pager_async_iodone(struct buf *bp);
267 static int	swapongeom(struct thread *, struct vnode *);
268 static int	swaponvp(struct thread *, struct vnode *, u_long);
269 
270 /*
271  * Swap bitmap functions
272  */
273 static void	swp_pager_freeswapspace(daddr_t blk, int npages);
274 static daddr_t	swp_pager_getswapspace(int npages);
275 
276 /*
277  * Metadata functions
278  */
279 static struct swblock **swp_pager_hash(vm_object_t object, vm_pindex_t index);
280 static void swp_pager_meta_build(vm_object_t, vm_pindex_t, daddr_t);
281 static void swp_pager_meta_free(vm_object_t, vm_pindex_t, daddr_t);
282 static void swp_pager_meta_free_all(vm_object_t);
283 static daddr_t swp_pager_meta_ctl(vm_object_t, vm_pindex_t, int);
284 
285 /*
286  * SWP_SIZECHECK() -	update swap_pager_full indication
287  *
288  *	update the swap_pager_almost_full indication and warn when we are
289  *	about to run out of swap space, using lowat/hiwat hysteresis.
290  *
291  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
292  *
293  *	No restrictions on call
294  *	This routine may not block.
295  *	This routine must be called at splvm()
296  */
297 static void
298 swp_sizecheck(void)
299 {
300 
301 	if (swap_pager_avail < nswap_lowat) {
302 		if (swap_pager_almost_full == 0) {
303 			printf("swap_pager: out of swap space\n");
304 			swap_pager_almost_full = 1;
305 		}
306 	} else {
307 		swap_pager_full = 0;
308 		if (swap_pager_avail > nswap_hiwat)
309 			swap_pager_almost_full = 0;
310 	}
311 }
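
/*
 * A concrete reading of the hysteresis above, using the default
 * thresholds (nswap_lowat == 128, nswap_hiwat == 512, in pages): once
 * swap_pager_avail drops below 128 we warn and set
 * swap_pager_almost_full; the flag is only cleared after avail climbs
 * back above 512, so we do not flap across a single threshold.
 */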
312 
313 /*
314  * SWP_PAGER_HASH() -	hash swap meta data
315  *
316  *	This is a helper function which hashes the (object, page index)
317  *	pair to locate its swblock.  It returns a pointer to a pointer
318  *	to the swblock, or a pointer to a NULL pointer if it could not
319  *	find a swblock.
320  *
321  *	This routine must be called at splvm().
322  */
323 static struct swblock **
324 swp_pager_hash(vm_object_t object, vm_pindex_t index)
325 {
326 	struct swblock **pswap;
327 	struct swblock *swap;
328 
329 	index &= ~(vm_pindex_t)SWAP_META_MASK;
330 	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
331 	while ((swap = *pswap) != NULL) {
332 		if (swap->swb_object == object &&
333 		    swap->swb_index == index
334 		) {
335 			break;
336 		}
337 		pswap = &swap->swb_hnext;
338 	}
339 	return (pswap);
340 }
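
/*
 * A sketch of the lookup performed above (illustrative only): for an
 * object obj and page index p,
 *
 *	p &= ~(vm_pindex_t)SWAP_META_MASK;
 *	bucket = (p ^ (int)(intptr_t)obj) & swhash_mask;
 *
 * and the hash chain rooted at swhash[bucket] is walked via swb_hnext
 * until a swblock matching (obj, p) is found or NULL ends the chain.
 */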
341 
342 /*
343  * SWAP_PAGER_INIT() -	initialize the swap pager!
344  *
345  *	Expected to be started from system init.  NOTE:  This code is run
346  *	before much else so be careful what you depend on.  Most of the VM
347  *	system has yet to be initialized at this point.
348  */
349 static void
350 swap_pager_init(void)
351 {
352 	/*
353 	 * Initialize object lists
354 	 */
355 	int i;
356 
357 	for (i = 0; i < NOBJLISTS; ++i)
358 		TAILQ_INIT(&swap_pager_object_list[i]);
359 	mtx_init(&sw_alloc_mtx, "swap_pager list", NULL, MTX_DEF);
360 	mtx_init(&sw_dev_mtx, "swapdev", NULL, MTX_DEF);
361 
362 	/*
363 	 * Device Stripe, in PAGE_SIZE'd blocks
364 	 */
365 	dmmax = SWB_NPAGES * 2;
366 }
367 
368 /*
369  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
370  *
371  *	Expected to be started from pageout process once, prior to entering
372  *	its main loop.
373  */
374 void
375 swap_pager_swap_init(void)
376 {
377 	int n, n2;
378 
379 	/*
380 	 * Number of in-transit swap bp operations.  Don't
381 	 * exhaust the pbufs completely.  Make sure we
382 	 * initialize workable values (0 will work for hysteresis
383 	 * but it isn't very efficient).
384 	 *
385 	 * The nsw_cluster_max is constrained by the bp->b_pages[]
386 	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
387 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
388 	 * constrained by the swap device interleave stripe size.
389 	 *
390 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
391 	 * designed to prevent other I/O from having high latencies due to
392 	 * our pageout I/O.  The value 4 works well for one or two active swap
393 	 * devices but is probably a little low if you have more.  Even so,
394 	 * a higher value would probably generate only a limited improvement
395 	 * with three or four active swap devices since the system does not
396 	 * typically have to pageout at extreme bandwidths.   We will want
397 	 * at least 2 per swap device, and 4 is a pretty good value if you
398 	 * have one NFS swap device due to the command/ack latency over NFS.
399 	 * So it all works out pretty well.
400 	 */
401 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
402 
403 	mtx_lock(&pbuf_mtx);
404 	nsw_rcount = (nswbuf + 1) / 2;
405 	nsw_wcount_sync = (nswbuf + 3) / 4;
406 	nsw_wcount_async = 4;
407 	nsw_wcount_async_max = nsw_wcount_async;
408 	mtx_unlock(&pbuf_mtx);
409 
410 	/*
411 	 * Initialize our zone.  Right now I'm just guessing on the number
412 	 * we need based on the number of pages in the system.  Each swblock
413 	 * can hold 16 pages, so this is probably overkill.  This reservation
414 	 * is typically limited to around 32MB by default.
415 	 */
416 	n = cnt.v_page_count / 2;
417 	if (maxswzone && n > maxswzone / sizeof(struct swblock))
418 		n = maxswzone / sizeof(struct swblock);
419 	n2 = n;
420 	swap_zone = uma_zcreate("SWAPMETA", sizeof(struct swblock), NULL, NULL,
421 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
422 	do {
423 		if (uma_zone_set_obj(swap_zone, NULL, n))
424 			break;
425 		/*
426 		 * if the allocation failed, try a zone two thirds the
427 		 * size of the previous attempt.
428 		 */
429 		n -= ((n + 2) / 3);
430 	} while (n > 0);
431 	if (swap_zone == NULL)
432 		panic("failed to create swap_zone.");
433 	if (n2 != n)
434 		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
435 	n2 = n;
436 
437 	/*
438 	 * Initialize our meta-data hash table.  The swapper does not need to
439 	 * be quite as efficient as the VM system, so we do not use an
440 	 * oversized hash table.
441 	 *
442 	 * 	n: 		size of hash table, must be power of 2
443 	 *	swhash_mask:	hash table index mask
444 	 */
445 	for (n = 1; n < n2 / 8; n *= 2)
446 		;
447 	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
448 	swhash_mask = n - 1;
449 	mtx_init(&swhash_mtx, "swap_pager swhash", NULL, MTX_DEF);
450 }
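
/*
 * A worked sizing example for the above (illustrative; it assumes a
 * machine with 262144 pages, i.e. 1GB at PAGE_SIZE == 4096, and ignores
 * the maxswzone cap and any fallback shrinking of the zone): n starts
 * at 131072 swblocks, and the hash table loop settles on the first
 * power of 2 reaching n / 8, giving 16384 buckets and
 * swhash_mask == 16383.
 */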
451 
452 /*
453  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
454  *			its metadata structures.
455  *
456  *	This routine is called from the mmap and fork code to create a new
457  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
458  *	and then converting it with swp_pager_meta_build().
459  *
460  *	This routine may block in vm_object_allocate() and create a named
461  *	object lookup race, so we must interlock.   We must also run at
462  *	splvm() for the object lookup to handle races with interrupts, but
463  *	we do not have to maintain splvm() in between the lookup and the
464  *	add because (I believe) it is not possible to attempt to create
465  *	a new swap object w/handle when a default object with that handle
466  *	already exists.
467  *
468  * MPSAFE
469  */
470 static vm_object_t
471 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
472 		 vm_ooffset_t offset)
473 {
474 	vm_object_t object;
475 	vm_pindex_t pindex;
476 
477 	pindex = OFF_TO_IDX(offset + PAGE_MASK + size);
478 
479 	if (handle) {
480 		mtx_lock(&Giant);
481 		/*
482 		 * Reference existing named region or allocate new one.  There
483 		 * should not be a race here against swp_pager_meta_build()
484 		 * as called from vm_page_remove() in regards to the lookup
485 		 * of the handle.
486 		 */
487 		sx_xlock(&sw_alloc_sx);
488 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
489 
490 		if (object != NULL) {
491 			vm_object_reference(object);
492 		} else {
493 			object = vm_object_allocate(OBJT_DEFAULT, pindex);
494 			object->handle = handle;
495 
496 			VM_OBJECT_LOCK(object);
497 			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
498 			VM_OBJECT_UNLOCK(object);
499 		}
500 		sx_xunlock(&sw_alloc_sx);
501 		mtx_unlock(&Giant);
502 	} else {
503 		object = vm_object_allocate(OBJT_DEFAULT, pindex);
504 
505 		VM_OBJECT_LOCK(object);
506 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
507 		VM_OBJECT_UNLOCK(object);
508 	}
509 	return (object);
510 }
511 
512 /*
513  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
514  *
515  *	The swap backing for the object is destroyed.  The code is
516  *	designed such that we can reinstantiate it later, but this
517  *	routine is typically called only when the entire object is
518  *	about to be destroyed.
519  *
520  *	This routine may block, but no longer does.
521  *
522  *	The object must be locked or unreferenceable.
523  */
524 static void
525 swap_pager_dealloc(vm_object_t object)
526 {
527 	int s;
528 
529 	/*
530 	 * Remove from list right away so lookups will fail if we block for
531 	 * pageout completion.
532 	 */
533 	if (object->handle != NULL) {
534 		mtx_lock(&sw_alloc_mtx);
535 		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
536 		mtx_unlock(&sw_alloc_mtx);
537 	}
538 
539 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
540 	vm_object_pip_wait(object, "swpdea");
541 
542 	/*
543 	 * Free all remaining metadata.  We only bother to free it from
544 	 * the swap meta data.  We do not attempt to free swapblk's still
545 	 * associated with vm_page_t's for this object.  We do not care
546 	 * if paging is still in progress on some objects.
547 	 */
548 	s = splvm();
549 	swp_pager_meta_free_all(object);
550 	splx(s);
551 }
552 
553 /************************************************************************
554  *			SWAP PAGER BITMAP ROUTINES			*
555  ************************************************************************/
556 
557 /*
558  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
559  *
560  *	Allocate swap for the requested number of pages.  The starting
561  *	swap block number (a page index) is returned or SWAPBLK_NONE
562  *	if the allocation failed.
563  *
564  *	Also has the side effect of advising that somebody made a mistake
565  *	when they configured swap and didn't configure enough.
566  *
567  *	Must be called at splvm() to avoid races with bitmap frees from
568  *	vm_page_remove() aka swap_pager_page_removed().
569  *
570  *	This routine may not block
571  *	This routine must be called at splvm().
572  *
573  *	We allocate in round-robin fashion from the configured devices.
574  */
575 static daddr_t
576 swp_pager_getswapspace(int npages)
577 {
578 	daddr_t blk;
579 	struct swdevt *sp;
580 	int i;
581 
582 	blk = SWAPBLK_NONE;
583 	mtx_lock(&sw_dev_mtx);
584 	sp = swdevhd;
585 	for (i = 0; i < nswapdev; i++) {
586 		if (sp == NULL)
587 			sp = TAILQ_FIRST(&swtailq);
588 		if (!(sp->sw_flags & SW_CLOSING)) {
589 			blk = blist_alloc(sp->sw_blist, npages);
590 			if (blk != SWAPBLK_NONE) {
591 				blk += sp->sw_first;
592 				sp->sw_used += npages;
593 				swap_pager_avail -= npages;
594 				swp_sizecheck();
595 				swdevhd = TAILQ_NEXT(sp, sw_list);
596 				goto done;
597 			}
598 		}
599 		sp = TAILQ_NEXT(sp, sw_list);
600 	}
601 	if (swap_pager_full != 2) {
602 		printf("swap_pager_getswapspace(%d): failed\n", npages);
603 		swap_pager_full = 2;
604 		swap_pager_almost_full = 1;
605 	}
606 	swdevhd = NULL;
607 done:
608 	mtx_unlock(&sw_dev_mtx);
609 	return (blk);
610 }
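
/*
 * A hypothetical two-device illustration of the round-robin above: with
 * devices A and B both having free space and neither SW_CLOSING,
 * swdevhd advances past whichever device satisfied the last request, so
 * successive allocations alternate A, B, A, B, ... interleaving the
 * paging load across the configured devices.
 */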
611 
612 static struct swdevt *
613 swp_pager_find_dev(daddr_t blk)
614 {
615 	struct swdevt *sp;
616 
617 	mtx_lock(&sw_dev_mtx);
618 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
619 		if (blk >= sp->sw_first && blk < sp->sw_end) {
620 			mtx_unlock(&sw_dev_mtx);
621 			return (sp);
622 		}
623 	}
624 	panic("Swapdev not found");
625 }
626 
627 static void
628 swp_pager_strategy(struct buf *bp)
629 {
630 	struct swdevt *sp;
631 
632 	mtx_lock(&sw_dev_mtx);
633 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
634 		if (bp->b_blkno >= sp->sw_first && bp->b_blkno < sp->sw_end) {
635 			mtx_unlock(&sw_dev_mtx);
636 			sp->sw_strategy(bp, sp);
637 			return;
638 		}
639 	}
640 	panic("Swapdev not found");
641 }
642 
643 
644 /*
645  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
646  *
647  *	This routine returns the specified swap blocks back to the bitmap.
648  *
649  *	Note:  This routine may not block (it could in the old swap code),
650  *	and through the use of the new blist routines it does not block.
651  *
652  *	We must be called at splvm() to avoid races with bitmap frees from
653  *	vm_page_remove() aka swap_pager_page_removed().
654  *
655  *	This routine may not block
656  *	This routine must be called at splvm().
657  */
658 static void
659 swp_pager_freeswapspace(daddr_t blk, int npages)
660 {
661 	struct swdevt *sp;
662 
663 	mtx_lock(&sw_dev_mtx);
664 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
665 		if (blk >= sp->sw_first && blk < sp->sw_end) {
666 			sp->sw_used -= npages;
667 			/*
668 			 * If we are attempting to stop swapping on
669 			 * this device, we don't want to mark any
670 			 * blocks free lest they be reused.
671 			 */
672 			if ((sp->sw_flags & SW_CLOSING) == 0) {
673 				blist_free(sp->sw_blist, blk - sp->sw_first,
674 				    npages);
675 				swap_pager_avail += npages;
676 				swp_sizecheck();
677 			}
678 			mtx_unlock(&sw_dev_mtx);
679 			return;
680 		}
681 	}
682 	panic("Swapdev not found");
683 }
684 
685 /*
686  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
687  *				range within an object.
688  *
689  *	This is a globally accessible routine.
690  *
691  *	This routine removes swapblk assignments from swap metadata.
692  *
693  *	The external callers of this routine typically have already destroyed
694  *	or renamed vm_page_t's associated with this range in the object so
695  *	we should be ok.
696  *
697  *	This routine may be called at any spl.  We up our spl to splvm temporarily
698  *	in order to perform the metadata removal.
699  */
700 void
701 swap_pager_freespace(vm_object_t object, vm_pindex_t start, vm_size_t size)
702 {
703 	int s = splvm();
704 
705 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
706 	swp_pager_meta_free(object, start, size);
707 	splx(s);
708 }
709 
710 /*
711  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
712  *
713  *	Assigns swap blocks to the specified range within the object.  The
714  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
715  *
716  *	Returns 0 on success, -1 on failure.
717  */
718 int
719 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
720 {
721 	int s;
722 	int n = 0;
723 	daddr_t blk = SWAPBLK_NONE;
724 	vm_pindex_t beg = start;	/* save start index */
725 
726 	s = splvm();
727 	VM_OBJECT_LOCK(object);
728 	while (size) {
729 		if (n == 0) {
730 			n = BLIST_MAX_ALLOC;
731 			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
732 				n >>= 1;
733 				if (n == 0) {
734 					swp_pager_meta_free(object, beg, start - beg);
735 					VM_OBJECT_UNLOCK(object);
736 					splx(s);
737 					return (-1);
738 				}
739 			}
740 		}
741 		swp_pager_meta_build(object, start, blk);
742 		--size;
743 		++start;
744 		++blk;
745 		--n;
746 	}
747 	swp_pager_meta_free(object, start, n);
748 	VM_OBJECT_UNLOCK(object);
749 	splx(s);
750 	return (0);
751 }
752 
753 /*
754  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
755  *			and destroy the source.
756  *
757  *	Copy any valid swapblks from the source to the destination.  In
758  *	cases where both the source and destination have a valid swapblk,
759  *	we keep the destination's.
760  *
761  *	This routine is allowed to block.  It may block allocating metadata
762  *	indirectly through swp_pager_meta_build() or if paging is still in
763  *	progress on the source.
764  *
765  *	This routine can be called at any spl
766  *
767  *	XXX vm_page_collapse() kinda expects us not to block because we
768  *	supposedly do not need to allocate memory, but for the moment we
769  *	*may* have to get a little memory from the zone allocator, but
770  *	it is taken from the interrupt memory.  We should be ok.
771  *
772  *	The source object contains no vm_page_t's (which is just as well)
773  *
774  *	The source object is of type OBJT_SWAP.
775  *
776  *	The source and destination objects must be locked or
777  *	inaccessible (XXX are they ?)
778  */
779 void
780 swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
781     vm_pindex_t offset, int destroysource)
782 {
783 	vm_pindex_t i;
784 	int s;
785 
786 	VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
787 	VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);
788 
789 	s = splvm();
790 	/*
791 	 * If destroysource is set, we remove the source object from the
792 	 * swap_pager internal queue now.
793 	 */
794 	if (destroysource) {
795 		if (srcobject->handle != NULL) {
796 			mtx_lock(&sw_alloc_mtx);
797 			TAILQ_REMOVE(
798 			    NOBJLIST(srcobject->handle),
799 			    srcobject,
800 			    pager_object_list
801 			);
802 			mtx_unlock(&sw_alloc_mtx);
803 		}
804 	}
805 
806 	/*
807 	 * transfer source to destination.
808 	 */
809 	for (i = 0; i < dstobject->size; ++i) {
810 		daddr_t dstaddr;
811 
812 		/*
813 		 * Locate (without changing) the swapblk on the destination,
814 		 * unless it is invalid in which case free it silently, or
815 		 * if the destination is a resident page, in which case the
816 		 * source is thrown away.
817 		 */
818 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
819 
820 		if (dstaddr == SWAPBLK_NONE) {
821 			/*
822 			 * Destination has no swapblk and is not resident,
823 			 * copy source.
824 			 */
825 			daddr_t srcaddr;
826 
827 			srcaddr = swp_pager_meta_ctl(
828 			    srcobject,
829 			    i + offset,
830 			    SWM_POP
831 			);
832 
833 			if (srcaddr != SWAPBLK_NONE) {
834 				/*
835 				 * swp_pager_meta_build() can sleep.
836 				 */
837 				vm_object_pip_add(srcobject, 1);
838 				VM_OBJECT_UNLOCK(srcobject);
839 				vm_object_pip_add(dstobject, 1);
840 				swp_pager_meta_build(dstobject, i, srcaddr);
841 				vm_object_pip_wakeup(dstobject);
842 				VM_OBJECT_LOCK(srcobject);
843 				vm_object_pip_wakeup(srcobject);
844 			}
845 		} else {
846 			/*
847 			 * Destination has valid swapblk or it is represented
848 			 * by a resident page.  We destroy the sourceblock.
849 			 */
850 
851 			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
852 		}
853 	}
854 
855 	/*
856 	 * Free left over swap blocks in source.
857 	 *
858  *	We have to revert the type to OBJT_DEFAULT so we do not accidentally
859 	 * double-remove the object from the swap queues.
860 	 */
861 	if (destroysource) {
862 		swp_pager_meta_free_all(srcobject);
863 		/*
864 		 * Reverting the type is not necessary, the caller is going
865 		 * to destroy srcobject directly, but I'm doing it here
866 		 * for consistency since we've removed the object from its
867 		 * queues.
868 		 */
869 		srcobject->type = OBJT_DEFAULT;
870 	}
871 	splx(s);
872 }
873 
874 /*
875  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
876  *				the requested page.
877  *
878  *	We determine whether good backing store exists for the requested
879  *	page and return TRUE if it does, FALSE if it doesn't.
880  *
881  *	If TRUE, we also try to determine how much valid, contiguous backing
882  *	store exists before and after the requested page within a reasonable
883  *	distance.  We do not try to restrict it to the swap device stripe
884  *	(that is handled in getpages/putpages).  It probably isn't worth
885  *	doing here.
886  */
887 static boolean_t
888 swap_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before, int *after)
889 {
890 	daddr_t blk0;
891 	int s;
892 
893 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
894 	/*
895 	 * do we have good backing store at the requested index ?
896 	 */
897 	s = splvm();
898 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
899 
900 	if (blk0 == SWAPBLK_NONE) {
901 		splx(s);
902 		if (before)
903 			*before = 0;
904 		if (after)
905 			*after = 0;
906 		return (FALSE);
907 	}
908 
909 	/*
910 	 * find backwards-looking contiguous good backing store
911 	 */
912 	if (before != NULL) {
913 		int i;
914 
915 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
916 			daddr_t blk;
917 
918 			if (i > pindex)
919 				break;
920 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
921 			if (blk != blk0 - i)
922 				break;
923 		}
924 		*before = (i - 1);
925 	}
926 
927 	/*
928 	 * find forward-looking contiguous good backing store
929 	 */
930 	if (after != NULL) {
931 		int i;
932 
933 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
934 			daddr_t blk;
935 
936 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
937 			if (blk != blk0 + i)
938 				break;
939 		}
940 		*after = (i - 1);
941 	}
942 	splx(s);
943 	return (TRUE);
944 }
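
/*
 * A hedged usage sketch (not from the original source): a caller that
 * wants to cluster around pindex might do
 *
 *	int before, after;
 *
 *	if (swap_pager_haspage(object, pindex, &before, &after)) {
 *		... swap contiguously backs [pindex - before, pindex + after]
 *	}
 *
 * Either pointer may be NULL if only the boolean answer is needed; the
 * loops above bound both counts by SWB_NPAGES/2 - 1.
 */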
945 
946 /*
947  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
948  *
949  *	This removes any associated swap backing store, whether valid or
950  *	not, from the page.
951  *
952  *	This routine is typically called when a page is made dirty, at
953  *	which point any associated swap can be freed.  MADV_FREE also
954  *	calls us in a special-case situation
955  *
956  *	NOTE!!!  If the page is clean and the swap was valid, the caller
957  *	should make the page dirty before calling this routine.  This routine
958  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
959  *	depends on it.
960  *
961  *	This routine may not block
962  *	This routine must be called at splvm()
963  */
964 static void
965 swap_pager_unswapped(vm_page_t m)
966 {
967 
968 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
969 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
970 }
971 
972 /*
973  * SWAP_PAGER_GETPAGES() - bring pages in from swap
974  *
975  *	Attempt to retrieve (m, count) pages from backing store, but make
976  *	sure we retrieve at least m[reqpage].  We try to load in as large
977  *	a chunk surrounding m[reqpage] as is contiguous in swap and which
978  *	belongs to the same object.
979  *
980  *	The code is designed for asynchronous operation and
981  *	immediate-notification of 'reqpage' but tends not to be
982  *	used that way.  Please do not optimize-out this algorithmic
983  *	feature, I intend to improve on it in the future.
984  *
985  *	The parent has a single vm_object_pip_add() reference prior to
986  *	calling us and we should return with the same.
987  *
988  *	The parent has BUSY'd the pages.  We should return with 'm'
989  *	left busy, but the others adjusted.
990  */
991 static int
992 swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
993 {
994 	struct buf *bp;
995 	vm_page_t mreq;
996 	int s;
997 	int i;
998 	int j;
999 	daddr_t blk;
1000 
1001 	mreq = m[reqpage];
1002 
1003 	KASSERT(mreq->object == object,
1004 	    ("swap_pager_getpages: object mismatch %p/%p",
1005 	    object, mreq->object));
1006 
1007 	/*
1008 	 * Calculate range to retrieve.  The pages have already been assigned
1009 	 * their swapblks.  We require a *contiguous* range, but we know it
1010 	 * does not span devices.  If we fail to supply a contiguous range,
1011 	 * bad things happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE;
1012 	 * the loops are set up such that those cases are handled implicitly.
1013 	 *
1014 	 * The swp_*() calls must be made at splvm().  vm_page_free() does
1015 	 * not need to be, but it will go a little faster if it is.
1016 	 */
1017 	s = splvm();
1018 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1019 
1020 	for (i = reqpage - 1; i >= 0; --i) {
1021 		daddr_t iblk;
1022 
1023 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1024 		if (blk != iblk + (reqpage - i))
1025 			break;
1026 	}
1027 	++i;
1028 
1029 	for (j = reqpage + 1; j < count; ++j) {
1030 		daddr_t jblk;
1031 
1032 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1033 		if (blk != jblk - (j - reqpage))
1034 			break;
1035 	}
1036 
1037 	/*
1038 	 * free pages outside our collection range.   Note: we never free
1039 	 * mreq, it must remain busy throughout.
1040 	 */
1041 	vm_page_lock_queues();
1042 	{
1043 		int k;
1044 
1045 		for (k = 0; k < i; ++k)
1046 			vm_page_free(m[k]);
1047 		for (k = j; k < count; ++k)
1048 			vm_page_free(m[k]);
1049 	}
1050 	vm_page_unlock_queues();
1051 	splx(s);
1052 
1053 
1054 	/*
1055 	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
1056 	 * still busy, but the others unbusied.
1057 	 */
1058 	if (blk == SWAPBLK_NONE)
1059 		return (VM_PAGER_FAIL);
1060 
1061 	/*
1062 	 * Getpbuf() can sleep.
1063 	 */
1064 	VM_OBJECT_UNLOCK(object);
1065 	/*
1066 	 * Get a swap buffer header to perform the IO
1067 	 */
1068 	bp = getpbuf(&nsw_rcount);
1069 	bp->b_flags |= B_PAGING;
1070 
1071 	/*
1072 	 * map our page(s) into kva for input
1073 	 */
1074 	pmap_qenter((vm_offset_t)bp->b_data, m + i, j - i);
1075 
1076 	bp->b_iocmd = BIO_READ;
1077 	bp->b_iodone = swp_pager_async_iodone;
1078 	bp->b_rcred = crhold(thread0.td_ucred);
1079 	bp->b_wcred = crhold(thread0.td_ucred);
1080 	bp->b_blkno = blk - (reqpage - i);
1081 	bp->b_bcount = PAGE_SIZE * (j - i);
1082 	bp->b_bufsize = PAGE_SIZE * (j - i);
1083 	bp->b_pager.pg_reqpage = reqpage - i;
1084 
1085 	VM_OBJECT_LOCK(object);
1086 	vm_page_lock_queues();
1087 	{
1088 		int k;
1089 
1090 		for (k = i; k < j; ++k) {
1091 			bp->b_pages[k - i] = m[k];
1092 			vm_page_flag_set(m[k], PG_SWAPINPROG);
1093 		}
1094 	}
1095 	vm_page_unlock_queues();
1096 	VM_OBJECT_UNLOCK(object);
1097 	bp->b_npages = j - i;
1098 
1099 	cnt.v_swapin++;
1100 	cnt.v_swappgsin += bp->b_npages;
1101 
1102 	/*
1103 	 * We still hold the lock on mreq, and our automatic completion routine
1104 	 * does not remove it.
1105 	 */
1106 	VM_OBJECT_LOCK(mreq->object);
1107 	vm_object_pip_add(mreq->object, bp->b_npages);
1108 	VM_OBJECT_UNLOCK(mreq->object);
1109 
1110 	/*
1111 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1112 	 * this point because we automatically release it on completion.
1113 	 * Instead, we look at the one page we are interested in which we
1114 	 * still hold a lock on even through the I/O completion.
1115 	 *
1116 	 * The other pages in our m[] array are also released on completion,
1117 	 * so we cannot assume they are valid anymore either.
1118 	 *
1119 	 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1120 	 */
1121 	BUF_KERNPROC(bp);
1122 	swp_pager_strategy(bp);
1123 
1124 	/*
1125 	 * wait for the page we want to complete.  PG_SWAPINPROG is always
1126 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1127 	 * is set in the meta-data.
1128 	 */
1129 	s = splvm();
1130 	vm_page_lock_queues();
1131 	while ((mreq->flags & PG_SWAPINPROG) != 0) {
1132 		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1133 		cnt.v_intrans++;
1134 		if (msleep(mreq, &vm_page_queue_mtx, PSWP, "swread", hz*20)) {
1135 			printf(
1136 			    "swap_pager: indefinite wait buffer: device:"
1137 				" %s, blkno: %ld, size: %ld\n",
1138 			    devtoname(bp->b_dev), (long)bp->b_blkno,
1139 			    bp->b_bcount
1140 			);
1141 		}
1142 	}
1143 	vm_page_unlock_queues();
1144 	splx(s);
1145 
1146 	VM_OBJECT_LOCK(mreq->object);
1147 	/*
1148 	 * mreq is left busied after completion, but all the other pages
1149 	 * are freed.  If we had an unrecoverable read error the page will
1150 	 * not be valid.
1151 	 */
1152 	if (mreq->valid != VM_PAGE_BITS_ALL) {
1153 		return (VM_PAGER_ERROR);
1154 	} else {
1155 		return (VM_PAGER_OK);
1156 	}
1157 
1158 	/*
1159 	 * A final note: in a low swap situation, we cannot deallocate swap
1160 	 * and mark a page dirty here because the caller is likely to mark
1161 	 * the page clean when we return, causing the page to possibly revert
1162 	 * to all-zero's later.
1163 	 */
1164 }
1165 
1166 /*
1167  *	swap_pager_putpages:
1168  *
1169  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1170  *
1171  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1172  *	are automatically converted to SWAP objects.
1173  *
1174  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
1175  *	vm_page reservation system coupled with properly written VFS devices
1176  *	should ensure that no low-memory deadlock occurs.  This is an area
1177  *	which needs work.
1178  *
1179  *	The parent has N vm_object_pip_add() references prior to
1180  *	calling us and will remove references for rtvals[] that are
1181  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1182  *	completion.
1183  *
1184  *	The parent has soft-busy'd the pages it passes us and will unbusy
1185  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1186  *	We need to unbusy the rest on I/O completion.
1187  */
1188 void
1189 swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1190     boolean_t sync, int *rtvals)
1191 {
1192 	int i;
1193 	int n = 0;
1194 
1195 	GIANT_REQUIRED;
1196 	if (count && m[0]->object != object) {
1197 		panic("swap_pager_putpages: object mismatch %p/%p",
1198 		    object,
1199 		    m[0]->object
1200 		);
1201 	}
1202 
1203 	/*
1204 	 * Step 1
1205 	 *
1206 	 * Turn object into OBJT_SWAP
1207 	 * check for bogus sysops
1208 	 * force sync if not pageout process
1209 	 */
1210 	if (object->type != OBJT_SWAP)
1211 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1212 	VM_OBJECT_UNLOCK(object);
1213 
1214 	if (curproc != pageproc)
1215 		sync = TRUE;
1216 
1217 	/*
1218 	 * Step 2
1219 	 *
1220 	 * Update nsw parameters from swap_async_max sysctl values.
1221 	 * Do not let the sysop crash the machine with bogus numbers.
1222 	 */
1223 	mtx_lock(&pbuf_mtx);
1224 	if (swap_async_max != nsw_wcount_async_max) {
1225 		int n;
1226 		int s;
1227 
1228 		/*
1229 		 * limit range
1230 		 */
1231 		if ((n = swap_async_max) > nswbuf / 2)
1232 			n = nswbuf / 2;
1233 		if (n < 1)
1234 			n = 1;
1235 		swap_async_max = n;
1236 
1237 		/*
1238 		 * Adjust difference ( if possible ).  If the current async
1239 		 * count is too low, we may not be able to make the adjustment
1240 		 * at this time.
1241 		 */
1242 		s = splvm();
1243 		n -= nsw_wcount_async_max;
1244 		if (nsw_wcount_async + n >= 0) {
1245 			nsw_wcount_async += n;
1246 			nsw_wcount_async_max += n;
1247 			wakeup(&nsw_wcount_async);
1248 		}
1249 		splx(s);
1250 	}
1251 	mtx_unlock(&pbuf_mtx);
1252 
1253 	/*
1254 	 * Step 3
1255 	 *
1256 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1257 	 * The page is left dirty until the pageout operation completes
1258 	 * successfully.
1259 	 */
1260 	for (i = 0; i < count; i += n) {
1261 		int s;
1262 		int j;
1263 		struct buf *bp;
1264 		daddr_t blk;
1265 
1266 		/*
1267 		 * Maximum I/O size is limited by a number of factors.
1268 		 */
1269 		n = min(BLIST_MAX_ALLOC, count - i);
1270 		n = min(n, nsw_cluster_max);
1271 
1272 		s = splvm();
1273 
1274 		/*
1275 		 * Get biggest block of swap we can.  If we fail, fall
1276 		 * back and try to allocate a smaller block.  Don't go
1277 		 * overboard trying to allocate space if it would overly
1278 		 * fragment swap.
1279 		 */
1280 		while (
1281 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1282 		    n > 4
1283 		) {
1284 			n >>= 1;
1285 		}
1286 		if (blk == SWAPBLK_NONE) {
1287 			for (j = 0; j < n; ++j)
1288 				rtvals[i+j] = VM_PAGER_FAIL;
1289 			splx(s);
1290 			continue;
1291 		}
1292 
1293 		/*
1294 		 * All I/O parameters have been satisfied, build the I/O
1295 		 * request and assign the swap space.
1296 		 */
1297 		if (sync == TRUE) {
1298 			bp = getpbuf(&nsw_wcount_sync);
1299 		} else {
1300 			bp = getpbuf(&nsw_wcount_async);
1301 			bp->b_flags = B_ASYNC;
1302 		}
1303 		bp->b_flags |= B_PAGING;
1304 		bp->b_iocmd = BIO_WRITE;
1305 
1306 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1307 
1308 		bp->b_rcred = crhold(thread0.td_ucred);
1309 		bp->b_wcred = crhold(thread0.td_ucred);
1310 		bp->b_bcount = PAGE_SIZE * n;
1311 		bp->b_bufsize = PAGE_SIZE * n;
1312 		bp->b_blkno = blk;
1313 
1314 		VM_OBJECT_LOCK(object);
1315 		for (j = 0; j < n; ++j) {
1316 			vm_page_t mreq = m[i+j];
1317 
1318 			swp_pager_meta_build(
1319 			    mreq->object,
1320 			    mreq->pindex,
1321 			    blk + j
1322 			);
1323 			vm_page_dirty(mreq);
1324 			rtvals[i+j] = VM_PAGER_OK;
1325 
1326 			vm_page_lock_queues();
1327 			vm_page_flag_set(mreq, PG_SWAPINPROG);
1328 			vm_page_unlock_queues();
1329 			bp->b_pages[j] = mreq;
1330 		}
1331 		VM_OBJECT_UNLOCK(object);
1332 		bp->b_npages = n;
1333 		/*
1334 		 * Must set dirty range for NFS to work.
1335 		 */
1336 		bp->b_dirtyoff = 0;
1337 		bp->b_dirtyend = bp->b_bcount;
1338 
1339 		cnt.v_swapout++;
1340 		cnt.v_swappgsout += bp->b_npages;
1341 
1342 		splx(s);
1343 
1344 		/*
1345 		 * asynchronous
1346 		 *
1347 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1348 		 */
1349 		if (sync == FALSE) {
1350 			bp->b_iodone = swp_pager_async_iodone;
1351 			BUF_KERNPROC(bp);
1352 			swp_pager_strategy(bp);
1353 
1354 			for (j = 0; j < n; ++j)
1355 				rtvals[i+j] = VM_PAGER_PEND;
1356 			/* restart outer loop */
1357 			continue;
1358 		}
1359 
1360 		/*
1361 		 * synchronous
1362 		 *
1363 		 * NOTE: b_blkno is destroyed by the call to swapdev_strategy
1364 		 */
1365 		bp->b_iodone = swp_pager_sync_iodone;
1366 		swp_pager_strategy(bp);
1367 
1368 		/*
1369 		 * Wait for the sync I/O to complete, then update rtvals.
1370 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1371 		 * our async completion routine at the end, thus avoiding a
1372 		 * double-free.
1373 		 */
1374 		s = splbio();
1375 		while ((bp->b_flags & B_DONE) == 0) {
1376 			tsleep(bp, PVM, "swwrt", 0);
1377 		}
1378 		for (j = 0; j < n; ++j)
1379 			rtvals[i+j] = VM_PAGER_PEND;
1380 		/*
1381 		 * Now that we are through with the bp, we can call the
1382 		 * normal async completion, which frees everything up.
1383 		 */
1384 		swp_pager_async_iodone(bp);
1385 		splx(s);
1386 	}
1387 	VM_OBJECT_LOCK(object);
1388 }
1389 
1390 /*
1391  *	swap_pager_sync_iodone:
1392  *
1393  *	Completion routine for synchronous reads and writes from/to swap.
1394  *	We just mark the bp as complete and wake up anyone waiting on it.
1395  *
1396  *	This routine may not block.  This routine is called at splbio() or better.
1397  */
1398 static void
1399 swp_pager_sync_iodone(struct buf *bp)
1400 {
1401 	bp->b_flags |= B_DONE;
1402 	bp->b_flags &= ~B_ASYNC;
1403 	wakeup(bp);
1404 }
1405 
1406 /*
1407  *	swp_pager_async_iodone:
1408  *
1409  *	Completion routine for asynchronous reads and writes from/to swap.
1410  *	Also called manually by synchronous code to finish up a bp.
1411  *
1412  *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
1413  *	the pages are vm_page_t->busy'd.  For READ operations, we clear
1414  *	PG_BUSY on all pages except the 'main' request page.  For WRITE
1415  *	operations, we drop the busy count on all pages ( we can do this
1416  *	because we marked them all VM_PAGER_PEND on return from putpages ).
1417  *
1418  *	This routine may not block.
1419  *	This routine is called at splbio() or better
1420  *
1421  *	We up ourselves to splvm() as required for various vm_page related
1422  *	calls.
1423  */
1424 static void
1425 swp_pager_async_iodone(struct buf *bp)
1426 {
1427 	int s;
1428 	int i;
1429 	vm_object_t object = NULL;
1430 
1431 	GIANT_REQUIRED;
1432 	bp->b_flags |= B_DONE;
1433 
1434 	/*
1435 	 * report error
1436 	 */
1437 	if (bp->b_ioflags & BIO_ERROR) {
1438 		printf(
1439 		    "swap_pager: I/O error - %s failed; blkno %ld,"
1440 			"size %ld, error %d\n",
1441 		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1442 		    (long)bp->b_blkno,
1443 		    (long)bp->b_bcount,
1444 		    bp->b_error
1445 		);
1446 	}
1447 
1448 	/*
1449 	 * set object, raise to splvm().
1450 	 */
1451 	s = splvm();
1452 
1453 	/*
1454 	 * remove the mapping for kernel virtual
1455 	 */
1456 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1457 
1458 	if (bp->b_npages) {
1459 		object = bp->b_pages[0]->object;
1460 		VM_OBJECT_LOCK(object);
1461 	}
1462 	vm_page_lock_queues();
1463 	/*
1464 	 * cleanup pages.  If an error occurs writing to swap, we are in
1465 	 * very serious trouble.  If it happens to be a disk error, though,
1466 	 * we may be able to recover by reassigning the swap later on.  So
1467 	 * in this case we remove the m->swapblk assignment for the page
1468  *	but do not free it in the bitmap.  The erroneous block(s) are thus
1469 	 * never reallocated as swap.  Redirty the page and continue.
1470 	 */
1471 	for (i = 0; i < bp->b_npages; ++i) {
1472 		vm_page_t m = bp->b_pages[i];
1473 
1474 		vm_page_flag_clear(m, PG_SWAPINPROG);
1475 
1476 		if (bp->b_ioflags & BIO_ERROR) {
1477 			/*
1478 			 * If an error occurs I'd love to throw the swapblk
1479 			 * away without freeing it back to swapspace, so it
1480 			 * can never be used again.  But I can't from an
1481 			 * interrupt.
1482 			 */
1483 			if (bp->b_iocmd == BIO_READ) {
1484 				/*
1485 				 * When reading, reqpage needs to stay
1486 				 * locked for the parent, but all other
1487 				 * pages can be freed.  We still want to
1488 				 * wakeup the parent waiting on the page,
1489 				 * though.  ( also: pg_reqpage can be -1 and
1490 				 * not match anything ).
1491 				 *
1492 				 * We have to wake specifically requested pages
1493 				 * up too because we cleared PG_SWAPINPROG and
1494 				 * someone may be waiting for that.
1495 				 *
1496 				 * NOTE: for reads, m->dirty will probably
1497 				 * be overridden by the original caller of
1498 				 * getpages so don't play cute tricks here.
1499 				 *
1500 				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
1501 				 * AS THIS MESSES WITH object->memq, and it is
1502 				 * not legal to mess with object->memq from an
1503 				 * interrupt.
1504 				 */
1505 				m->valid = 0;
1506 				vm_page_flag_clear(m, PG_ZERO);
1507 				if (i != bp->b_pager.pg_reqpage)
1508 					vm_page_free(m);
1509 				else
1510 					vm_page_flash(m);
1511 				/*
1512 				 * If i == bp->b_pager.pg_reqpage, do not wake
1513 				 * the page up.  The caller needs to.
1514 				 */
1515 			} else {
1516 				/*
1517 				 * If a write error occurs, reactivate page
1518 				 * so it doesn't clog the inactive list,
1519 				 * then finish the I/O.
1520 				 */
1521 				vm_page_dirty(m);
1522 				vm_page_activate(m);
1523 				vm_page_io_finish(m);
1524 			}
1525 		} else if (bp->b_iocmd == BIO_READ) {
1526 			/*
1527 			 * For read success, clear dirty bits.  Nobody should
1528 			 * have this page mapped but don't take any chances,
1529 			 * make sure the pmap modify bits are also cleared.
1530 			 *
1531 			 * NOTE: for reads, m->dirty will probably be
1532 			 * overridden by the original caller of getpages so
1533 			 * we cannot set them in order to free the underlying
1534 			 * swap in a low-swap situation.  I don't think we'd
1535 			 * want to do that anyway, but it was an optimization
1536 			 * that existed in the old swapper for a time before
1537 			 * it got ripped out due to precisely this problem.
1538 			 *
1539 			 * clear PG_ZERO in page.
1540 			 *
1541 			 * If not the requested page then deactivate it.
1542 			 *
1543 			 * Note that the requested page, reqpage, is left
1544 			 * busied, but we still have to wake it up.  The
1545 			 * other pages are released (unbusied) by
1546 			 * vm_page_wakeup().  We do not set reqpage's
1547 			 * valid bits here, it is up to the caller.
1548 			 */
1549 			pmap_clear_modify(m);
1550 			m->valid = VM_PAGE_BITS_ALL;
1551 			vm_page_undirty(m);
1552 			vm_page_flag_clear(m, PG_ZERO);
1553 
1554 			/*
1555 			 * We have to wake specifically requested pages
1556 			 * up too because we cleared PG_SWAPINPROG and
1557 			 * could be waiting for it in getpages.  However,
1558 			 * be sure to not unbusy getpages specifically
1559 			 * requested page - getpages expects it to be
1560 			 * left busy.
1561 			 */
1562 			if (i != bp->b_pager.pg_reqpage) {
1563 				vm_page_deactivate(m);
1564 				vm_page_wakeup(m);
1565 			} else {
1566 				vm_page_flash(m);
1567 			}
1568 		} else {
1569 			/*
1570 			 * For write success, clear the modify and dirty
1571 			 * status, then finish the I/O ( which decrements the
1572 			 * busy count and possibly wakes waiter's up ).
1573 			 */
1574 			pmap_clear_modify(m);
1575 			vm_page_undirty(m);
1576 			vm_page_io_finish(m);
1577 			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1578 				pmap_page_protect(m, VM_PROT_READ);
1579 		}
1580 	}
1581 	vm_page_unlock_queues();
1582 
1583 	/*
1584 	 * adjust pip.  NOTE: the original parent may still have its own
1585 	 * pip refs on the object.
1586 	 */
1587 	if (object != NULL) {
1588 		vm_object_pip_wakeupn(object, bp->b_npages);
1589 		VM_OBJECT_UNLOCK(object);
1590 	}
1591 
1592 	/*
1593 	 * release the physical I/O buffer
1594 	 */
1595 	relpbuf(
1596 	    bp,
1597 	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1598 		((bp->b_flags & B_ASYNC) ?
1599 		    &nsw_wcount_async :
1600 		    &nsw_wcount_sync
1601 		)
1602 	    )
1603 	);
1604 	splx(s);
1605 }
1606 
1607 /*
1608  *	swap_pager_isswapped:
1609  *
1610  *	Return 1 if at least one page in the given object is paged
1611  *	out to the given swap device.
1612  *
1613  *	This routine may not block.
1614  */
1615 int
1616 swap_pager_isswapped(vm_object_t object, struct swdevt *sp)
1617 {
1618 	daddr_t index = 0;
1619 	int bcount;
1620 	int i;
1621 
1622 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1623 	for (bcount = 0; bcount < object->un_pager.swp.swp_bcount; bcount++) {
1624 		struct swblock *swap;
1625 
1626 		mtx_lock(&swhash_mtx);
1627 		if ((swap = *swp_pager_hash(object, index)) != NULL) {
1628 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1629 				daddr_t v = swap->swb_pages[i];
1630 				if (v == SWAPBLK_NONE)
1631 					continue;
1632 				if (swp_pager_find_dev(v) == sp) {
1633 					mtx_unlock(&swhash_mtx);
1634 					return 1;
1635 				}
1636 			}
1637 		}
1638 		mtx_unlock(&swhash_mtx);
1639 		index += SWAP_META_PAGES;
1640 		if (index > 0x20000000)
1641 			panic("swap_pager_isswapped: failed to locate all swap meta blocks");
1642 	}
1643 	return 0;
1644 }
1645 
1646 /*
1647  * SWP_PAGER_FORCE_PAGEIN() - force a swap block to be paged in
1648  *
1649  *	This routine dissociates the page at the given index within a
1650  *	swap block from its backing store, paging it in if necessary.
1651  *	If the page is paged in, it is placed in the inactive queue,
1652  *	since it had its backing store ripped out from under it.
1653  *	We also attempt to swap in all other pages in the swap block,
1654  *	we only guarantee that the one at the specified index is
1655  *	paged in.
1656  *
1657  *	XXX - The code to page the whole block in doesn't work, so we
1658  *	      revert to the one-by-one behavior for now.  Sigh.
1659  */
1660 static __inline void
1661 swp_pager_force_pagein(struct swblock *swap, int idx)
1662 {
1663 	vm_object_t object;
1664 	vm_page_t m;
1665 	vm_pindex_t pindex;
1666 
1667 	object = swap->swb_object;
1668 	pindex = swap->swb_index;
1669 	mtx_unlock(&swhash_mtx);
1670 
1671 	VM_OBJECT_LOCK(object);
1672 	vm_object_pip_add(object, 1);
1673 	m = vm_page_grab(object, pindex + idx, VM_ALLOC_NORMAL|VM_ALLOC_RETRY);
1674 	if (m->valid == VM_PAGE_BITS_ALL) {
1675 		vm_object_pip_subtract(object, 1);
1676 		vm_page_lock_queues();
1677 		vm_page_activate(m);
1678 		vm_page_dirty(m);
1679 		vm_page_wakeup(m);
1680 		vm_page_unlock_queues();
1681 		vm_pager_page_unswapped(m);
1682 		VM_OBJECT_UNLOCK(object);
1683 		return;
1684 	}
1685 
1686 	if (swap_pager_getpages(object, &m, 1, 0) !=
1687 	    VM_PAGER_OK)
1688 		panic("swap_pager_force_pagein: read from swap failed");/*XXX*/
1689 	vm_object_pip_subtract(object, 1);
1690 	vm_page_lock_queues();
1691 	vm_page_dirty(m);
1692 	vm_page_dontneed(m);
1693 	vm_page_wakeup(m);
1694 	vm_page_unlock_queues();
1695 	vm_pager_page_unswapped(m);
1696 	VM_OBJECT_UNLOCK(object);
1697 }
1698 
1699 
1700 /*
1701  *	swap_pager_swapoff:
1702  *
1703  *	Page in all of the pages that have been paged out to the
1704  *	given device.  The corresponding blocks in the bitmap must be
1705  *	marked as allocated and the device must be flagged SW_CLOSING.
1706  *	There may be no processes swapped out to the device.
1707  *
1708  *	The sw_used parameter points to the field in the swdev structure
1709  *	that contains a count of the number of blocks still allocated
1710  *	on the device.  If we encounter objects with a nonzero pip count
1711  *	in our scan, we use this number to determine if we're really done.
1712  *
1713  *	This routine may block.
1714  */
1715 static void
1716 swap_pager_swapoff(struct swdevt *sp, int *sw_used)
1717 {
1718 	struct swblock **pswap;
1719 	struct swblock *swap;
1720 	vm_object_t waitobj;
1721 	daddr_t v;
1722 	int i, j;
1723 
1724 	GIANT_REQUIRED;
1725 
1726 full_rescan:
1727 	waitobj = NULL;
1728 	for (i = 0; i <= swhash_mask; i++) { /* '<=' is correct here */
1729 restart:
1730 		pswap = &swhash[i];
1731 		mtx_lock(&swhash_mtx);
1732 		while ((swap = *pswap) != NULL) {
1733 			for (j = 0; j < SWAP_META_PAGES; ++j) {
1734 				v = swap->swb_pages[j];
1735 				if (v != SWAPBLK_NONE &&
1736 				    swp_pager_find_dev(v) == sp)
1737 					break;
1738 			}
1739 			if (j < SWAP_META_PAGES) {
1740 				swp_pager_force_pagein(swap, j);
1741 				goto restart;
1742 			} else if (swap->swb_object->paging_in_progress) {
1743 				if (!waitobj)
1744 					waitobj = swap->swb_object;
1745 			}
1746 			pswap = &swap->swb_hnext;
1747 		}
1748 		mtx_unlock(&swhash_mtx);
1749 	}
1750 	if (waitobj && *sw_used) {
1751 	    /*
1752 	     * We wait on an arbitrary object to clock our rescans
1753 	     * to the rate of paging completion.
1754 	     */
1755 	    VM_OBJECT_LOCK(waitobj);
1756 	    vm_object_pip_wait(waitobj, "swpoff");
1757 	    VM_OBJECT_UNLOCK(waitobj);
1758 	    goto full_rescan;
1759 	}
1760 	if (*sw_used)
1761 	    panic("swapoff: failed to locate %d swap blocks", *sw_used);
1762 }
1763 
1764 /************************************************************************
1765  *				SWAP META DATA 				*
1766  ************************************************************************
1767  *
1768  *	These routines manipulate the swap metadata stored in the
1769  *	OBJT_SWAP object.  All swp_*() routines must be called at
1770  *	splvm() because swap can be freed up by the low level vm_page
1771  *	code which might be called from interrupts beyond what splbio() covers.
1772  *
1773  *	Swap metadata is implemented with a global hash and not directly
1774  *	linked into the object.  Instead the object simply contains
1775  *	appropriate tracking counters.
1776  */
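
/*
 * For orientation, the hash bucket lookup works roughly as sketched
 * below.  This is an illustrative paraphrase of swp_pager_hash(),
 * which is defined earlier in this file; the exact hash mixing shown
 * here is an assumption, not the authoritative code:
 *
 *	pswap = &swhash[((pindex / SWAP_META_PAGES) ^
 *	    (uintptr_t)object) & swhash_mask];
 *	while ((swap = *pswap) != NULL &&
 *	    (swap->swb_object != object ||
 *	    swap->swb_index != (pindex & ~(vm_pindex_t)SWAP_META_MASK)))
 *		pswap = &swap->swb_hnext;
 */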
1777 
1778 /*
1779  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
1780  *
1781  *	We first convert the object to a swap object if it is a default
1782  *	object.
1783  *
1784  *	The specified swapblk is added to the object's swap metadata.  If
1785  *	the swapblk is not valid, it is freed instead.  Any previously
1786  *	assigned swapblk is freed.
1787  *
1788  *	This routine must be called at splvm(), except when used to convert
1789  *	an OBJT_DEFAULT object into an OBJT_SWAP object.
1790  */
1791 static void
1792 swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
1793 {
1794 	struct swblock *swap;
1795 	struct swblock **pswap;
1796 	int idx;
1797 
1798 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1799 	/*
1800 	 * Convert default object to swap object if necessary
1801 	 */
1802 	if (object->type != OBJT_SWAP) {
1803 		object->type = OBJT_SWAP;
1804 		object->un_pager.swp.swp_bcount = 0;
1805 
1806 		if (object->handle != NULL) {
1807 			mtx_lock(&sw_alloc_mtx);
1808 			TAILQ_INSERT_TAIL(
1809 			    NOBJLIST(object->handle),
1810 			    object,
1811 			    pager_object_list
1812 			);
1813 			mtx_unlock(&sw_alloc_mtx);
1814 		}
1815 	}
1816 
1817 	/*
1818 	 * Locate the hash entry.  If not found, create one; but if we
1819 	 * aren't adding anything, just return.  If we run out of space in
1820 	 * the map we wait and, since the hash table may have changed, retry.
1821 	 */
1822 retry:
1823 	mtx_lock(&swhash_mtx);
1824 	pswap = swp_pager_hash(object, pindex);
1825 
1826 	if ((swap = *pswap) == NULL) {
1827 		int i;
1828 
1829 		if (swapblk == SWAPBLK_NONE)
1830 			goto done;
1831 
1832 		swap = *pswap = uma_zalloc(swap_zone, M_NOWAIT);
1833 		if (swap == NULL) {
1834 			mtx_unlock(&swhash_mtx);
1835 			VM_OBJECT_UNLOCK(object);
1836 			VM_WAIT;
1837 			VM_OBJECT_LOCK(object);
1838 			goto retry;
1839 		}
1840 
1841 		swap->swb_hnext = NULL;
1842 		swap->swb_object = object;
1843 		swap->swb_index = pindex & ~(vm_pindex_t)SWAP_META_MASK;
1844 		swap->swb_count = 0;
1845 
1846 		++object->un_pager.swp.swp_bcount;
1847 
1848 		for (i = 0; i < SWAP_META_PAGES; ++i)
1849 			swap->swb_pages[i] = SWAPBLK_NONE;
1850 	}
1851 
1852 	/*
1853 	 * Delete prior contents of metadata
1854 	 */
1855 	idx = pindex & SWAP_META_MASK;
1856 
1857 	if (swap->swb_pages[idx] != SWAPBLK_NONE) {
1858 		swp_pager_freeswapspace(swap->swb_pages[idx], 1);
1859 		--swap->swb_count;
1860 	}
1861 
1862 	/*
1863 	 * Enter block into metadata
1864 	 */
1865 	swap->swb_pages[idx] = swapblk;
1866 	if (swapblk != SWAPBLK_NONE)
1867 		++swap->swb_count;
1868 done:
1869 	mtx_unlock(&swhash_mtx);
1870 }
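
/*
 * Illustrative caller pattern (a sketch with hypothetical variables;
 * the real callers are the putpages path and swap_pager_copy()): with
 * the object lock held, record a freshly allocated block for a page,
 * or clear the assignment again by passing SWAPBLK_NONE:
 *
 *	swp_pager_meta_build(object, m->pindex, blk);
 *	swp_pager_meta_build(object, m->pindex, SWAPBLK_NONE);
 */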
1871 
1872 /*
1873  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1874  *
1875  *	The requested range of blocks is freed, with any associated swap
1876  *	returned to the swap bitmap.
1877  *
1878  *	This routine will free swap metadata structures as they are cleaned
1879  *	out.  This routine does *NOT* operate on swap metadata associated
1880  *	with resident pages.
1881  *
1882  *	This routine must be called at splvm()
1883  */
1884 static void
1885 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1886 {
1887 
1888 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1889 	if (object->type != OBJT_SWAP)
1890 		return;
1891 
1892 	while (count > 0) {
1893 		struct swblock **pswap;
1894 		struct swblock *swap;
1895 
1896 		mtx_lock(&swhash_mtx);
1897 		pswap = swp_pager_hash(object, index);
1898 
1899 		if ((swap = *pswap) != NULL) {
1900 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1901 
1902 			if (v != SWAPBLK_NONE) {
1903 				swp_pager_freeswapspace(v, 1);
1904 				swap->swb_pages[index & SWAP_META_MASK] =
1905 					SWAPBLK_NONE;
1906 				if (--swap->swb_count == 0) {
1907 					*pswap = swap->swb_hnext;
1908 					uma_zfree(swap_zone, swap);
1909 					--object->un_pager.swp.swp_bcount;
1910 				}
1911 			}
1912 			--count;
1913 			++index;
1914 		} else {
1915 			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1916 			count -= n;
1917 			index += n;
1918 		}
1919 		mtx_unlock(&swhash_mtx);
1920 	}
1921 }
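
/*
 * Illustrative use (a sketch, not a verbatim caller): when an object
 * shrinks from 'osize' to 'nsize' pages, the tail of its swap metadata
 * would be released with something like:
 *
 *	swp_pager_meta_free(object, nsize, osize - nsize);
 */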
1922 
1923 /*
1924  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1925  *
1926  *	This routine locates and destroys all swap metadata associated with
1927  *	an object.
1928  *
1929  *	This routine must be called at splvm()
1930  */
1931 static void
1932 swp_pager_meta_free_all(vm_object_t object)
1933 {
1934 	daddr_t index = 0;
1935 
1936 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1937 	if (object->type != OBJT_SWAP)
1938 		return;
1939 
1940 	while (object->un_pager.swp.swp_bcount) {
1941 		struct swblock **pswap;
1942 		struct swblock *swap;
1943 
1944 		mtx_lock(&swhash_mtx);
1945 		pswap = swp_pager_hash(object, index);
1946 		if ((swap = *pswap) != NULL) {
1947 			int i;
1948 
1949 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1950 				daddr_t v = swap->swb_pages[i];
1951 				if (v != SWAPBLK_NONE) {
1952 					--swap->swb_count;
1953 					swp_pager_freeswapspace(v, 1);
1954 				}
1955 			}
1956 			if (swap->swb_count != 0)
1957 				panic("swp_pager_meta_free_all: swb_count != 0");
1958 			*pswap = swap->swb_hnext;
1959 			uma_zfree(swap_zone, swap);
1960 			--object->un_pager.swp.swp_bcount;
1961 		}
1962 		mtx_unlock(&swhash_mtx);
1963 		index += SWAP_META_PAGES;
1964 		if (index > 0x20000000)
1965 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1966 	}
1967 }
1968 
1969 /*
1970  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
1971  *
1972  *	This routine is capable of looking up, popping, or freeing
1973  *	swapblk assignments in the swap meta data or in the vm_page_t.
1974  *	The routine typically returns the swapblk being looked-up, or popped,
1975  *	The routine typically returns the swapblk being looked up or
1976  *	popped, or SWAPBLK_NONE if the block was freed or was invalid.
1977  *	This routine will automatically free any invalid
1978  *	meta-data swapblks.
1979  *	It is not possible to store invalid swapblks in the swap meta data
1980 	 *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1981  *
1982  *	When acting on a busy resident page and paging is in progress, we
1983  *	have to wait until paging is complete but otherwise can act on the
1984  *	busy page.
1985  *
1986  *	This routine must be called at splvm().
1987  *
1988  *	SWM_FREE	remove and free swap block from metadata
1989 	 *	SWM_POP		remove from meta data but do not free it; pop it out
1990  */
1991 static daddr_t
1992 swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
1993 {
1994 	struct swblock **pswap;
1995 	struct swblock *swap;
1996 	daddr_t r1;
1997 	int idx;
1998 
1999 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2000 	/*
2001 	 * The meta data only exists if the object is OBJT_SWAP
2002 	 * and even then might not be allocated yet.
2003 	 */
2004 	if (object->type != OBJT_SWAP)
2005 		return (SWAPBLK_NONE);
2006 
2007 	r1 = SWAPBLK_NONE;
2008 	mtx_lock(&swhash_mtx);
2009 	pswap = swp_pager_hash(object, pindex);
2010 
2011 	if ((swap = *pswap) != NULL) {
2012 		idx = pindex & SWAP_META_MASK;
2013 		r1 = swap->swb_pages[idx];
2014 
2015 		if (r1 != SWAPBLK_NONE) {
2016 			if (flags & SWM_FREE) {
2017 				swp_pager_freeswapspace(r1, 1);
2018 				r1 = SWAPBLK_NONE;
2019 			}
2020 			if (flags & (SWM_FREE|SWM_POP)) {
2021 				swap->swb_pages[idx] = SWAPBLK_NONE;
2022 				if (--swap->swb_count == 0) {
2023 					*pswap = swap->swb_hnext;
2024 					uma_zfree(swap_zone, swap);
2025 					--object->un_pager.swp.swp_bcount;
2026 				}
2027 			}
2028 		}
2029 	}
2030 	mtx_unlock(&swhash_mtx);
2031 	return (r1);
2032 }
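
/*
 * Illustrative calls (a sketch of the three modes, not verbatim
 * callers):
 *
 *	blk = swp_pager_meta_ctl(object, pindex, 0);	     (lookup only)
 *	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);   (claim block)
 *	(void) swp_pager_meta_ctl(object, pindex, SWM_FREE); (free block)
 */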
2033 
2034 /*
2035  * System call swapon(name) enables swapping on device name,
2036  * which must be a configured swap device.  Return EBUSY
2037  * if already swapping on this device.
2038  */
2039 #ifndef _SYS_SYSPROTO_H_
2040 struct swapon_args {
2041 	char *name;
2042 };
2043 #endif
2044 
2045 /*
2046  * MPSAFE
2047  */
2048 /* ARGSUSED */
2049 int
2050 swapon(struct thread *td, struct swapon_args *uap)
2051 {
2052 	struct vattr attr;
2053 	struct vnode *vp;
2054 	struct nameidata nd;
2055 	int error;
2056 
2057 	mtx_lock(&Giant);
2058 	error = suser(td);
2059 	if (error)
2060 		goto done2;
2061 
2062 	while (swdev_syscall_active)
2063 		tsleep(&swdev_syscall_active, PUSER - 1, "swpon", 0);
2064 	swdev_syscall_active = 1;
2065 
2066 	/*
2067 	 * Swap metadata may not fit in the KVM if we have more than
2068 	 * 1GB of physical memory.
2069 	 */
2070 	if (swap_zone == NULL) {
2071 		error = ENOMEM;
2072 		goto done;
2073 	}
2074 
2075 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
2076 	error = namei(&nd);
2077 	if (error)
2078 		goto done;
2079 
2080 	NDFREE(&nd, NDF_ONLY_PNBUF);
2081 	vp = nd.ni_vp;
2082 
2083 	if (vn_isdisk(vp, &error)) {
2084 		error = swapongeom(td, vp);
2085 	} else if (vp->v_type == VREG &&
2086 	    (vp->v_mount->mnt_vfc->vfc_flags & VFCF_NETWORK) != 0 &&
2087 	    (error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) == 0) {
2088 		/*
2089 		 * Allow direct swapping to NFS regular files in the same
2090 		 * way that nfs_mountroot() sets up diskless swapping.
2091 		 */
2092 		error = swaponvp(td, vp, attr.va_size / DEV_BSIZE);
2093 	}
2094 
2095 	if (error)
2096 		vrele(vp);
2097 done:
2098 	swdev_syscall_active = 0;
2099 	wakeup_one(&swdev_syscall_active);
2100 done2:
2101 	mtx_unlock(&Giant);
2102 	return (error);
2103 }
2104 
2105 static void
2106 swaponsomething(struct vnode *vp, void *id, u_long nblks, sw_strategy_t *strategy, sw_close_t *close, udev_t udev)
2107 {
2108 	struct swdevt *sp, *tsp;
2109 	swblk_t dvbase;
2110 	u_long mblocks;
2111 
2112 	/*
2113 	 * If we go beyond this, we get overflows in the radix
2114 	 * tree bitmap code.
2115 	 */
2116 	mblocks = 0x40000000 / BLIST_META_RADIX;
2117 	if (nblks > mblocks) {
2118 		printf("WARNING: reducing size to maximum of %lu blocks per swap unit\n",
2119 			mblocks);
2120 		nblks = mblocks;
2121 	}
2122 	/*
2123 	 * nblks is in DEV_BSIZE'd chunks, convert to PAGE_SIZE'd chunks.
2124 	 * First chop nblks off to page-align it, then convert.
2125 	 *
2126 	 * sw->sw_nblks is in page-sized chunks now too.
2127 	 */
2128 	nblks &= ~(ctodb(1) - 1);
2129 	nblks = dbtoc(nblks);
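	/*
	 * Worked example (assuming PAGE_SIZE = 4096 and DEV_BSIZE = 512,
	 * so ctodb(1) == 8): nblks = 1048577 sectors is first rounded
	 * down to 1048576 and then converted to 131072 pages by dbtoc().
	 */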
2130 
2131 	sp = malloc(sizeof *sp, M_VMPGDATA, M_WAITOK | M_ZERO);
2132 	sp->sw_vp = vp;
2133 	sp->sw_id = id;
2134 	sp->sw_udev = udev;
2135 	sp->sw_flags = 0;
2136 	sp->sw_nblks = nblks;
2137 	sp->sw_used = 0;
2138 	sp->sw_strategy = strategy;
2139 	sp->sw_close = close;
2140 
2141 	sp->sw_blist = blist_create(nblks);
2142 	/*
2143 	 * Do not free the first two blocks in order to avoid overwriting
2144 	 * any BSD label at the front of the partition.
2145 	 */
2146 	blist_free(sp->sw_blist, 2, nblks - 2);
2147 
2148 	dvbase = 0;
2149 	mtx_lock(&sw_dev_mtx);
2150 	TAILQ_FOREACH(tsp, &swtailq, sw_list) {
2151 		if (tsp->sw_end >= dvbase) {
2152 			/*
2153 			 * We put one uncovered page between the devices
2154 			 * in order to definitively prevent any cross-device
2155 			 * I/O requests.
2156 			 */
2157 			dvbase = tsp->sw_end + 1;
2158 		}
2159 	}
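	/*
	 * Example (hypothetical sizes): if an existing device ends at
	 * sw_end == 1000, the new device gets sw_first == 1001, so the
	 * global block 1000 belongs to neither device and can never be
	 * part of an I/O request.
	 */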
2160 	sp->sw_first = dvbase;
2161 	sp->sw_end = dvbase + nblks;
2162 	TAILQ_INSERT_TAIL(&swtailq, sp, sw_list);
2163 	nswapdev++;
2164 	swap_pager_avail += nblks;
2165 	swp_sizecheck();
2166 	mtx_unlock(&sw_dev_mtx);
2167 }
2168 
2169 /*
2170  * SYSCALL: swapoff(devname)
2171  *
2172  * Disable swapping on the given device.
2173  *
2174  * XXX: Badly designed system call: it should use a device index
2175  * rather than a filename as its specification.  We keep sw_vp around
2176  * only to make this work.
2177  */
2178 #ifndef _SYS_SYSPROTO_H_
2179 struct swapoff_args {
2180 	char *name;
2181 };
2182 #endif
2183 
2184 /*
2185  * MPSAFE
2186  */
2187 /* ARGSUSED */
2188 int
2189 swapoff(struct thread *td, struct swapoff_args *uap)
2190 {
2191 	struct vnode *vp;
2192 	struct nameidata nd;
2193 	struct swdevt *sp;
2194 	u_long nblks, dvbase;
2195 	int error;
2196 
2197 	mtx_lock(&Giant);
2198 
2199 	error = suser(td);
2200 	if (error)
2201 		goto done2;
2202 
2203 	while (swdev_syscall_active)
2204 		tsleep(&swdev_syscall_active, PUSER - 1, "swpoff", 0);
2205 	swdev_syscall_active = 1;
2206 
2207 	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->name, td);
2208 	error = namei(&nd);
2209 	if (error)
2210 		goto done;
2211 	NDFREE(&nd, NDF_ONLY_PNBUF);
2212 	vp = nd.ni_vp;
2213 
2214 	mtx_lock(&sw_dev_mtx);
2215 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2216 		if (sp->sw_vp == vp)
2217 			goto found;
2218 	}
2219 	mtx_unlock(&sw_dev_mtx);
2220 	error = EINVAL;
2221 	goto done;
2222 found:
2223 	mtx_unlock(&sw_dev_mtx);
2224 #ifdef MAC
2225 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2226 	error = mac_check_system_swapoff(td->td_ucred, vp);
2227 	(void) VOP_UNLOCK(vp, 0, td);
2228 	if (error != 0)
2229 		goto done;
2230 #endif
2231 
2232 	nblks = sp->sw_nblks;
2233 
2234 	/*
2235 	 * We can turn off this swap device safely only if the
2236 	 * available virtual memory in the system will fit the amount
2237 	 * of data we will have to page back in, plus an epsilon so
2238 	 * the system doesn't become critically low on swap space.
2239 	 */
2240 	if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail <
2241 	    nblks + nswap_lowat) {
2242 		error = ENOMEM;
2243 		goto done;
2244 	}
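	/*
	 * For illustration (all numbers hypothetical): with 10000 free
	 * pages, 5000 cached pages and 20000 free blocks left on other
	 * swap devices, a device with nblks == 30000 may be removed only
	 * if 10000 + 5000 + 20000 >= 30000 + nswap_lowat.
	 */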
2245 
2246 	/*
2247 	 * Prevent further allocations on this device.
2248 	 */
2249 	mtx_lock(&sw_dev_mtx);
2250 	sp->sw_flags |= SW_CLOSING;
2251 	for (dvbase = 0; dvbase < sp->sw_end; dvbase += dmmax) {
2252 		swap_pager_avail -= blist_fill(sp->sw_blist,
2253 		     dvbase, dmmax);
2254 	}
2255 	mtx_unlock(&sw_dev_mtx);
2256 
2257 	/*
2258 	 * Page in the contents of the device and close it.
2259 	 */
2260 #ifndef NO_SWAPPING
2261 	vm_proc_swapin_all(sp);
2262 #endif /* !NO_SWAPPING */
2263 	swap_pager_swapoff(sp, &sp->sw_used);
2264 
2265 	sp->sw_close(td, sp);
2266 	sp->sw_id = NULL;
2267 	mtx_lock(&sw_dev_mtx);
2268 	TAILQ_REMOVE(&swtailq, sp, sw_list);
2269 	nswapdev--;
2270 	if (swdevhd == sp)
2271 		swdevhd = NULL;
2272 	mtx_unlock(&sw_dev_mtx);
2273 	blist_destroy(sp->sw_blist);
2274 	free(sp, M_VMPGDATA);
2275 
2276 done:
2277 	swdev_syscall_active = 0;
2278 	wakeup_one(&swdev_syscall_active);
2279 done2:
2280 	mtx_unlock(&Giant);
2281 	return (error);
2282 }
2283 
2284 void
2285 swap_pager_status(int *total, int *used)
2286 {
2287 	struct swdevt *sp;
2288 
2289 	*total = 0;
2290 	*used = 0;
2291 	mtx_lock(&sw_dev_mtx);
2292 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2293 		*total += sp->sw_nblks;
2294 		*used += sp->sw_used;
2295 	}
2296 	mtx_unlock(&sw_dev_mtx);
2297 }
2298 
2299 static int
2300 sysctl_vm_swap_info(SYSCTL_HANDLER_ARGS)
2301 {
2302 	int	*name = (int *)arg1;
2303 	int	error, n;
2304 	struct xswdev xs;
2305 	struct swdevt *sp;
2306 
2307 	if (arg2 != 1) /* name length */
2308 		return (EINVAL);
2309 
2310 	n = 0;
2311 	mtx_lock(&sw_dev_mtx);
2312 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2313 		if (n == *name) {
2314 			mtx_unlock(&sw_dev_mtx);
2315 			xs.xsw_version = XSWDEV_VERSION;
2316 			xs.xsw_dev = sp->sw_udev;
2317 			xs.xsw_flags = sp->sw_flags;
2318 			xs.xsw_nblks = sp->sw_nblks;
2319 			xs.xsw_used = sp->sw_used;
2320 
2321 			error = SYSCTL_OUT(req, &xs, sizeof(xs));
2322 			return (error);
2323 		}
2324 		n++;
2325 	}
2326 	mtx_unlock(&sw_dev_mtx);
2327 	return (ENOENT);
2328 }
2329 
2330 SYSCTL_INT(_vm, OID_AUTO, nswapdev, CTLFLAG_RD, &nswapdev, 0,
2331     "Number of swap devices");
2332 SYSCTL_NODE(_vm, OID_AUTO, swap_info, CTLFLAG_RD, sysctl_vm_swap_info,
2333     "Swap statistics by device");
2334 
2335 /*
2336  * vmspace_swap_count() - count the approximate swap usage in pages for a
2337  *			  vmspace.
2338  *
2339  *	The map must be locked.
2340  *
2341  *	Swap usage is determined by taking the proportional swap used by
2342  *	VM objects backing the VM map.  To make up for fractional losses,
2343  *	if the VM object has any swap use at all, the associated map
2344  *	entries count for at least 1 swap page.
2345  */
2346 int
2347 vmspace_swap_count(struct vmspace *vmspace)
2348 {
2349 	vm_map_t map = &vmspace->vm_map;
2350 	vm_map_entry_t cur;
2351 	int count = 0;
2352 
2353 	for (cur = map->header.next; cur != &map->header; cur = cur->next) {
2354 		vm_object_t object;
2355 
2356 		if ((cur->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
2357 		    (object = cur->object.vm_object) != NULL) {
2358 			VM_OBJECT_LOCK(object);
2359 			if (object->type == OBJT_SWAP &&
2360 			    object->un_pager.swp.swp_bcount != 0) {
2361 				int n = (cur->end - cur->start) / PAGE_SIZE;
2362 
2363 				count += object->un_pager.swp.swp_bcount *
2364 				    SWAP_META_PAGES * n / object->size + 1;
2365 			}
2366 			VM_OBJECT_UNLOCK(object);
2367 		}
2368 	}
2369 	return (count);
2370 }
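
/*
 * Worked example (hypothetical numbers, and assuming SWAP_META_PAGES
 * is 16): for an object of size 1024 pages with swp_bcount == 4 hash
 * entries, a map entry covering n == 256 pages contributes
 * 4 * 16 * 256 / 1024 + 1 == 17 pages to the estimate.
 */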
2371 
2372 /*
2373  * GEOM backend
2374  *
2375  * Swapping onto disk devices.
2376  *
2377  */
2378 
2379 static struct g_class g_swap_class = {
2380 	.name = "SWAP",
2381 };
2382 
2383 DECLARE_GEOM_CLASS(g_swap_class, g_class);
2384 
2385 
2386 static void
2387 swapgeom_done(struct bio *bp2)
2388 {
2389 	struct buf *bp;
2390 
2391 	bp = bp2->bio_caller2;
2392 	if (bp2->bio_error)
2393 		bp->b_ioflags |= BIO_ERROR;
2394 	mtx_lock(&Giant);
2395 	bufdone(bp);
2396 	mtx_unlock(&Giant);
2397 	g_destroy_bio(bp2);
2398 }
2399 
2400 static void
2401 swapgeom_strategy(struct buf *bp, struct swdevt *sp)
2402 {
2403 	struct bio *bio;
2404 	struct g_consumer *cp;
2405 
2406 	cp = sp->sw_id;
2407 	if (cp == NULL) {
2408 		bp->b_error = ENXIO;
2409 		bp->b_ioflags |= BIO_ERROR;
2410 		bufdone(bp);
2411 		return;
2412 	}
2413 	bio = g_clone_bio(&bp->b_io);
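	/*
	 * XXX: g_clone_bio() allocates without sleeping and may in
	 * principle return NULL under memory pressure; that failure is
	 * not handled here.
	 */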
2414 	bio->bio_caller2 = bp;
2415 	bio->bio_offset = (bp->b_blkno - sp->sw_first) * PAGE_SIZE;
2416 	bio->bio_length = bp->b_bcount;
2417 	bio->bio_done = swapgeom_done;
2418 	g_io_request(bio, cp);
2419 	return;
2420 }
2421 
2422 static void
2423 swapgeom_orphan(struct g_consumer *cp)
2424 {
2425 	struct swdevt *sp;
2426 
2427 	mtx_lock(&sw_dev_mtx);
2428 	TAILQ_FOREACH(sp, &swtailq, sw_list)
2429 		if (sp->sw_id == cp)
2430 			sp->sw_id = NULL;
2431 	mtx_unlock(&sw_dev_mtx);
2432 }
2433 
2434 static void
2435 swapgeom_close_ev(void *arg, int flags)
2436 {
2437 	struct g_consumer *cp;
2438 
2439 	cp = arg;
2440 	g_access_rel(cp, -1, -1, 0);
2441 	g_detach(cp);
2442 	g_destroy_consumer(cp);
2443 }
2444 
2445 static void
2446 swapgeom_close(struct thread *td, struct swdevt *sw)
2447 {
2448 
2449 	/* XXX: direct call when Giant untangled */
2450 	g_waitfor_event(swapgeom_close_ev, sw->sw_id, M_WAITOK, NULL);
2451 }
2452 
2453 
2454 struct swh0h0 {
2455 	dev_t	dev;
2456 	struct vnode *vp;
2457 	int	error;
2458 };
2459 
2460 static void
2461 swapongeom_ev(void *arg, int flags)
2462 {
2463 	struct swh0h0 *swh;
2464 	struct g_provider *pp;
2465 	struct g_consumer *cp;
2466 	static struct g_geom *gp;
2467 	struct swdevt *sp;
2468 	u_long nblks;
2469 	int error;
2470 
2471 	swh = arg;
2472 	swh->error = 0;
2473 	pp = g_dev_getprovider(swh->dev);
2474 	if (pp == NULL) {
2475 		swh->error = ENODEV;
2476 		return;
2477 	}
2478 	mtx_lock(&sw_dev_mtx);
2479 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2480 		cp = sp->sw_id;
2481 		if (cp != NULL && cp->provider == pp) {
2482 			mtx_unlock(&sw_dev_mtx);
2483 			swh->error = EBUSY;
2484 			return;
2485 		}
2486 	}
2487 	mtx_unlock(&sw_dev_mtx);
2488 	if (gp == NULL) {
2489 		gp = g_new_geomf(&g_swap_class, "swap", NULL);
2490 		gp->orphan = swapgeom_orphan;
2491 	}
2492 	cp = g_new_consumer(gp);
2493 	g_attach(cp, pp);
2494 	/*
2495 	 * XXX: Every time you think you can improve the margin for
2496 	 * footshooting, somebody depends on the ability to do so:
2497 	 * savecore(8) wants to write to our swapdev so we cannot
2498 	 * set an exclusive count :-(
2499 	 */
2500 	error = g_access_rel(cp, 1, 1, 0);
2501 	if (error) {
2502 		g_detach(cp);
2503 		g_destroy_consumer(cp);
2504 		swh->error = error;
2505 		return;
2506 	}
2507 	nblks = pp->mediasize / DEV_BSIZE;
2508 	swaponsomething(swh->vp, cp, nblks, swapgeom_strategy,
2509 	    swapgeom_close, dev2udev(swh->dev));
2510 	swh->error = 0;
2511 	return;
2512 }
2513 
2514 static int
2515 swapongeom(struct thread *td, struct vnode *vp)
2516 {
2517 	int error;
2518 	struct swh0h0 swh;
2519 
2520 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2521 
2522 	swh.dev = vp->v_rdev;
2523 	swh.vp = vp;
2524 	swh.error = 0;
2525 	/* XXX: direct call when Giant untangled */
2526 	error = g_waitfor_event(swapongeom_ev, &swh, M_WAITOK, NULL);
2527 	if (!error)
2528 		error = swh.error;
2529 	VOP_UNLOCK(vp, 0, td);
2530 	return (error);
2531 }
2532 
2533 /*
2534  * VNODE backend
2535  *
2536  * This is used mainly for network filesystem (read: probably only tested
2537  * with NFS) swapfiles.
2538  *
2539  */
2540 
2541 static void
2542 swapdev_strategy(struct buf *bp, struct swdevt *sp)
2543 {
2544 	int s;
2545 	struct vnode *vp, *vp2;
2546 
2547 	bp->b_dev = NODEV;
2548 	bp->b_blkno = ctodb(bp->b_blkno - sp->sw_first);
2549 
2550 	vp2 = sp->sw_id;
2551 	vhold(vp2);
2552 	s = splvm();
2553 	if (bp->b_iocmd == BIO_WRITE) {
2554 		vp = bp->b_vp;
2555 		if (vp) {
2556 			VI_LOCK(vp);
2557 			vp->v_numoutput--;
2558 			if ((vp->v_iflag & VI_BWAIT) && vp->v_numoutput <= 0) {
2559 				vp->v_iflag &= ~VI_BWAIT;
2560 				wakeup(&vp->v_numoutput);
2561 			}
2562 			VI_UNLOCK(vp);
2563 		}
2564 		VI_LOCK(vp2);
2565 		vp2->v_numoutput++;
2566 		VI_UNLOCK(vp2);
2567 	}
2568 	bp->b_vp = vp2;
2569 	splx(s);
2570 	bp->b_iooffset = dbtob(bp->b_blkno);
2571 	VOP_STRATEGY(vp2, bp);
2572 	return;
2573 }
2574 
2575 static void
2576 swapdev_close(struct thread *td, struct swdevt *sp)
2577 {
2578 
2579 	VOP_CLOSE(sp->sw_vp, FREAD | FWRITE, td->td_ucred, td);
2580 	vrele(sp->sw_vp);
2581 }
2582 
2583 
2584 static int
2585 swaponvp(struct thread *td, struct vnode *vp, u_long nblks)
2586 {
2587 	struct swdevt *sp;
2588 	int error;
2589 
2590 	if (nblks == 0)
2591 		return (ENXIO);
2592 	mtx_lock(&sw_dev_mtx);
2593 	TAILQ_FOREACH(sp, &swtailq, sw_list) {
2594 		if (sp->sw_id == vp) {
2595 			mtx_unlock(&sw_dev_mtx);
2596 			return (EBUSY);
2597 		}
2598 	}
2599 	mtx_unlock(&sw_dev_mtx);
2600 
2601 	(void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
2602 #ifdef MAC
2603 	error = mac_check_system_swapon(td->td_ucred, vp);
2604 	if (error == 0)
2605 #endif
2606 		error = VOP_OPEN(vp, FREAD | FWRITE, td->td_ucred, td, -1);
2607 	(void) VOP_UNLOCK(vp, 0, td);
2608 	if (error)
2609 		return (error);
2610 
2611 	swaponsomething(vp, vp, nblks, swapdev_strategy, swapdev_close,
2612 	    NOUDEV);
2613 	return (0);
2614 }
2615