xref: /freebsd/sys/vm/swap_pager.c (revision a1a4f1a0d87b594d3f17a97dc0127eec1417e6f6)
1 /*
2  * Copyright (c) 1998 Matthew Dillon,
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1990 University of Utah.
5  * Copyright (c) 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *				New Swap System
41  *				Matthew Dillon
42  *
43  * Radix Bitmap 'blists'.
44  *
45  *	- The new swapper uses the new radix bitmap code.  This should scale
46  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
47  *	  arbitrary degree of fragmentation.
48  *
49  * Features:
50  *
51  *	- on the fly reallocation of swap during putpages.  The new system
52  *	  does not try to keep previously allocated swap blocks for dirty
53  *	  pages.
54  *
55  *	- on the fly deallocation of swap
56  *
57  *	- No more garbage collection required.  Unnecessarily allocated swap
58  *	  blocks only exist for dirty vm_page_t's now and these are already
59  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
60  *	  removal of invalidated swap blocks when a page is destroyed
61  *	  or renamed.
62  *
63  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64  *
65  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
66  *
67  * $FreeBSD$
68  */
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/conf.h>
73 #include <sys/kernel.h>
74 #include <sys/proc.h>
75 #include <sys/buf.h>
76 #include <sys/vnode.h>
77 #include <sys/malloc.h>
78 #include <sys/vmmeter.h>
79 #include <sys/sysctl.h>
80 #include <sys/blist.h>
81 #include <sys/lock.h>
82 
83 #ifndef MAX_PAGEOUT_CLUSTER
84 #define MAX_PAGEOUT_CLUSTER 16
85 #endif
86 
87 #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
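
/*
 * SWB_NPAGES bounds pageout clustering: dmmax ( the device interleave
 * stripe ) is derived from it in swap_pager_init(), and haspage limits
 * its contiguity scan to SWB_NPAGES/2 in each direction.
 */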
88 
89 #include "opt_swap.h"
90 #include <vm/vm.h>
91 #include <vm/vm_prot.h>
92 #include <vm/vm_object.h>
93 #include <vm/vm_page.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vm_pageout.h>
96 #include <vm/swap_pager.h>
97 #include <vm/vm_extern.h>
98 #include <vm/vm_zone.h>
99 
100 #define SWM_FREE	0x02	/* free, period			*/
101 #define SWM_POP		0x04	/* pop out			*/
102 
103 /*
104  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
105  * in the old system.
106  */
107 
108 extern int vm_swap_size;	/* number of free swap blocks, in pages */
109 
110 int swap_pager_full;		/* swap space exhaustion (task killing) */
111 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
112 static int nsw_rcount;		/* free read buffers			*/
113 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
114 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
115 static int nsw_wcount_async_max;/* assigned maximum			*/
116 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
117 static int sw_alloc_interlock;	/* swap pager allocation interlock	*/
118 
119 struct blist *swapblist;
120 static struct swblock **swhash;
121 static int swhash_mask;
122 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
123 
124 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
125         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
126 
127 /*
128  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
129  * of searching a named list by hashing it just a little.
130  */
131 
132 #define NOBJLISTS		8
133 
134 #define NOBJLIST(handle)	\
135 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
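
/*
 * Note: handles are typically pointers; the >> 4 above discards low-order
 * bits which, presumably due to allocator alignment, are the same for
 * most handles and would otherwise bias the list selection.
 */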
136 
137 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
138 struct pagerlst		swap_pager_un_object_list;
139 vm_zone_t		swap_zone;
140 
141 /*
142  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
143  * calls hooked from other parts of the VM system and do not appear here.
144  * (see vm/swap_pager.h).
145  */
146 
147 static vm_object_t
148 		swap_pager_alloc __P((void *handle, vm_ooffset_t size,
149 				      vm_prot_t prot, vm_ooffset_t offset));
150 static void	swap_pager_dealloc __P((vm_object_t object));
151 static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
152 static void	swap_pager_init __P((void));
153 static void	swap_pager_unswapped __P((vm_page_t));
154 static void	swap_pager_strategy __P((vm_object_t, struct buf *));
155 
156 struct pagerops swappagerops = {
157 	swap_pager_init,	/* early system initialization of pager	*/
158 	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
159 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
160 	swap_pager_getpages,	/* pagein				*/
161 	swap_pager_putpages,	/* pageout				*/
162 	swap_pager_haspage,	/* get backing store status for page	*/
163 	swap_pager_unswapped,	/* remove swap related to page		*/
164 	swap_pager_strategy	/* pager strategy call			*/
165 };
166 
167 /*
168  * dmmax is in page-sized chunks with the new swap system.  It was
169  * DEV_BSIZE'd chunks in the old system.
170  *
171  * swap_*() routines are externally accessible.  swp_*() routines are
172  * internal.
173  */
174 
175 int dmmax;
176 static int dmmax_mask;
177 int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
178 int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
179 
180 static __inline void	swp_sizecheck __P((void));
181 static void	swp_pager_sync_iodone __P((struct buf *bp));
182 static void	swp_pager_async_iodone __P((struct buf *bp));
183 
184 /*
185  * Swap bitmap functions
186  */
187 
188 static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
189 static __inline daddr_t	swp_pager_getswapspace __P((int npages));
190 
191 /*
192  * Metadata functions
193  */
194 
195 static void swp_pager_meta_build __P((vm_object_t, daddr_t, daddr_t, int));
196 static void swp_pager_meta_free __P((vm_object_t, daddr_t, daddr_t));
197 static void swp_pager_meta_free_all __P((vm_object_t));
198 static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
199 
200 /*
201  * SWP_SIZECHECK() -	update swap_pager_full indication
202  *
203  *	update the swap_pager_almost_full indication and warn when we are
204  *	about to run out of swap space, using lowat/hiwat hysteresis.
205  *
206  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
207  *
208  *	No restrictions on call
209  *	This routine may not block.
210  *	This routine must be called at splvm()
211  */
212 
213 static __inline void
214 swp_sizecheck()
215 {
216 	if (vm_swap_size < nswap_lowat) {
217 		if (swap_pager_almost_full == 0) {
218 			printf("swap_pager: out of swap space\n");
219 			swap_pager_almost_full = 1;
220 		}
221 	} else {
222 		swap_pager_full = 0;
223 		if (vm_swap_size > nswap_hiwat)
224 			swap_pager_almost_full = 0;
225 	}
226 }
227 
228 /*
229  * SWAP_PAGER_INIT() -	initialize the swap pager!
230  *
231  *	Expected to be started from system init.  NOTE:  This code is run
232  *	before much else so be careful what you depend on.  Most of the VM
233  *	system has yet to be initialized at this point.
234  */
235 
236 static void
237 swap_pager_init()
238 {
239 	/*
240 	 * Initialize object lists
241 	 */
242 	int i;
243 
244 	for (i = 0; i < NOBJLISTS; ++i)
245 		TAILQ_INIT(&swap_pager_object_list[i]);
246 	TAILQ_INIT(&swap_pager_un_object_list);
247 
248 	/*
249 	 * Device Stripe, in PAGE_SIZE'd blocks
250 	 */
251 
252 	dmmax = SWB_NPAGES * 2;
253 	dmmax_mask = ~(dmmax - 1);
254 }
255 
256 /*
257  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
258  *
259  *	Expected to be started from pageout process once, prior to entering
260  *	its main loop.
261  */
262 
263 void
264 swap_pager_swap_init()
265 {
266 	int n;
267 
268 	/*
269 	 * Number of in-transit swap bp operations.  Don't
270 	 * exhaust the pbufs completely.  Make sure we
271 	 * initialize workable values (0 will work for hysteresis
272 	 * but it isn't very efficient).
273 	 *
274 	 * The nsw_cluster_max is constrained by the bp->b_pages[]
275 	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
276 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
277 	 * constrained by the swap device interleave stripe size.
278 	 *
279 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
280 	 * designed to prevent other I/O from having high latencies due to
281 	 * our pageout I/O.  The value 4 works well for one or two active swap
282 	 * devices but is probably a little low if you have more.  Even so,
283 	 * a higher value would probably generate only a limited improvement
284 	 * with three or four active swap devices since the system does not
285 	 * typically have to pageout at extreme bandwidths.   We will want
286  *	at least 2 per swap device, and 4 is a pretty good value if you
287 	 * have one NFS swap device due to the command/ack latency over NFS.
288 	 * So it all works out pretty well.
289 	 */
290 
291 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
292 
293 	nsw_rcount = (nswbuf + 1) / 2;
294 	nsw_wcount_sync = (nswbuf + 3) / 4;
295 	nsw_wcount_async = 4;
296 	nsw_wcount_async_max = nsw_wcount_async;
297 
298 	/*
299 	 * Initialize our zone.  Right now I'm just guessing on the number
300 	 * we need based on the number of pages in the system.  Each swblock
301 	 * can hold 16 pages, so this is probably overkill.
302 	 */
303 
304 	n = cnt.v_page_count * 2;
305 
306 	swap_zone = zinit(
307 	    "SWAPMETA",
308 	    sizeof(struct swblock),
309 	    n,
310 	    ZONE_INTERRUPT,
311 	    1
312 	);
313 
314 	/*
315 	 * Initialize our meta-data hash table.  The swapper does not need to
316 	 * be quite as efficient as the VM system, so we do not use an
317 	 * oversized hash table.
318 	 *
319 	 * 	n: 		size of hash table, must be power of 2
320 	 *	swhash_mask:	hash table index mask
321 	 */
322 
323 	for (n = 1; n < cnt.v_page_count / 4; n <<= 1)
324 		;
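	/*
	 * i.e. n is the smallest power of 2 greater than or equal to a
	 * quarter of the page count, which keeps the average hash chain
	 * short.
	 */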
325 
326 	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
327 	bzero(swhash, sizeof(struct swblock *) * n);
328 
329 	swhash_mask = n - 1;
330 }
331 
332 /*
333  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
334  *			its metadata structures.
335  *
336  *	This routine is called from the mmap and fork code to create a new
337  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
338  *	and then converting it with swp_pager_meta_build().
339  *
340  *	This routine may block in vm_object_allocate() and create a named
341  *	object lookup race, so we must interlock.   We must also run at
342  *	splvm() for the object lookup to handle races with interrupts, but
343  *	we do not have to maintain splvm() in between the lookup and the
344  *	add because (I believe) it is not possible to attempt to create
345  *	a new swap object w/handle when a default object with that handle
346  *	already exists.
347  */
348 
349 static vm_object_t
350 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
351 		 vm_ooffset_t offset)
352 {
353 	vm_object_t object;
354 
355 	if (handle) {
356 		/*
357 		 * Reference existing named region or allocate new one.  There
358 		 * should not be a race here against swp_pager_meta_build()
359 		 * as called from vm_page_remove() in regards to the lookup
360 		 * of the handle.
361 		 */
362 
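		/*
		 * sw_alloc_interlock acts as a simple sleep lock: 1 means
		 * held, -1 means held with at least one sleeper ( so the
		 * holder knows to wakeup() on release ), 0 means free.
		 */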
363 		while (sw_alloc_interlock) {
364 			sw_alloc_interlock = -1;
365 			tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
366 		}
367 		sw_alloc_interlock = 1;
368 
369 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
370 
371 		if (object != NULL) {
372 			vm_object_reference(object);
373 		} else {
374 			object = vm_object_allocate(OBJT_DEFAULT,
375 				OFF_TO_IDX(offset + PAGE_MASK + size));
376 			object->handle = handle;
377 
378 			swp_pager_meta_build(
379 			    object,
380 			    0,
381 			    SWAPBLK_NONE,
382 			    0
383 			);
384 		}
385 
386 		if (sw_alloc_interlock < 0)
387 			wakeup(&sw_alloc_interlock);
388 
389 		sw_alloc_interlock = 0;
390 	} else {
391 		object = vm_object_allocate(OBJT_DEFAULT,
392 			OFF_TO_IDX(offset + PAGE_MASK + size));
393 
394 		swp_pager_meta_build(
395 		    object,
396 		    0,
397 		    SWAPBLK_NONE,
398 		    0
399 		);
400 	}
401 
402 	return (object);
403 }
404 
405 /*
406  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
407  *
408  *	The swap backing for the object is destroyed.  The code is
409  *	designed such that we can reinstantiate it later, but this
410  *	routine is typically called only when the entire object is
411  *	about to be destroyed.
412  *
413  *	This routine formerly blocked, but no longer does.
414  *
415  *	The object must be locked or unreferenceable.
416  */
417 
418 static void
419 swap_pager_dealloc(object)
420 	vm_object_t object;
421 {
422 	/*
423 	 * Remove from list right away so lookups will fail if we block for
424 	 * pageout completion.
425 	 */
426 
427 	if (object->handle == NULL) {
428 		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
429 	} else {
430 		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
431 	}
432 
433 	vm_object_pip_wait(object, "swpdea");
434 
435 	/*
436 	 * Free all remaining metadata.  We only bother to free it from
437 	 * the swap meta data.  We do not attempt to free swapblk's still
438 	 * associated with vm_page_t's for this object.  We do not care
439 	 * if paging is still in progress on some objects.
440 	 */
441 
442 	swp_pager_meta_free_all(object);
443 }
444 
445 /************************************************************************
446  *			SWAP PAGER BITMAP ROUTINES			*
447  ************************************************************************/
448 
449 /*
450  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
451  *
452  *	Allocate swap for the requested number of pages.  The starting
453  *	swap block number (a page index) is returned or SWAPBLK_NONE
454  *	if the allocation failed.
455  *
456  *	Also has the side effect of advising that somebody made a mistake
457  *	when they configured swap and didn't configure enough.
458  *
459  *	Must be called at splvm() to avoid races with bitmap frees from
460  *	vm_page_remove() aka swap_pager_page_removed().
461  *
462  *	This routine may not block
463  *	This routine must be called at splvm().
464  */
465 
466 static __inline daddr_t
467 swp_pager_getswapspace(npages)
468 	int npages;
469 {
470 	daddr_t blk;
471 
472 	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
473 		if (swap_pager_full != 2) {
474 			printf("swap_pager_getswapspace: failed\n");
475 			swap_pager_full = 2;
476 			swap_pager_almost_full = 1;
477 		}
478 	} else {
479 		vm_swap_size -= npages;
480 		swp_sizecheck();
481 	}
482 	return(blk);
483 }
484 
485 /*
486  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
487  *
488  *	This routine returns the specified swap blocks back to the bitmap.
489  *
490  *	Note:  This routine may not block (it could in the old swap code),
491  *	and through the use of the new blist routines it does not block.
492  *
493  *	We must be called at splvm() to avoid races with bitmap frees from
494  *	vm_page_remove() aka swap_pager_page_removed().
495  *
496  *	This routine may not block
497  *	This routine must be called at splvm().
498  */
499 
500 static __inline void
501 swp_pager_freeswapspace(blk, npages)
502 	daddr_t blk;
503 	int npages;
504 {
505 	blist_free(swapblist, blk, npages);
506 	vm_swap_size += npages;
507 	swp_sizecheck();
508 }
509 
510 /*
511  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
512  *				range within an object.
513  *
514  *	This is a globally accessible routine.
515  *
516  *	This routine removes swapblk assignments from swap metadata.
517  *
518  *	The external callers of this routine typically have already destroyed
519  *	or renamed vm_page_t's associated with this range in the object so
520  *	we should be ok.
521  */
522 
523 void
524 swap_pager_freespace(object, start, size)
525 	vm_object_t object;
526 	vm_pindex_t start;
527 	vm_size_t size;
528 {
529 	swp_pager_meta_free(object, start, size);
530 }
531 
532 /*
533  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
534  *			and destroy the source.
535  *
536  *	Copy any valid swapblks from the source to the destination.  In
537  *	cases where both the source and destination have a valid swapblk,
538  *	we keep the destination's.
539  *
540  *	This routine is allowed to block.  It may block allocating metadata
541  *	indirectly through swp_pager_meta_build() or if paging is still in
542  *	progress on the source.
543  *
544  *	XXX vm_page_collapse() kinda expects us not to block because we
545  *	supposedly do not need to allocate memory, but for the moment we
546  *	*may* have to get a little memory from the zone allocator, but
547  *	it is taken from the interrupt memory.  We should be ok.
548  *
549  *	The source object contains no vm_page_t's (which is just as well)
550  *
551  *	The source object is of type OBJT_SWAP.
552  *
553  *	The source and destination objects must be
554  *	locked or inaccessible (XXX are they ?)
555  */
556 
557 void
558 swap_pager_copy(srcobject, dstobject, offset, destroysource)
559 	vm_object_t srcobject;
560 	vm_object_t dstobject;
561 	vm_pindex_t offset;
562 	int destroysource;
563 {
564 	vm_pindex_t i;
565 
566 	/*
567 	 * If destroysource is set, we remove the source object from the
568 	 * swap_pager internal queue now.
569 	 */
570 
571 	if (destroysource) {
572 		if (srcobject->handle == NULL) {
573 			TAILQ_REMOVE(
574 			    &swap_pager_un_object_list,
575 			    srcobject,
576 			    pager_object_list
577 			);
578 		} else {
579 			TAILQ_REMOVE(
580 			    NOBJLIST(srcobject->handle),
581 			    srcobject,
582 			    pager_object_list
583 			);
584 		}
585 	}
586 
587 	/*
588 	 * transfer source to destination.
589 	 */
590 
591 	for (i = 0; i < dstobject->size; ++i) {
592 		daddr_t dstaddr;
593 
594 		/*
595 		 * Locate (without changing) the swapblk on the destination,
596 		 * unless it is invalid in which case free it silently, or
597 		 * if the destination is a resident page, in which case the
598 		 * source is thrown away.
599 		 */
600 
601 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
602 
603 		if (dstaddr == SWAPBLK_NONE) {
604 			/*
605 			 * Destination has no swapblk and is not resident,
606 			 * copy source.
607 			 */
608 			daddr_t srcaddr;
609 
610 			srcaddr = swp_pager_meta_ctl(
611 			    srcobject,
612 			    i + offset,
613 			    SWM_POP
614 			);
615 
616 			if (srcaddr != SWAPBLK_NONE)
617 				swp_pager_meta_build(dstobject, i, srcaddr, 1);
618 		} else {
619 			/*
620 			 * Destination has valid swapblk or it is represented
621 			 * by a resident page.  We destroy the sourceblock.
622 			 */
623 
624 			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
625 		}
626 	}
627 
628 	/*
629 	 * Free left over swap blocks in source.
630 	 *
631 	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
632 	 * double-remove the object from the swap queues.
633 	 */
634 
635 	if (destroysource) {
636 		swp_pager_meta_free_all(srcobject);
637 		/*
638 		 * Reverting the type is not necessary, the caller is going
639 		 * to destroy srcobject directly, but I'm doing it here
640 		 * for consistency since we've removed the object from its
641 		 * queues.
642 		 */
643 		srcobject->type = OBJT_DEFAULT;
644 	}
645 	return;
646 }
647 
648 /*
649  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
650  *				the requested page.
651  *
652  *	We determine whether good backing store exists for the requested
653  *	page and return TRUE if it does, FALSE if it doesn't.
654  *
655  *	If TRUE, we also try to determine how much valid, contiguous backing
656  *	store exists before and after the requested page within a reasonable
657  *	distance.  We do not try to restrict it to the swap device stripe
658  *	(that is handled in getpages/putpages).  It probably isn't worth
659  *	doing here.
660  */
661 
662 boolean_t
663 swap_pager_haspage(object, pindex, before, after)
664 	vm_object_t object;
665 	vm_pindex_t pindex;
666 	int *before;
667 	int *after;
668 {
669 	daddr_t blk0;
670 
671 	/*
672 	 * do we have good backing store at the requested index ?
673 	 */
674 
675 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
676 
677 	if (blk0 & SWAPBLK_NONE) {
678 		if (before)
679 			*before = 0;
680 		if (after)
681 			*after = 0;
682 		return (FALSE);
683 	}
684 
685 	/*
686 	 * find backwards-looking contiguous good backing store
687 	 */
688 
689 	if (before != NULL) {
690 		int i;
691 
692 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
693 			daddr_t blk;
694 
695 			if (i > pindex)
696 				break;
697 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
698 			if (blk & SWAPBLK_NONE)
699 				break;
700 			if (blk != blk0 - i)
701 				break;
702 		}
703 		*before = (i - 1);
704 	}
705 
706 	/*
707 	 * find forward-looking contiguous good backing store
708 	 */
709 
710 	if (after != NULL) {
711 		int i;
712 
713 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
714 			daddr_t blk;
715 
716 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
717 			if (blk & SWAPBLK_NONE)
718 				break;
719 			if (blk != blk0 + i)
720 				break;
721 		}
722 		*after = (i - 1);
723 	}
724 
725 	return (TRUE);
726 }
727 
728 /*
729  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
730  *
731  *	This removes any associated swap backing store, whether valid or
732  *	not, from the page.
733  *
734  *	This routine is typically called when a page is made dirty, at
735  *	which point any associated swap can be freed.  MADV_FREE also
736  *	calls us in a special-case situation
737  *
738  *	NOTE!!!  If the page is clean and the swap was valid, the caller
739  *	should make the page dirty before calling this routine.  This routine
740  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
741  *	depends on it.
742  *
743  *	This routine may not block
744  */
745 
746 static void
747 swap_pager_unswapped(m)
748 	vm_page_t m;
749 {
750 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
751 }
752 
753 /*
754  * SWAP_PAGER_STRATEGY() - read, write, free blocks
755  *
756  *	This implements the vm_pager_strategy() interface to swap and allows
757  *	other parts of the system to directly access swap as backing store
758  *	through vm_objects of type OBJT_SWAP.  This is intended to be a
759  *	cacheless interface ( i.e. caching occurs at higher levels ).
760  *	Therefore we do not maintain any resident pages.  All I/O goes
761  *	directly from and to the swap device.
762  *
763  *	Note that b_blkno is scaled for PAGE_SIZE
764  *
765  *	We currently attempt to run I/O synchronously or asynchronously as
766  *	the caller requests.  This isn't perfect because we lose error
767  *	sequencing when we run multiple ops in parallel to satisfy a request.
768  *	But this is swap, so we let it all hang out.
769  */
770 
771 static void
772 swap_pager_strategy(vm_object_t object, struct buf *bp)
773 {
774 	vm_pindex_t start;
775 	int count;
776 	char *data;
777 	struct buf *nbp = NULL;
778 
779 	if (bp->b_bcount & PAGE_MASK) {
780 		bp->b_error = EINVAL;
781 		bp->b_flags |= B_ERROR | B_INVAL;
782 		biodone(bp);
783 		printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
784 		return;
785 	}
786 
787 	/*
788 	 * Clear error indication, initialize page index, count, data pointer.
789 	 */
790 
791 	bp->b_error = 0;
792 	bp->b_flags &= ~B_ERROR;
793 	bp->b_resid = bp->b_bcount;
794 
795 	start = bp->b_pblkno;
796 	count = howmany(bp->b_bcount, PAGE_SIZE);
797 	data = bp->b_data;
798 
799 	/*
800 	 * Execute strategy function
801 	 */
802 
803 	if (bp->b_flags & B_FREEBUF) {
804 		/*
805 		 * FREE PAGE(s) - destroy underlying swap that is no longer
806 		 *		  needed.
807 		 */
808 		int s;
809 
810 		s = splvm();
811 		swp_pager_meta_free(object, start, count);
812 		splx(s);
813 		bp->b_resid = 0;
814 	} else if (bp->b_flags & B_READ) {
815 		/*
816 		 * READ FROM SWAP - read directly from swap backing store,
817 		 *		    zero-fill as appropriate.
818 		 *
819 		 *	Note: any nbp still under construction when count
820 		 *	reaches zero is flushed by the cleanup code after
821 		 *	the loop.
822 		 */
823 
824 		while (count > 0) {
825 			daddr_t blk;
826 			int s;
827 
828 			s = splvm();
829 			blk = swp_pager_meta_ctl(object, start, 0);
830 			splx(s);
831 
832 			/*
833 			 * Do we have to flush our current collection?
834 			 */
835 
836 			if (
837 			    nbp && (
838 			     (blk & SWAPBLK_NONE) ||
839 			     nbp->b_blkno + btoc(nbp->b_bcount) != blk
840 			    )
841 			) {
842 				++cnt.v_swapin;
843 				cnt.v_swappgsin += btoc(nbp->b_bcount);
844 				flushchainbuf(nbp);
845 				nbp = NULL;
846 			}
847 
848 			/*
849 			 * Add to collection
850 			 */
851 			if (blk & SWAPBLK_NONE) {
852 				s = splbio();
853 				bp->b_resid -= PAGE_SIZE;
854 				splx(s);
855 				bzero(data, PAGE_SIZE);
856 			} else {
857 				if (nbp == NULL) {
858 					nbp = getchainbuf(bp, swapdev_vp, B_READ|B_ASYNC);
859 					nbp->b_blkno = blk;
860 					nbp->b_data = data;
861 				}
862 				nbp->b_bcount += PAGE_SIZE;
863 			}
864 			--count;
865 			++start;
866 			data += PAGE_SIZE;
867 		}
868 	} else {
869 		/*
870 		 * WRITE TO SWAP - [re]allocate swap and write.
871 		 */
872 		while (count > 0) {
873 			int i;
874 			int s;
875 			int n;
876 			daddr_t blk;
877 
878 			n = min(count, BLIST_MAX_ALLOC);
879 			n = min(n, nsw_cluster_max);
880 
881 			s = splvm();
882 			for (;;) {
883 				blk = swp_pager_getswapspace(n);
884 				if (blk != SWAPBLK_NONE)
885 					break;
886 				n >>= 1;
887 				if (n == 0)
888 					break;
889 			}
890 			if (n == 0) {
891 				bp->b_error = ENOMEM;
892 				bp->b_flags |= B_ERROR;
893 				splx(s);
894 				break;
895 			}
896 
897 			/*
898 			 * Oops, too big if it crosses a stripe
899 			 *
900 			 * 1111000000
901 			 *     111111
902 			 *    1000001
903 			 */
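			/*
			 * e.g. with the default SWB_NPAGES of 16, dmmax is 32:
			 * for blk == 28 and n == 8, (28 ^ 36) & ~31 is nonzero,
			 * so the run would cross the stripe boundary at 32.
			 * j = ((28 + 32) & ~31) - 28 = 4: blocks 32-35 are
			 * freed back and n is clipped to 4 ( blocks 28-31 ).
			 */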
904 			if ((blk ^ (blk + n)) & dmmax_mask) {
905 				int j = ((blk + dmmax) & dmmax_mask) - blk;
906 				swp_pager_freeswapspace(blk + j, n - j);
907 				n = j;
908 			}
909 
910 			swp_pager_meta_free(object, start, n);
911 
912 			splx(s);
913 
914 			if (nbp) {
915 				++cnt.v_swapout;
916 				cnt.v_swappgsout += btoc(nbp->b_bcount);
917 				flushchainbuf(nbp);
918 			}
919 
920 			nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
921 
922 			nbp->b_blkno = blk;
923 			nbp->b_data = data;
924 			nbp->b_bcount = PAGE_SIZE * n;
925 
926 			/*
927 			 * Must set dirty range for NFS to work.  b_dirtyoff
928 			 * is already 0.
929 			 */
930 			nbp->b_dirtyend = nbp->b_bcount;
931 
932 			++cnt.v_swapout;
933 			cnt.v_swappgsout += n;
934 
935 			s = splbio();
936 			for (i = 0; i < n; ++i) {
937 				swp_pager_meta_build(
938 				    object,
939 				    start + i,
940 				    blk + i,
941 				    1
942 				);
943 			}
944 			splx(s);
945 
946 			count -= n;
947 			start += n;
948 			data += PAGE_SIZE * n;
949 		}
950 	}
951 
952 	/*
953 	 * Cleanup.  Commit last nbp either async or sync, and either
954 	 * wait for it synchronously or make it auto-biodone itself and
955 	 * the parent bp.
956 	 */
957 
958 	if (nbp) {
959 		if ((bp->b_flags & B_ASYNC) == 0)
960 			nbp->b_flags &= ~B_ASYNC;
961 		if (nbp->b_flags & B_READ) {
962 			++cnt.v_swapin;
963 			cnt.v_swappgsin += btoc(nbp->b_bcount);
964 		} else {
965 			++cnt.v_swapout;
966 			cnt.v_swappgsout += btoc(nbp->b_bcount);
967 		}
968 		flushchainbuf(nbp);
969 	}
970 	if (bp->b_flags & B_ASYNC) {
971 		autochaindone(bp);
972 	} else {
973 		waitchainbuf(bp, 0, 1);
974 	}
975 }
976 
977 /*
978  * SWAP_PAGER_GETPAGES() - bring pages in from swap
979  *
980  *	Attempt to retrieve (m, count) pages from backing store, but make
981  *	sure we retrieve at least m[reqpage].  We try to load in as large
982  *	a chunk surrounding m[reqpage] as is contiguous in swap and which
983  *	belongs to the same object.
984  *
985  *	The code is designed for asynchronous operation and
986  *	immediate-notification of 'reqpage' but tends not to be
987  *	used that way.  Please do not optimize-out this algorithmic
988  *	feature, I intend to improve on it in the future.
989  *
990  *	The parent has a single vm_object_pip_add() reference prior to
991  *	calling us and we should return with the same.
992  *
993  *	The parent has BUSY'd the pages.  We should return with 'm'
994  *	left busy, but the others adjusted.
995  */
996 
997 static int
998 swap_pager_getpages(object, m, count, reqpage)
999 	vm_object_t object;
1000 	vm_page_t *m;
1001 	int count, reqpage;
1002 {
1003 	struct buf *bp;
1004 	vm_page_t mreq;
1005 	int s;
1006 	int i;
1007 	int j;
1008 	daddr_t blk;
1009 	vm_offset_t kva;
1010 	vm_pindex_t lastpindex;
1011 
1012 	mreq = m[reqpage];
1013 
1014 #if !defined(MAX_PERF)
1015 	if (mreq->object != object) {
1016 		panic("swap_pager_getpages: object mismatch %p/%p",
1017 		    object,
1018 		    mreq->object
1019 		);
1020 	}
1021 #endif
1022 	/*
1023 	 * Calculate range to retrieve.  The pages have already been assigned
1024 	 * their swapblks.  We require a *contiguous* range that falls entirely
1025 	 * within a single device stripe.   If we do not supply it, bad things
1026 	 * happen.
1027 	 */
1028 
1029 
1030 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1031 
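	/*
	 * Scan backward then forward from reqpage, accepting only swap
	 * blocks physically contiguous with blk and within the same
	 * device stripe; [i, j) becomes the cluster actually read.
	 */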
1032 	for (i = reqpage - 1; i >= 0; --i) {
1033 		daddr_t iblk;
1034 
1035 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1036 		if (iblk & SWAPBLK_NONE)
1037 			break;
1038 
1039 		if ((blk ^ iblk) & dmmax_mask)
1040 			break;
1041 
1042 		if (blk != iblk + (reqpage - i))
1043 			break;
1044 	}
1045 	++i;
1046 
1047 	for (j = reqpage + 1; j < count; ++j) {
1048 		daddr_t jblk;
1049 
1050 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1051 		if (jblk & SWAPBLK_NONE)
1052 			break;
1053 
1054 		if ((blk ^ jblk) & dmmax_mask)
1055 			break;
1056 
1057 		if (blk != jblk - (j - reqpage))
1058 			break;
1059 	}
1060 
1061 	/*
1062 	 * If blk itself is bad, well, we can't do any I/O.  This should
1063 	 * already be covered as a side effect, but I'm making sure.
1064 	 */
1065 
1066 	if (blk & SWAPBLK_NONE) {
1067 		i = reqpage;
1068 		j = reqpage + 1;
1069 	}
1070 
1071 	/*
1072 	 * free pages outside our collection range.   Note: we never free
1073 	 * mreq, it must remain busy throughout.
1074 	 */
1075 
1076 	{
1077 		int k;
1078 
1079 		for (k = 0; k < i; ++k) {
1080 			vm_page_free(m[k]);
1081 		}
1082 		for (k = j; k < count; ++k) {
1083 			vm_page_free(m[k]);
1084 		}
1085 	}
1086 
1087 	/*
1088 	 * Return VM_PAGER_FAIL if we have nothing
1089 	 * to do.  Return mreq still busy, but the
1090 	 * others unbusied.
1091 	 */
1092 
1093 	if (blk & SWAPBLK_NONE)
1094 		return(VM_PAGER_FAIL);
1095 
1096 
1097 	/*
1098 	 * Get a swap buffer header to perform the IO
1099 	 */
1100 
1101 	bp = getpbuf(&nsw_rcount);
1102 	kva = (vm_offset_t) bp->b_data;
1103 
1104 	/*
1105 	 * map our page(s) into kva for input
1106 	 *
1107 	 * NOTE: B_PAGING is set by pbgetvp()
1108 	 */
1109 
1110 	pmap_qenter(kva, m + i, j - i);
1111 
1112 	bp->b_flags = B_READ | B_CALL;
1113 	bp->b_iodone = swp_pager_async_iodone;
1114 	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
1115 	bp->b_data = (caddr_t) kva;
1116 	crhold(bp->b_rcred);
1117 	crhold(bp->b_wcred);
1118 	/*
1119 	 * b_blkno is in page-sized chunks.  swapblk is valid, too, so
1120 	 * we don't have to mask it against SWAPBLK_MASK.
1121 	 */
1122 	bp->b_blkno = blk - (reqpage - i);
1123 	bp->b_bcount = PAGE_SIZE * (j - i);
1124 	bp->b_bufsize = PAGE_SIZE * (j - i);
1125 	bp->b_pager.pg_reqpage = reqpage - i;
1126 
1127 	{
1128 		int k;
1129 
1130 		for (k = i; k < j; ++k) {
1131 			bp->b_pages[k - i] = m[k];
1132 			vm_page_flag_set(m[k], PG_SWAPINPROG);
1133 		}
1134 	}
1135 	bp->b_npages = j - i;
1136 
1137 	pbgetvp(swapdev_vp, bp);
1138 
1139 	cnt.v_swapin++;
1140 	cnt.v_swappgsin += bp->b_npages;
1141 
1142 	/*
1143 	 * We still hold the lock on mreq, and our automatic completion routine
1144 	 * does not remove it.
1145 	 */
1146 
1147 	vm_object_pip_add(mreq->object, bp->b_npages);
1148 	lastpindex = m[j-1]->pindex;
1149 
1150 	/*
1151 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1152 	 * this point because we automatically release it on completion.
1153 	 * Instead, we look at the one page we are interested in which we
1154 	 * still hold a lock on even through the I/O completion.
1155 	 *
1156 	 * The other pages in our m[] array are also released on completion,
1157 	 * so we cannot assume they are valid anymore either.
1158 	 *
1159 	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1160 	 */
1161 
1162 	BUF_KERNPROC(bp);
1163 	VOP_STRATEGY(bp->b_vp, bp);
1164 
1165 	/*
1166 	 * wait for the page we want to complete.  PG_SWAPINPROG is always
1167 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1168 	 * is set in the meta-data.
1169 	 */
1170 
1171 	s = splvm();
1172 
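	/*
	 * XXX the diagnostic printf below dereferences bp, which strictly
	 * speaking may no longer be valid if the I/O completed while we
	 * slept ( see the lifetime note above ).
	 */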
1173 	while ((mreq->flags & PG_SWAPINPROG) != 0) {
1174 		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1175 		cnt.v_intrans++;
1176 		if (tsleep(mreq, PSWP, "swread", hz*20)) {
1177 			printf(
1178 			    "swap_pager: indefinite wait buffer: device:"
1179 				" %s, blkno: %ld, size: %ld\n",
1180 			    devtoname(bp->b_dev), (long)bp->b_blkno,
1181 			    bp->b_bcount
1182 			);
1183 		}
1184 	}
1185 
1186 	splx(s);
1187 
1188 	/*
1189 	 * mreq is left busied after completion, but all the other pages
1190 	 * are freed.  If we had an unrecoverable read error the page will
1191 	 * not be valid.
1192 	 */
1193 
1194 	if (mreq->valid != VM_PAGE_BITS_ALL) {
1195 		return(VM_PAGER_ERROR);
1196 	} else {
1197 		return(VM_PAGER_OK);
1198 	}
1199 
1200 	/*
1201 	 * A final note: in a low swap situation, we cannot deallocate swap
1202 	 * and mark a page dirty here because the caller is likely to mark
1203 	 * the page clean when we return, causing the page to possibly revert
1204 	 * to all-zero's later.
1205 	 */
1206 }
1207 
1208 /*
1209  *	swap_pager_putpages:
1210  *
1211  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1212  *
1213  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1214  *	are automatically converted to SWAP objects.
1215  *
1216  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
1217  *	vm_page reservation system coupled with properly written VFS devices
1218  *	should ensure that no low-memory deadlock occurs.  This is an area
1219  *	which needs work.
1220  *
1221  *	The parent has N vm_object_pip_add() references prior to
1222  *	calling us and will remove references for rtvals[] that are
1223  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1224  *	completion.
1225  *
1226  *	The parent has soft-busy'd the pages it passes us and will unbusy
1227 	 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1228  *	We need to unbusy the rest on I/O completion.
1229  */
1230 
1231 void
1232 swap_pager_putpages(object, m, count, sync, rtvals)
1233 	vm_object_t object;
1234 	vm_page_t *m;
1235 	int count;
1236 	boolean_t sync;
1237 	int *rtvals;
1238 {
1239 	int i;
1240 	int n = 0;
1241 
1242 #if !defined(MAX_PERF)
1243 	if (count && m[0]->object != object) {
1244 		panic("swap_pager_getpages: object mismatch %p/%p",
1245 		    object,
1246 		    m[0]->object
1247 		);
1248 	}
1249 #endif
1250 	/*
1251 	 * Step 1
1252 	 *
1253 	 * Turn object into OBJT_SWAP
1254 	 * check for bogus sysops
1255 	 * force sync if not pageout process
1256 	 */
1257 
1258 	if (object->type != OBJT_SWAP) {
1259 		swp_pager_meta_build(object, 0, SWAPBLK_NONE, 0);
1260 	}
1261 
1262 	if (curproc != pageproc)
1263 		sync = TRUE;
1264 
1265 	/*
1266 	 * Step 2
1267 	 *
1268 	 * Update nsw parameters from swap_async_max sysctl values.
1269 	 * Do not let the sysop crash the machine with bogus numbers.
1270 	 */
1271 
1272 	if (swap_async_max != nsw_wcount_async_max) {
1273 		int n;
1274 		int s;
1275 
1276 		/*
1277 		 * limit range
1278 		 */
1279 		if ((n = swap_async_max) > nswbuf / 2)
1280 			n = nswbuf / 2;
1281 		if (n < 1)
1282 			n = 1;
1283 		swap_async_max = n;
1284 
1285 		/*
1286 		 * Adjust difference ( if possible ).  If the current async
1287 		 * count is too low, we may not be able to make the adjustment
1288 		 * at this time.
1289 		 */
1290 		s = splvm();
1291 		n -= nsw_wcount_async_max;
1292 		if (nsw_wcount_async + n >= 0) {
1293 			nsw_wcount_async += n;
1294 			nsw_wcount_async_max += n;
1295 			wakeup(&nsw_wcount_async);
1296 		}
1297 		splx(s);
1298 	}
1299 
1300 	/*
1301 	 * Step 3
1302 	 *
1303 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1304 	 * The page is left dirty until the pageout operation completes
1305 	 * successfully.
1306 	 */
1307 
1308 	for (i = 0; i < count; i += n) {
1309 		int s;
1310 		int j;
1311 		struct buf *bp;
1312 		daddr_t blk;
1313 
1314 		/*
1315 		 * Maximum I/O size is limited by a number of factors.
1316 		 */
1317 
1318 		n = min(BLIST_MAX_ALLOC, count - i);
1319 		n = min(n, nsw_cluster_max);
1320 
1321 		/*
1322 		 * Get biggest block of swap we can.  If we fail, fall
1323 		 * back and try to allocate a smaller block.  Don't go
1324 		 * overboard trying to allocate space if it would overly
1325 		 * fragment swap.
1326 		 */
1327 		while (
1328 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1329 		    n > 4
1330 		) {
1331 			n >>= 1;
1332 		}
1333 		if (blk == SWAPBLK_NONE) {
1334 			for (j = 0; j < n; ++j) {
1335 				rtvals[i+j] = VM_PAGER_FAIL;
1336 			}
1337 			continue;
1338 		}
1339 
1340 		/*
1341 		 * Oops, too big if it crosses a stripe
1342 		 *
1343 		 * 1111000000
1344 		 *     111111
1345 		 *    1000001
1346 		 */
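		/*
		 * ( same stripe clipping as in swap_pager_strategy();
		 *   see the worked example there )
		 */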
1347 		if ((blk ^ (blk + n)) & dmmax_mask) {
1348 			j = ((blk + dmmax) & dmmax_mask) - blk;
1349 			swp_pager_freeswapspace(blk + j, n - j);
1350 			n = j;
1351 		}
1352 
1353 		/*
1354 		 * All I/O parameters have been satisfied, build the I/O
1355 		 * request and assign the swap space.
1356 		 *
1357 		 * NOTE: B_PAGING is set by pbgetvp()
1358 		 */
1359 
1360 		if (sync == TRUE) {
1361 			bp = getpbuf(&nsw_wcount_sync);
1362 			bp->b_flags = B_CALL;
1363 		} else {
1364 			bp = getpbuf(&nsw_wcount_async);
1365 			bp->b_flags = B_CALL | B_ASYNC;
1366 		}
1367 		bp->b_spc = NULL;	/* not used, but NULL-out anyway */
1368 
1369 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1370 
1371 		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
1372 		bp->b_bcount = PAGE_SIZE * n;
1373 		bp->b_bufsize = PAGE_SIZE * n;
1374 		bp->b_blkno = blk;
1375 
1376 		crhold(bp->b_rcred);
1377 		crhold(bp->b_wcred);
1378 
1379 		pbgetvp(swapdev_vp, bp);
1380 
1381 		s = splvm();
1382 
1383 		for (j = 0; j < n; ++j) {
1384 			vm_page_t mreq = m[i+j];
1385 
1386 			swp_pager_meta_build(
1387 			    mreq->object,
1388 			    mreq->pindex,
1389 			    blk + j,
1390 			    0
1391 			);
1392 			vm_page_dirty(mreq);
1393 			rtvals[i+j] = VM_PAGER_OK;
1394 
1395 			vm_page_flag_set(mreq, PG_SWAPINPROG);
1396 			bp->b_pages[j] = mreq;
1397 		}
1398 		bp->b_npages = n;
1399 		/*
1400 		 * Must set dirty range for NFS to work.
1401 		 */
1402 		bp->b_dirtyoff = 0;
1403 		bp->b_dirtyend = bp->b_bcount;
1404 
1405 		cnt.v_swapout++;
1406 		cnt.v_swappgsout += bp->b_npages;
1407 		swapdev_vp->v_numoutput++;
1408 
1409 		/*
1410 		 * asynchronous
1411 		 *
1412 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1413 		 */
1414 
1415 		if (sync == FALSE) {
1416 			bp->b_iodone = swp_pager_async_iodone;
1417 			BUF_KERNPROC(bp);
1418 			VOP_STRATEGY(bp->b_vp, bp);
1419 
1420 			for (j = 0; j < n; ++j)
1421 				rtvals[i+j] = VM_PAGER_PEND;
1422 
1423 			splx(s);
1424 			continue;
1425 		}
1426 
1427 		/*
1428 		 * synchronous
1429 		 *
1430 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1431 		 */
1432 
1433 		bp->b_iodone = swp_pager_sync_iodone;
1434 		VOP_STRATEGY(bp->b_vp, bp);
1435 
1436 		/*
1437 		 * Wait for the sync I/O to complete, then update rtvals.
1438 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1439 		 * our async completion routine at the end, thus avoiding a
1440 		 * double-free.
1441 		 */
1442 		while ((bp->b_flags & B_DONE) == 0) {
1443 			tsleep(bp, PVM, "swwrt", 0);
1444 		}
1445 
1446 		for (j = 0; j < n; ++j)
1447 			rtvals[i+j] = VM_PAGER_PEND;
1448 
1449 		/*
1450 		 * Now that we are through with the bp, we can call the
1451 		 * normal async completion, which frees everything up.
1452 		 */
1453 
1454 		swp_pager_async_iodone(bp);
1455 
1456 		splx(s);
1457 	}
1458 }
1459 
1460 /*
1461  *	swp_pager_sync_iodone:
1462  *
1463  *	Completion routine for synchronous reads and writes from/to swap.
1464  *	We just mark the bp as complete and wake up anyone waiting on it.
1465  *
1466  *	This routine may not block.
1467  */
1468 
1469 static void
1470 swp_pager_sync_iodone(bp)
1471 	struct buf *bp;
1472 {
1473 	bp->b_flags |= B_DONE;
1474 	bp->b_flags &= ~B_ASYNC;
1475 	wakeup(bp);
1476 }
1477 
1478 /*
1479  *	swp_pager_async_iodone:
1480  *
1481  *	Completion routine for asynchronous reads and writes from/to swap.
1482  *	Also called manually by synchronous code to finish up a bp.
1483  *
1484  *	WARNING!  This routine may be called from an interrupt.  We cannot
1485  *	mess with swap metadata unless we want to run all our other routines
1486  *	at splbio() too, which I'd rather not do.  We up ourselves
1487  * 	to splvm() because we may call vm_page_free(), which can unlink a
1488  *	page from an object.
1489  *
1490  *	XXX currently I do not believe any object routines protect
1491  *	object->memq at splvm().  The code must be gone over to determine
1492  *	the actual state of the problem.
1493  *
1494  *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
1495  *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
1496  *	unbusy all pages except the 'main' request page.  For WRITE
1497  *	operations, we vm_page_t->busy'd unbusy all pages ( we can do this
1498  *	because we marked them all VM_PAGER_PEND on return from putpages ).
1499  *
1500  *	This routine may not block.
1501  *	This routine is called at splbio()
1502  */
1503 
1504 static void
1505 swp_pager_async_iodone(bp)
1506 	register struct buf *bp;
1507 {
1508 	int s;
1509 	int i;
1510 	vm_object_t object = NULL;
1511 
1512 	s = splvm();
1513 
1514 	bp->b_flags |= B_DONE;
1515 
1516 	/*
1517 	 * report error
1518 	 */
1519 
1520 	if (bp->b_flags & B_ERROR) {
1521 		printf(
1522 		    "swap_pager: I/O error - %s failed; blkno %ld,"
1523 			"size %ld, error %d\n",
1524 		    ((bp->b_flags & B_READ) ? "pagein" : "pageout"),
1525 		    (long)bp->b_blkno,
1526 		    (long)bp->b_bcount,
1527 		    bp->b_error
1528 		);
1529 	}
1530 
1531 	/*
1532 	 * set object.
1533 	 */
1534 
1535 	if (bp->b_npages)
1536 		object = bp->b_pages[0]->object;
1537 
1538 	/*
1539 	 * remove the mapping for kernel virtual
1540 	 */
1541 
1542 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1543 
1544 	/*
1545 	 * cleanup pages.  If an error occurs writing to swap, we are in
1546 	 * very serious trouble.  If it happens to be a disk error, though,
1547 	 * we may be able to recover by reassigning the swap later on.  So
1548 	 * in this case we remove the m->swapblk assignment for the page
1549 	 * but do not return it to the free bitmap.  The erroneous block(s) are
1550 	 * thus never reallocated as swap.  Redirty the page and continue.
1551 	 */
1552 
1553 	for (i = 0; i < bp->b_npages; ++i) {
1554 		vm_page_t m = bp->b_pages[i];
1555 
1556 		vm_page_flag_clear(m, PG_SWAPINPROG);
1557 
1558 		if (bp->b_flags & B_ERROR) {
1559 			/*
1560 			 * If an error occurs I'd love to throw the swapblk
1561 			 * away without freeing it back to swapspace, so it
1562 			 * can never be used again.  But I can't from an
1563 			 * interrupt.
1564 			 */
1565 
1566 			if (bp->b_flags & B_READ) {
1567 				/*
1568 				 * When reading, reqpage needs to stay
1569 				 * locked for the parent, but all other
1570 				 * pages can be freed.  We still want to
1571 				 * wakeup the parent waiting on the page,
1572 				 * though.  ( also: pg_reqpage can be -1 and
1573 				 * not match anything ).
1574 				 *
1575 				 * We have to wake specifically requested pages
1576 				 * up too because we cleared PG_SWAPINPROG and
1577 				 * someone may be waiting for that.
1578 				 *
1579 				 * NOTE: for reads, m->dirty will probably
1580 				 * be overridden by the original caller of
1581 				 * getpages so don't play cute tricks here.
1582 				 *
1583 				 * XXX it may not be legal to free the page
1584 				 * here as this messes with the object->memq's.
1585 				 */
1586 
1587 				m->valid = 0;
1588 				vm_page_flag_clear(m, PG_ZERO);
1589 
1590 				if (i != bp->b_pager.pg_reqpage)
1591 					vm_page_free(m);
1592 				else
1593 					vm_page_flash(m);
1594 				/*
1595 				 * If i == bp->b_pager.pg_reqpage, do not wake
1596 				 * the page up.  The caller needs to.
1597 				 */
1598 			} else {
1599 				/*
1600 				 * If a write error occurs, reactivate page
1601 				 * so it doesn't clog the inactive list,
1602 				 * then finish the I/O.
1603 				 */
1604 				vm_page_dirty(m);
1605 				vm_page_activate(m);
1606 				vm_page_io_finish(m);
1607 			}
1608 		} else if (bp->b_flags & B_READ) {
1609 			/*
1610 			 * For read success, clear dirty bits.  Nobody should
1611 			 * have this page mapped but don't take any chances,
1612 			 * make sure the pmap modify bits are also cleared.
1613 			 *
1614 			 * NOTE: for reads, m->dirty will probably be
1615 			 * overridden by the original caller of getpages so
1616 			 * we cannot set them in order to free the underlying
1617 			 * swap in a low-swap situation.  I don't think we'd
1618 			 * want to do that anyway, but it was an optimization
1619 			 * that existed in the old swapper for a time before
1620 			 * it got ripped out due to precisely this problem.
1621 			 *
1622 			 * clear PG_ZERO in page.
1623 			 *
1624 			 * If not the requested page then deactivate it.
1625 			 *
1626 			 * Note that the requested page, reqpage, is left
1627 			 * busied, but we still have to wake it up.  The
1628 			 * other pages are released (unbusied) by
1629 			 * vm_page_wakeup().  We do not set reqpage's
1630 			 * valid bits here, it is up to the caller.
1631 			 */
1632 
1633 			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1634 			m->valid = VM_PAGE_BITS_ALL;
1635 			vm_page_undirty(m);
1636 			vm_page_flag_clear(m, PG_ZERO);
1637 
1638 			/*
1639 			 * We have to wake specifically requested pages
1640 			 * up too because we cleared PG_SWAPINPROG and
1641 			 * could be waiting for it in getpages.  However,
1642 			 * be sure to not unbusy getpages specifically
1643 			 * requested page - getpages expects it to be
1644 			 * left busy.
1645 			 */
1646 			if (i != bp->b_pager.pg_reqpage) {
1647 				vm_page_deactivate(m);
1648 				vm_page_wakeup(m);
1649 			} else {
1650 				vm_page_flash(m);
1651 			}
1652 		} else {
1653 			/*
1654 			 * For write success, clear the modify and dirty
1655 			 * status, then finish the I/O ( which decrements the
1656 			 * busy count and possibly wakes waiters up ).
1657 			 */
1658 			vm_page_protect(m, VM_PROT_READ);
1659 			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1660 			vm_page_undirty(m);
1661 			vm_page_io_finish(m);
1662 		}
1663 	}
1664 
1665 	/*
1666 	 * adjust pip.  NOTE: the original parent may still have its own
1667 	 * pip refs on the object.
1668 	 */
1669 
1670 	if (object)
1671 		vm_object_pip_wakeupn(object, bp->b_npages);
1672 
1673 	/*
1674 	 * release the physical I/O buffer
1675 	 */
1676 
1677 	relpbuf(
1678 	    bp,
1679 	    ((bp->b_flags & B_READ) ? &nsw_rcount :
1680 		((bp->b_flags & B_ASYNC) ?
1681 		    &nsw_wcount_async :
1682 		    &nsw_wcount_sync
1683 		)
1684 	    )
1685 	);
1686 	splx(s);
1687 }
1688 
1689 /************************************************************************
1690  *				SWAP META DATA 				*
1691  ************************************************************************
1692  *
1693  *	These routines manipulate the swap metadata stored in the
1694  *	OBJT_SWAP object.
1695  *
1696  *	In fact, we just have a few counters in the vm_object_t.  The
1697  *	metadata is actually stored in a hash table.
1698  */
1699 
1700 /*
1701  * SWP_PAGER_HASH() -	hash swap meta data
1702  *
1703  *	This is an inline helper function which hashes the swblock given
1704  *	the object and page index.  It returns a pointer to the pointer
1705  *	to the swblock within the hash chain, or a pointer to a NULL
1706  *	pointer if no matching swblock could be found.
1707  */
1708 
1709 static __inline struct swblock **
1710 swp_pager_hash(vm_object_t object, daddr_t index)
1711 {
1712 	struct swblock **pswap;
1713 	struct swblock *swap;
1714 
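	/*
	 * Round the index down to its SWAP_META_PAGES-aligned swblock
	 * group, then hash the group base together with the object
	 * pointer to select a chain.
	 */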
1715 	index &= ~SWAP_META_MASK;
1716 	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
1717 
1718 	while ((swap = *pswap) != NULL) {
1719 		if (swap->swb_object == object &&
1720 		    swap->swb_index == index
1721 		) {
1722 			break;
1723 		}
1724 		pswap = &swap->swb_hnext;
1725 	}
1726 	return(pswap);
1727 }
1728 
1729 /*
1730  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
1731  *
1732  *	We first convert the object to a swap object if it is a default
1733  *	object.
1734  *
1735  *	The specified swapblk is added to the object's swap metadata.  If
1736  *	the swapblk is not valid, it is freed instead.  Any previously
1737  *	assigned swapblk is freed.
1738  */
1739 
1740 static void
1741 swp_pager_meta_build(
1742 	vm_object_t object,
1743 	daddr_t index,
1744 	daddr_t swapblk,
1745 	int waitok
1746 ) {
1747 	struct swblock *swap;
1748 	struct swblock **pswap;
1749 
1750 	/*
1751 	 * Convert default object to swap object if necessary
1752 	 */
1753 
1754 	if (object->type != OBJT_SWAP) {
1755 		object->type = OBJT_SWAP;
1756 		object->un_pager.swp.swp_bcount = 0;
1757 
1758 		if (object->handle != NULL) {
1759 			TAILQ_INSERT_TAIL(
1760 			    NOBJLIST(object->handle),
1761 			    object,
1762 			    pager_object_list
1763 			);
1764 		} else {
1765 			TAILQ_INSERT_TAIL(
1766 			    &swap_pager_un_object_list,
1767 			    object,
1768 			    pager_object_list
1769 			);
1770 		}
1771 	}
1772 
1773 	/*
1774 	 * Wait for free memory when waitok is TRUE prior to calling the
1775 	 * zone allocator.
1776 	 */
1777 
1778 	while (waitok && cnt.v_free_count == 0) {
1779 		VM_WAIT;
1780 	}
1781 
1782 	/*
1783 	 * If swapblk being added is invalid, just free it.
1784 	 */
1785 
1786 	if (swapblk & SWAPBLK_NONE) {
1787 		if (swapblk != SWAPBLK_NONE) {
1788 			swp_pager_freeswapspace(
1789 			    swapblk & SWAPBLK_MASK,
1790 			    1
1791 			);
1792 			swapblk = SWAPBLK_NONE;
1793 		}
1794 	}
1795 
1796 	/*
1797 	 * Locate the hash entry.  If not found, create one; but if we
1798 	 * aren't adding anything, just return.
1799 	 */
1800 
1801 	pswap = swp_pager_hash(object, index);
1802 
1803 	if ((swap = *pswap) == NULL) {
1804 		int i;
1805 
1806 		if (swapblk == SWAPBLK_NONE)
1807 			return;
1808 
1809 		swap = *pswap = zalloc(swap_zone);
1810 
1811 		swap->swb_hnext = NULL;
1812 		swap->swb_object = object;
1813 		swap->swb_index = index & ~SWAP_META_MASK;
1814 		swap->swb_count = 0;
1815 
1816 		++object->un_pager.swp.swp_bcount;
1817 
1818 		for (i = 0; i < SWAP_META_PAGES; ++i)
1819 			swap->swb_pages[i] = SWAPBLK_NONE;
1820 	}
1821 
1822 	/*
1823 	 * Delete prior contents of metadata
1824 	 */
1825 
1826 	index &= SWAP_META_MASK;
1827 
1828 	if (swap->swb_pages[index] != SWAPBLK_NONE) {
1829 		swp_pager_freeswapspace(
1830 		    swap->swb_pages[index] & SWAPBLK_MASK,
1831 		    1
1832 		);
1833 		--swap->swb_count;
1834 	}
1835 
1836 	/*
1837 	 * Enter block into metadata
1838 	 */
1839 
1840 	swap->swb_pages[index] = swapblk;
1841 	++swap->swb_count;
1842 }
1843 
1844 /*
1845  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1846  *
1847  *	The requested range of blocks is freed, with any associated swap
1848  *	returned to the swap bitmap.
1849  *
1850  *	This routine will free swap metadata structures as they are cleaned
1851  *	out.  This routine does *NOT* operate on swap metadata associated
1852  *	with resident pages.
1853  *
1854  *	This routine must be called at splvm()
1855  */
1856 
1857 static void
1858 swp_pager_meta_free(vm_object_t object, daddr_t index, daddr_t count)
1859 {
1860 	if (object->type != OBJT_SWAP)
1861 		return;
1862 
1863 	while (count > 0) {
1864 		struct swblock **pswap;
1865 		struct swblock *swap;
1866 
1867 		pswap = swp_pager_hash(object, index);
1868 
1869 		if ((swap = *pswap) != NULL) {
1870 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1871 
1872 			if (v != SWAPBLK_NONE) {
1873 				swp_pager_freeswapspace(v, 1);
1874 				swap->swb_pages[index & SWAP_META_MASK] =
1875 					SWAPBLK_NONE;
1876 				if (--swap->swb_count == 0) {
1877 					*pswap = swap->swb_hnext;
1878 					zfree(swap_zone, swap);
1879 					--object->un_pager.swp.swp_bcount;
1880 				}
1881 			}
1882 			--count;
1883 			++index;
1884 		} else {
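			/*
			 * No swblock covers this index; skip ahead to the
			 * start of the next SWAP_META_PAGES-aligned group.
			 */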
1885 			daddr_t n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1886 			count -= n;
1887 			index += n;
1888 		}
1889 	}
1890 }
1891 
1892 /*
1893  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1894  *
1895  *	This routine locates and destroys all swap metadata associated with
1896  *	an object.
1897  */
1898 
1899 static void
1900 swp_pager_meta_free_all(vm_object_t object)
1901 {
1902 	daddr_t index = 0;
1903 
1904 	if (object->type != OBJT_SWAP)
1905 		return;
1906 
1907 	while (object->un_pager.swp.swp_bcount) {
1908 		struct swblock **pswap;
1909 		struct swblock *swap;
1910 
1911 		pswap = swp_pager_hash(object, index);
1912 		if ((swap = *pswap) != NULL) {
1913 			int i;
1914 
1915 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1916 				daddr_t v = swap->swb_pages[i];
1917 				if (v != SWAPBLK_NONE) {
1918 #if !defined(MAX_PERF)
1919 					--swap->swb_count;
1920 #endif
1921 					swp_pager_freeswapspace(
1922 					    v,
1923 					    1
1924 					);
1925 				}
1926 			}
1927 #if !defined(MAX_PERF)
1928 			if (swap->swb_count != 0)
1929 				panic("swap_pager_meta_free_all: swb_count != 0");
1930 #endif
1931 			*pswap = swap->swb_hnext;
1932 			zfree(swap_zone, swap);
1933 			--object->un_pager.swp.swp_bcount;
1934 		}
1935 		index += SWAP_META_PAGES;
1936 #if !defined(MAX_PERF)
1937 		if (index > 0x20000000)
1938 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1939 #endif
1940 	}
1941 }
1942 
1943 /*
1944  * SWP_PAGER_META_CTL() -	misc control of swap and vm_page_t meta data.
1945  *
1946  *	This routine is capable of looking up, popping, or freeing
1947  *	swapblk assignments in the swap meta data or in the vm_page_t.
1948  *	The routine typically returns the swapblk being looked-up, or popped,
1949  *	or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block
1950  *	was invalid.  This routine will automatically free any invalid
1951  *	meta-data swapblks.
1952  *
1953  *	It is not possible to store invalid swapblks in the swap meta data
1954  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1955  *
1956  *	When acting on a busy resident page and paging is in progress, we
1957  *	have to wait until paging is complete but otherwise can act on the
1958  *	busy page.
1959  *
1960  *	SWM_FREE	remove and free swap block from metadata
1961  *
1962  *	SWM_POP		remove from meta data but do not free.. pop it out
1963  */
1964 
1965 static daddr_t
1966 swp_pager_meta_ctl(
1967 	vm_object_t object,
1968 	vm_pindex_t index,
1969 	int flags
1970 ) {
1971 	/*
1972 	 * The meta data only exists if the object is OBJT_SWAP
1973 	 * and even then might not be allocated yet.
1974 	 */
1975 
1976 	if (
1977 	    object->type != OBJT_SWAP ||
1978 	    object->un_pager.swp.swp_bcount == 0
1979 	) {
1980 		return(SWAPBLK_NONE);
1981 	}
1982 
1983 	{
1984 		struct swblock **pswap;
1985 		struct swblock *swap;
1986 		daddr_t r1 = SWAPBLK_NONE;
1987 
1988 		pswap = swp_pager_hash(object, index);
1989 
1990 		index &= SWAP_META_MASK;
1991 
1992 		if ((swap = *pswap) != NULL) {
1993 			r1 = swap->swb_pages[index];
1994 
1995 			if (r1 != SWAPBLK_NONE) {
1996 				if (flags & SWM_FREE) {
1997 					swp_pager_freeswapspace(
1998 					    r1,
1999 					    1
2000 					);
2001 					r1 = SWAPBLK_NONE;
2002 				}
2003 				if (flags & (SWM_FREE|SWM_POP)) {
2004 					swap->swb_pages[index] = SWAPBLK_NONE;
2005 					if (--swap->swb_count == 0) {
2006 						*pswap = swap->swb_hnext;
2007 						zfree(swap_zone, swap);
2008 						--object->un_pager.swp.swp_bcount;
2009 					}
2010 				}
2011 			}
2012 		}
2013 
2014 		return(r1);
2015 	}
2016 	/* not reached */
2017 }
2018 
2019