xref: /freebsd/sys/vm/swap_pager.c (revision 23f282aa31e9b6fceacd449020e936e98d6f2298)
1 /*
2  * Copyright (c) 1998 Matthew Dillon,
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1990 University of Utah.
5  * Copyright (c) 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *				New Swap System
41  *				Matthew Dillon
42  *
43  * Radix Bitmap 'blists'.
44  *
45  *	- The new swapper uses the new radix bitmap code.  This should scale
46  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
47  *	  arbitrary degree of fragmentation.
48  *
49  * Features:
50  *
51  *	- on the fly reallocation of swap during putpages.  The new system
52  *	  does not try to keep previously allocated swap blocks for dirty
53  *	  pages.
54  *
55  *	- on the fly deallocation of swap
56  *
57  *	- No more garbage collection required.  Unnecessarily allocated swap
58  *	  blocks only exist for dirty vm_page_t's now and these are already
59  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
60  *	  removal of invalidated swap blocks when a page is destroyed
61  *	  or renamed.
62  *
63  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64  *
65  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
66  *
67  * $FreeBSD$
68  */
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/conf.h>
73 #include <sys/kernel.h>
74 #include <sys/proc.h>
75 #include <sys/buf.h>
76 #include <sys/vnode.h>
77 #include <sys/malloc.h>
78 #include <sys/vmmeter.h>
79 #include <sys/sysctl.h>
80 #include <sys/blist.h>
81 #include <sys/lock.h>
82 
83 #ifndef MAX_PAGEOUT_CLUSTER
84 #define MAX_PAGEOUT_CLUSTER 16
85 #endif
86 
87 #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
88 
89 #include "opt_swap.h"
90 #include <vm/vm.h>
91 #include <vm/vm_object.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_pager.h>
94 #include <vm/vm_pageout.h>
95 #include <vm/swap_pager.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_zone.h>
98 
99 #define SWM_FREE	0x02	/* free, period			*/
100 #define SWM_POP		0x04	/* pop out			*/
101 
102 /*
103  * vm_swap_size is in page-sized chunks now.  It was in DEV_BSIZE'd chunks
104  * in the old system.
105  */
106 
107 extern int vm_swap_size;	/* number of free swap blocks, in pages */
108 
109 int swap_pager_full;		/* swap space exhaustion (task killing) */
110 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
111 static int nsw_rcount;		/* free read buffers			*/
112 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
113 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
114 static int nsw_wcount_async_max;/* assigned maximum			*/
115 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
116 static int sw_alloc_interlock;	/* swap pager allocation interlock	*/
117 
118 struct blist *swapblist;
119 static struct swblock **swhash;
120 static int swhash_mask;
121 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
122 
123 extern struct vnode *swapdev_vp;	/* from vm_swap.c */
124 
125 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
126         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
127 
128 /*
129  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
130  * of searching a named list by hashing it just a little.
131  */
132 
133 #define NOBJLISTS		8
134 
135 #define NOBJLIST(handle)	\
136 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
137 
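/*
 * Illustrative example (editor's note, not from the original source):
 * a handle value of 0xc0a8d230 selects list
 * (0xc0a8d230 >> 4) & (NOBJLISTS - 1) == 0x0c0a8d23 & 7 == 3, i.e.
 * &swap_pager_object_list[3].  Shifting right by 4 first discards the
 * low bits, which are usually zero for aligned pointer handles and
 * would otherwise bias the distribution.
 */
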
138 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
139 struct pagerlst		swap_pager_un_object_list;
140 vm_zone_t		swap_zone;
141 
142 /*
143  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
144  * calls hooked from other parts of the VM system and do not appear here.
145  * (see vm/swap_pager.h).
146  */
147 
148 static vm_object_t
149 		swap_pager_alloc __P((void *handle, vm_ooffset_t size,
150 				      vm_prot_t prot, vm_ooffset_t offset));
151 static void	swap_pager_dealloc __P((vm_object_t object));
152 static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
153 static void	swap_pager_init __P((void));
154 static void	swap_pager_unswapped __P((vm_page_t));
155 static void	swap_pager_strategy __P((vm_object_t, struct buf *));
156 
157 struct pagerops swappagerops = {
158 	swap_pager_init,	/* early system initialization of pager	*/
159 	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
160 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
161 	swap_pager_getpages,	/* pagein				*/
162 	swap_pager_putpages,	/* pageout				*/
163 	swap_pager_haspage,	/* get backing store status for page	*/
164 	swap_pager_unswapped,	/* remove swap related to page		*/
165 	swap_pager_strategy	/* pager strategy call			*/
166 };
167 
168 /*
169  * dmmax is in page-sized chunks with the new swap system.  It was
170  * in dev-bsized chunks in the old.
171  *
172  * swap_*() routines are externally accessible.  swp_*() routines are
173  * internal.
174  */
175 
176 int dmmax;
177 static int dmmax_mask;
178 int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
179 int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
180 
181 static __inline void	swp_sizecheck __P((void));
182 static void	swp_pager_sync_iodone __P((struct buf *bp));
183 static void	swp_pager_async_iodone __P((struct buf *bp));
184 
185 /*
186  * Swap bitmap functions
187  */
188 
189 static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
190 static __inline daddr_t	swp_pager_getswapspace __P((int npages));
191 
192 /*
193  * Metadata functions
194  */
195 
196 static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
197 static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
198 static void swp_pager_meta_free_all __P((vm_object_t));
199 static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
200 
201 /*
202  * SWP_SIZECHECK() -	update swap_pager_full indication
203  *
204  *	update the swap_pager_almost_full indication and warn when we are
205  *	about to run out of swap space, using lowat/hiwat hysteresis.
206  *
207  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
208  *
209  *	This routine may not block.
210  *	This routine must be called at splvm(); there are no other
211  *	restrictions on callers.
212  */
213 
214 static __inline void
215 swp_sizecheck()
216 {
217 	if (vm_swap_size < nswap_lowat) {
218 		if (swap_pager_almost_full == 0) {
219 			printf("swap_pager: out of swap space\n");
220 			swap_pager_almost_full = 1;
221 		}
222 	} else {
223 		swap_pager_full = 0;
224 		if (vm_swap_size > nswap_hiwat)
225 			swap_pager_almost_full = 0;
226 	}
227 }
228 
229 /*
230  * SWAP_PAGER_INIT() -	initialize the swap pager!
231  *
232  *	Expected to be started from system init.  NOTE:  This code is run
233  *	before much else so be careful what you depend on.  Most of the VM
234  *	system has yet to be initialized at this point.
235  */
236 
237 static void
238 swap_pager_init()
239 {
240 	/*
241 	 * Initialize object lists
242 	 */
243 	int i;
244 
245 	for (i = 0; i < NOBJLISTS; ++i)
246 		TAILQ_INIT(&swap_pager_object_list[i]);
247 	TAILQ_INIT(&swap_pager_un_object_list);
248 
249 	/*
250 	 * Device Stripe, in PAGE_SIZE'd blocks
251 	 */
252 
253 	dmmax = SWB_NPAGES * 2;
254 	dmmax_mask = ~(dmmax - 1);
255 }
256 
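/*
 * Worked example (editor's note; numbers assume the default
 * MAX_PAGEOUT_CLUSTER of 16 above): dmmax = SWB_NPAGES * 2 = 32 pages
 * per stripe and dmmax_mask = ~31.  Callers test whether a run of
 * blocks [blk, blk + n) reaches past blk's stripe with
 * ((blk ^ (blk + n)) & dmmax_mask) != 0.  For blk = 30, n = 4:
 * 30 ^ 34 == 60 and 60 & ~31 == 32, so the run crosses a stripe
 * boundary and must be clipped or flushed (see swap_pager_strategy()
 * and swap_pager_putpages()).
 */
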
257 /*
258  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
259  *
260  *	Expected to be started from pageout process once, prior to entering
261  *	its main loop.
262  */
263 
264 void
265 swap_pager_swap_init()
266 {
267 	int n;
268 
269 	/*
270 	 * Number of in-transit swap bp operations.  Don't
271 	 * exhaust the pbufs completely.  Make sure we
272 	 * initialize workable values (0 will work for hysteresis
273 	 * but it isn't very efficient).
274 	 *
275 	 * The nsw_cluster_max is constrained by the bp->b_pages[]
276 	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
277 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
278 	 * constrained by the swap device interleave stripe size.
279 	 *
280 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
281 	 * designed to prevent other I/O from having high latencies due to
282 	 * our pageout I/O.  The value 4 works well for one or two active swap
283 	 * devices but is probably a little low if you have more.  Even so,
284 	 * a higher value would probably generate only a limited improvement
285 	 * with three or four active swap devices since the system does not
286 	 * typically have to pageout at extreme bandwidths.   We will want
287  *	at least 2 per swap device, and 4 is a pretty good value if you
288 	 * have one NFS swap device due to the command/ack latency over NFS.
289 	 * So it all works out pretty well.
290 	 */
291 
292 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
293 
294 	nsw_rcount = (nswbuf + 1) / 2;
295 	nsw_wcount_sync = (nswbuf + 3) / 4;
296 	nsw_wcount_async = 4;
297 	nsw_wcount_async_max = nsw_wcount_async;
298 
299 	/*
300 	 * Initialize our zone.  Right now I'm just guessing on the number
301 	 * we need based on the number of pages in the system.  Each swblock
302 	 * can hold 16 pages, so this is probably overkill.
303 	 */
304 
305 	n = cnt.v_page_count * 2;
306 
307 	swap_zone = zinit(
308 	    "SWAPMETA",
309 	    sizeof(struct swblock),
310 	    n,
311 	    ZONE_INTERRUPT,
312 	    1
313 	);
314 
315 	/*
316 	 * Initialize our meta-data hash table.  The swapper does not need to
317 	 * be quite as efficient as the VM system, so we do not use an
318 	 * oversized hash table.
319 	 *
320 	 * 	n: 		size of hash table, must be power of 2
321 	 *	swhash_mask:	hash table index mask
322 	 */
323 
324 	for (n = 1; n < cnt.v_page_count / 4; n <<= 1)
325 		;
326 
327 	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK);
328 	bzero(swhash, sizeof(struct swblock *) * n);
329 
330 	swhash_mask = n - 1;
331 }
332 
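/*
 * Sizing example (editor's note): with cnt.v_page_count == 65536 the
 * loop above stops at the smallest power of two >= 65536 / 4, giving
 * n = 16384 hash buckets and swhash_mask = 0x3fff.  Each bucket heads
 * a singly-linked chain of struct swblock, each of which tracks
 * SWAP_META_PAGES swap block assignments.
 */
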
333 /*
334  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
335  *			its metadata structures.
336  *
337  *	This routine is called from the mmap and fork code to create a new
338  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
339  *	and then converting it with swp_pager_meta_build().
340  *
341  *	This routine may block in vm_object_allocate() and create a named
342  *	object lookup race, so we must interlock.   We must also run at
343  *	splvm() for the object lookup to handle races with interrupts, but
344  *	we do not have to maintain splvm() in between the lookup and the
345  *	add because (I believe) it is not possible to attempt to create
346  *	a new swap object w/handle when a default object with that handle
347  *	already exists.
348  */
349 
350 static vm_object_t
351 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
352 		 vm_ooffset_t offset)
353 {
354 	vm_object_t object;
355 
356 	if (handle) {
357 		/*
358 		 * Reference existing named region or allocate new one.  There
359 		 * should not be a race here against swp_pager_meta_build()
360 		 * as called from vm_page_remove() in regards to the lookup
361 		 * of the handle.
362 		 */
363 
364 		while (sw_alloc_interlock) {
365 			sw_alloc_interlock = -1;
366 			tsleep(&sw_alloc_interlock, PVM, "swpalc", 0);
367 		}
368 		sw_alloc_interlock = 1;
369 
370 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
371 
372 		if (object != NULL) {
373 			vm_object_reference(object);
374 		} else {
375 			object = vm_object_allocate(OBJT_DEFAULT,
376 				OFF_TO_IDX(offset + PAGE_MASK + size));
377 			object->handle = handle;
378 
379 			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
380 		}
381 
382 		if (sw_alloc_interlock < 0)
383 			wakeup(&sw_alloc_interlock);
384 
385 		sw_alloc_interlock = 0;
386 	} else {
387 		object = vm_object_allocate(OBJT_DEFAULT,
388 			OFF_TO_IDX(offset + PAGE_MASK + size));
389 
390 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
391 	}
392 
393 	return (object);
394 }
395 
396 /*
397  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
398  *
399  *	The swap backing for the object is destroyed.  The code is
400  *	designed such that we can reinstantiate it later, but this
401  *	routine is typically called only when the entire object is
402  *	about to be destroyed.
403  *
404  *	This routine may block while waiting for paging to complete.
405  *
406  *	The object must be locked or unreferenceable.
407  */
408 
409 static void
410 swap_pager_dealloc(object)
411 	vm_object_t object;
412 {
413 	int s;
414 
415 	/*
416 	 * Remove from list right away so lookups will fail if we block for
417 	 * pageout completion.
418 	 */
419 
420 	if (object->handle == NULL) {
421 		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
422 	} else {
423 		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
424 	}
425 
426 	vm_object_pip_wait(object, "swpdea");
427 
428 	/*
429 	 * Free all remaining metadata.  We only bother to free it from
430 	 * the swap meta data.  We do not attempt to free swapblk's still
431 	 * associated with vm_page_t's for this object.  We do not care
432 	 * if paging is still in progress on some objects.
433 	 */
434 	s = splvm();
435 	swp_pager_meta_free_all(object);
436 	splx(s);
437 }
438 
439 /************************************************************************
440  *			SWAP PAGER BITMAP ROUTINES			*
441  ************************************************************************/
442 
443 /*
444  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
445  *
446  *	Allocate swap for the requested number of pages.  The starting
447  *	swap block number (a page index) is returned or SWAPBLK_NONE
448  *	if the allocation failed.
449  *
450  *	Also has the side effect of advising that somebody made a mistake
451  *	when they configured swap and didn't configure enough.
452  *
453  *	Must be called at splvm() to avoid races with bitmap frees from
454  *	vm_page_remove() aka swap_pager_page_removed().
455  *
456  *	This routine may not block
457  *	This routine must be called at splvm().
458  */
459 
460 static __inline daddr_t
461 swp_pager_getswapspace(npages)
462 	int npages;
463 {
464 	daddr_t blk;
465 
466 	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
467 		if (swap_pager_full != 2) {
468 			printf("swap_pager_getswapspace: failed\n");
469 			swap_pager_full = 2;
470 			swap_pager_almost_full = 1;
471 		}
472 	} else {
473 		vm_swap_size -= npages;
474 		swp_sizecheck();
475 	}
476 	return(blk);
477 }
478 
479 /*
480  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
481  *
482  *	This routine returns the specified swap blocks back to the bitmap.
483  *
484  *	Note:  This routine may not block (it could in the old swap code),
485  *	and through the use of the new blist routines it does not block.
486  *
487  *	We must be called at splvm() to avoid races with bitmap frees from
488  *	vm_page_remove() aka swap_pager_page_removed().
489  *
490  *	This routine may not block
491  *	This routine must be called at splvm().
492  */
493 
494 static __inline void
495 swp_pager_freeswapspace(blk, npages)
496 	daddr_t blk;
497 	int npages;
498 {
499 	blist_free(swapblist, blk, npages);
500 	vm_swap_size += npages;
501 	swp_sizecheck();
502 }
503 
504 /*
505  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
506  *				range within an object.
507  *
508  *	This is a globally accessible routine.
509  *
510  *	This routine removes swapblk assignments from swap metadata.
511  *
512  *	The external callers of this routine typically have already destroyed
513  *	or renamed vm_page_t's associated with this range in the object so
514  *	we should be ok.
515  *
516  *	This routine may be called at any spl.  We raise our spl to splvm()
517  *	temporarily in order to perform the metadata removal.
518  */
519 
520 void
521 swap_pager_freespace(object, start, size)
522 	vm_object_t object;
523 	vm_pindex_t start;
524 	vm_size_t size;
525 {
526 	int s = splvm();
527 	swp_pager_meta_free(object, start, size);
528 	splx(s);
529 }
530 
531 /*
532  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
533  *
534  *	Assigns swap blocks to the specified range within the object.  The
535  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
536  *
537  *	Returns 0 on success, -1 on failure.
538  */
539 
540 int
541 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
542 {
543 	int s;
544 	int n = 0;
545 	daddr_t blk = SWAPBLK_NONE;
546 	vm_pindex_t beg = start;	/* save start index */
547 
548 	s = splvm();
549 	while (size) {
550 		if (n == 0) {
551 			n = BLIST_MAX_ALLOC;
552 			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
553 				n >>= 1;
554 				if (n == 0) {
555 					swp_pager_meta_free(object, beg, start - beg);
556 					splx(s);
557 					return(-1);
558 				}
559 			}
560 		}
561 		swp_pager_meta_build(object, start, blk);
562 		--size;
563 		++start;
564 		++blk;
565 		--n;
566 	}
567 	swp_pager_meta_free(object, start, n);
568 	splx(s);
569 	return(0);
570 }
571 
572 /*
573  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
574  *			and destroy the source.
575  *
576  *	Copy any valid swapblks from the source to the destination.  In
577  *	cases where both the source and destination have a valid swapblk,
578  *	we keep the destination's.
579  *
580  *	This routine is allowed to block.  It may block allocating metadata
581  *	indirectly through swp_pager_meta_build() or if paging is still in
582  *	progress on the source.
583  *
584  *	This routine can be called at any spl
585  *
586  *	XXX vm_page_collapse() kinda expects us not to block because we
587  *	supposedly do not need to allocate memory.  For the moment we
588  *	*may* have to get a little memory from the zone allocator, but
589  *	it is taken from the interrupt memory, so we should be ok.
590  *
591  *	The source object contains no vm_page_t's (which is just as well)
592  *
593  *	The source object is of type OBJT_SWAP.
594  *
595  *	The source and destination objects must be locked or
596  *	inaccessible (XXX are they ?)
597  */
598 
599 void
600 swap_pager_copy(srcobject, dstobject, offset, destroysource)
601 	vm_object_t srcobject;
602 	vm_object_t dstobject;
603 	vm_pindex_t offset;
604 	int destroysource;
605 {
606 	vm_pindex_t i;
607 	int s;
608 
609 	s = splvm();
610 
611 	/*
612 	 * If destroysource is set, we remove the source object from the
613 	 * swap_pager internal queue now.
614 	 */
615 
616 	if (destroysource) {
617 		if (srcobject->handle == NULL) {
618 			TAILQ_REMOVE(
619 			    &swap_pager_un_object_list,
620 			    srcobject,
621 			    pager_object_list
622 			);
623 		} else {
624 			TAILQ_REMOVE(
625 			    NOBJLIST(srcobject->handle),
626 			    srcobject,
627 			    pager_object_list
628 			);
629 		}
630 	}
631 
632 	/*
633 	 * transfer source to destination.
634 	 */
635 
636 	for (i = 0; i < dstobject->size; ++i) {
637 		daddr_t dstaddr;
638 
639 		/*
640 		 * Locate (without changing) the swapblk on the destination,
641 		 * unless it is invalid in which case free it silently, or
642 		 * if the destination is a resident page, in which case the
643 		 * source is thrown away.
644 		 */
645 
646 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
647 
648 		if (dstaddr == SWAPBLK_NONE) {
649 			/*
650 			 * Destination has no swapblk and is not resident,
651 			 * copy source.
652 			 */
653 			daddr_t srcaddr;
654 
655 			srcaddr = swp_pager_meta_ctl(
656 			    srcobject,
657 			    i + offset,
658 			    SWM_POP
659 			);
660 
661 			if (srcaddr != SWAPBLK_NONE)
662 				swp_pager_meta_build(dstobject, i, srcaddr);
663 		} else {
664 			/*
665 			 * Destination has valid swapblk or it is represented
666 			 * by a resident page.  We destroy the sourceblock.
667 			 */
668 
669 			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
670 		}
671 	}
672 
673 	/*
674 	 * Free left over swap blocks in source.
675 	 *
676  *	We have to revert the type to OBJT_DEFAULT so we do not accidentally
677 	 * double-remove the object from the swap queues.
678 	 */
679 
680 	if (destroysource) {
681 		swp_pager_meta_free_all(srcobject);
682 		/*
683 		 * Reverting the type is not necessary, the caller is going
684 		 * to destroy srcobject directly, but I'm doing it here
685 		 * for consistency since we've removed the object from its
686 		 * queues.
687 		 */
688 		srcobject->type = OBJT_DEFAULT;
689 	}
690 	splx(s);
691 }
692 
693 /*
694  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
695  *				the requested page.
696  *
697  *	We determine whether good backing store exists for the requested
698  *	page and return TRUE if it does, FALSE if it doesn't.
699  *
700  *	If TRUE, we also try to determine how much valid, contiguous backing
701  *	store exists before and after the requested page within a reasonable
702  *	distance.  We do not try to restrict it to the swap device stripe
703  *	(that is handled in getpages/putpages).  It probably isn't worth
704  *	doing here.
705  */
706 
707 boolean_t
708 swap_pager_haspage(object, pindex, before, after)
709 	vm_object_t object;
710 	vm_pindex_t pindex;
711 	int *before;
712 	int *after;
713 {
714 	daddr_t blk0;
715 	int s;
716 
717 	/*
718 	 * do we have good backing store at the requested index ?
719 	 */
720 
721 	s = splvm();
722 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
723 
724 	if (blk0 == SWAPBLK_NONE) {
725 		splx(s);
726 		if (before)
727 			*before = 0;
728 		if (after)
729 			*after = 0;
730 		return (FALSE);
731 	}
732 
733 	/*
734 	 * find backwards-looking contiguous good backing store
735 	 */
736 
737 	if (before != NULL) {
738 		int i;
739 
740 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
741 			daddr_t blk;
742 
743 			if (i > pindex)
744 				break;
745 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
746 			if (blk != blk0 - i)
747 				break;
748 		}
749 		*before = (i - 1);
750 	}
751 
752 	/*
753 	 * find forward-looking contiguous good backing store
754 	 */
755 
756 	if (after != NULL) {
757 		int i;
758 
759 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
760 			daddr_t blk;
761 
762 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
763 			if (blk != blk0 + i)
764 				break;
765 		}
766 		*after = (i - 1);
767 	}
768 	splx(s);
769 	return (TRUE);
770 }
771 
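/*
 * Example (editor's note, with SWB_NPAGES == 16 so the scan distance
 * is at most 7 pages each way): if pindex 50 maps to swap block 1000
 * and the metadata also holds blocks 998-999 at pindex 48-49 and
 * 1001-1003 at pindex 51-53, this routine returns TRUE with
 * *before == 2 and *after == 3.
 */
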
772 /*
773  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
774  *
775  *	This removes any associated swap backing store, whether valid or
776  *	not, from the page.
777  *
778  *	This routine is typically called when a page is made dirty, at
779  *	which point any associated swap can be freed.  MADV_FREE also
780  *	calls us in a special-case situation
781  *
782  *	NOTE!!!  If the page is clean and the swap was valid, the caller
783  *	should make the page dirty before calling this routine.  This routine
784  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
785  *	depends on it.
786  *
787  *	This routine may not block
788  *	This routine must be called at splvm()
789  */
790 
791 static void
792 swap_pager_unswapped(m)
793 	vm_page_t m;
794 {
795 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
796 }
797 
798 /*
799  * SWAP_PAGER_STRATEGY() - read, write, free blocks
800  *
801  *	This implements the vm_pager_strategy() interface to swap and allows
802  *	other parts of the system to directly access swap as backing store
803  *	through vm_objects of type OBJT_SWAP.  This is intended to be a
804  *	cacheless interface ( i.e. caching occurs at higher levels ).
805  *	Therefore we do not maintain any resident pages.  All I/O goes
806  *	directly to and from the swap device.
807  *
808  *	Note that b_blkno is scaled for PAGE_SIZE
809  *
810  *	We currently attempt to run I/O synchronously or asynchronously as
811  *	the caller requests.  This isn't perfect because we lose error
812  *	sequencing when we run multiple ops in parallel to satisfy a request.
813  *	But this is swap, so we let it all hang out.
814  */
815 
816 static void
817 swap_pager_strategy(vm_object_t object, struct buf *bp)
818 {
819 	vm_pindex_t start;
820 	int count;
821 	int s;
822 	char *data;
823 	struct buf *nbp = NULL;
824 
825 	if (bp->b_bcount & PAGE_MASK) {
826 		bp->b_error = EINVAL;
827 		bp->b_ioflags |= BIO_ERROR;
828 		bp->b_flags |= B_INVAL;
829 		bufdone(bp);
830 		printf("swap_pager_strategy: bp %p b_vp %p blk %d size %d, not page bounded\n", bp, bp->b_vp, (int)bp->b_pblkno, (int)bp->b_bcount);
831 		return;
832 	}
833 
834 	/*
835 	 * Clear error indication, initialize page index, count, data pointer.
836 	 */
837 
838 	bp->b_error = 0;
839 	bp->b_ioflags &= ~BIO_ERROR;
840 	bp->b_resid = bp->b_bcount;
841 
842 	start = bp->b_pblkno;
843 	count = howmany(bp->b_bcount, PAGE_SIZE);
844 	data = bp->b_data;
845 
846 	s = splvm();
847 
848 	/*
849 	 * Deal with BIO_DELETE
850 	 */
851 
852 	if (bp->b_iocmd == BIO_DELETE) {
853 		/*
854 		 * FREE PAGE(s) - destroy underlying swap that is no longer
855 		 *		  needed.
856 		 */
857 		swp_pager_meta_free(object, start, count);
858 		splx(s);
859 		bp->b_resid = 0;
860 		bufdone(bp);
861 		return;
862 	}
863 
864 	/*
865 	 * Execute read or write
866 	 */
867 
868 	while (count > 0) {
869 		daddr_t blk;
870 
871 		/*
872 		 * Obtain block.  If block not found and writing, allocate a
873 		 * new block and build it into the object.
874 		 */
875 
876 		blk = swp_pager_meta_ctl(object, start, 0);
877 		if ((blk == SWAPBLK_NONE) && (bp->b_iocmd == BIO_WRITE)) {
878 			blk = swp_pager_getswapspace(1);
879 			if (blk == SWAPBLK_NONE) {
880 				bp->b_error = ENOMEM;
881 				bp->b_ioflags |= BIO_ERROR;
882 				break;
883 			}
884 			swp_pager_meta_build(object, start, blk);
885 		}
886 
887 		/*
888 		 * Do we have to flush our current collection?  Yes if:
889 		 *
890 		 *	- no swap block at this index
891 		 *	- swap block is not contiguous
892 		 *	- we cross a physical disk boundary in the
893 		 *	  stripe.
894 		 */
895 
896 		if (
897 		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
898 		     ((nbp->b_blkno ^ blk) & dmmax_mask)
899 		    )
900 		) {
901 			splx(s);
902 			if (bp->b_iocmd == BIO_READ) {
903 				++cnt.v_swapin;
904 				cnt.v_swappgsin += btoc(nbp->b_bcount);
905 			} else {
906 				++cnt.v_swapout;
907 				cnt.v_swappgsout += btoc(nbp->b_bcount);
908 				nbp->b_dirtyend = nbp->b_bcount;
909 			}
910 			flushchainbuf(nbp);
911 			s = splvm();
912 			nbp = NULL;
913 		}
914 
915 		/*
916 		 * Add new swapblk to nbp, instantiating nbp if necessary.
917 		 * Zero-fill reads are able to take a shortcut.
918 		 */
919 
920 		if (blk == SWAPBLK_NONE) {
921 			/*
922 			 * We can only get here if we are reading.  Since
923 			 * we are at splvm() we can safely modify b_resid,
924 			 * even if chain ops are in progress.
925 			 */
926 			bzero(data, PAGE_SIZE);
927 			bp->b_resid -= PAGE_SIZE;
928 		} else {
929 			if (nbp == NULL) {
930 				nbp = getchainbuf(bp, swapdev_vp, (bp->b_iocmd == BIO_READ) | B_ASYNC);
931 				nbp->b_blkno = blk;
932 				nbp->b_bcount = 0;
933 				nbp->b_data = data;
934 			}
935 			nbp->b_bcount += PAGE_SIZE;
936 		}
937 		--count;
938 		++start;
939 		data += PAGE_SIZE;
940 	}
941 
942 	/*
943 	 *  Flush out last buffer
944 	 */
945 
946 	splx(s);
947 
948 	if (nbp) {
949 		if ((bp->b_flags & B_ASYNC) == 0)
950 			nbp->b_flags &= ~B_ASYNC;
951 		if (nbp->b_iocmd == BIO_READ) {
952 			++cnt.v_swapin;
953 			cnt.v_swappgsin += btoc(nbp->b_bcount);
954 		} else {
955 			++cnt.v_swapout;
956 			cnt.v_swappgsout += btoc(nbp->b_bcount);
957 			nbp->b_dirtyend = nbp->b_bcount;
958 		}
959 		flushchainbuf(nbp);
960 		/* nbp = NULL; */
961 	}
962 
963 	/*
964 	 * Wait for completion.
965 	 */
966 
967 	if (bp->b_flags & B_ASYNC) {
968 		autochaindone(bp);
969 	} else {
970 		waitchainbuf(bp, 0, 1);
971 	}
972 }
973 
974 /*
975  * SWAP_PAGER_GETPAGES() - bring pages in from swap
976  *
977  *	Attempt to retrieve (m, count) pages from backing store, but make
978  *	sure we retrieve at least m[reqpage].  We try to load in as large
979  *	a chunk surrounding m[reqpage] as is contiguous in swap and which
980  *	belongs to the same object.
981  *
982  *	The code is designed for asynchronous operation and
983  *	immediate-notification of 'reqpage' but tends not to be
984  *	used that way.  Please do not optimize-out this algorithmic
985  *	feature, I intend to improve on it in the future.
986  *
987  *	The parent has a single vm_object_pip_add() reference prior to
988  *	calling us and we should return with the same.
989  *
990  *	The parent has BUSY'd the pages.  We should return with 'm'
991  *	left busy, but the others adjusted.
992  */
993 
994 static int
995 swap_pager_getpages(object, m, count, reqpage)
996 	vm_object_t object;
997 	vm_page_t *m;
998 	int count, reqpage;
999 {
1000 	struct buf *bp;
1001 	vm_page_t mreq;
1002 	int s;
1003 	int i;
1004 	int j;
1005 	daddr_t blk;
1006 	vm_offset_t kva;
1007 	vm_pindex_t lastpindex;
1008 
1009 	mreq = m[reqpage];
1010 
1011 	if (mreq->object != object) {
1012 		panic("swap_pager_getpages: object mismatch %p/%p",
1013 		    object,
1014 		    mreq->object
1015 		);
1016 	}
1017 	/*
1018 	 * Calculate range to retrieve.  The pages have already been assigned
1019 	 * their swapblks.  We require a *contiguous* range that falls entirely
1020 	 * within a single device stripe.   If we do not supply it, bad things
1021 	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1022 	 * loops are set up such that the case(s) are handled implicitly.
1023 	 *
1024 	 * The swp_*() calls must be made at splvm().  vm_page_free() does
1025 	 * not need to be, but it will go a little faster if it is.
1026 	 */
1027 
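	/*
	 * Example (editor's note): with count = 5, reqpage = 2 and swap
	 * blocks { 99, 100, 101, 102, SWAPBLK_NONE } backing m[0..4],
	 * the backward scan below settles on i = 0 and the forward scan
	 * on j = 4 (all four blocks fall in the same dmmax stripe), so
	 * m[0..3] are read in a single 4-page I/O and m[4] is freed.
	 */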
1028 	s = splvm();
1029 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1030 
1031 	for (i = reqpage - 1; i >= 0; --i) {
1032 		daddr_t iblk;
1033 
1034 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1035 		if (blk != iblk + (reqpage - i))
1036 			break;
1037 		if ((blk ^ iblk) & dmmax_mask)
1038 			break;
1039 	}
1040 	++i;
1041 
1042 	for (j = reqpage + 1; j < count; ++j) {
1043 		daddr_t jblk;
1044 
1045 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1046 		if (blk != jblk - (j - reqpage))
1047 			break;
1048 		if ((blk ^ jblk) & dmmax_mask)
1049 			break;
1050 	}
1051 
1052 	/*
1053 	 * free pages outside our collection range.   Note: we never free
1054 	 * mreq, it must remain busy throughout.
1055 	 */
1056 
1057 	{
1058 		int k;
1059 
1060 		for (k = 0; k < i; ++k)
1061 			vm_page_free(m[k]);
1062 		for (k = j; k < count; ++k)
1063 			vm_page_free(m[k]);
1064 	}
1065 	splx(s);
1066 
1067 
1068 	/*
1069 	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
1070 	 * still busy, but the others unbusied.
1071 	 */
1072 
1073 	if (blk == SWAPBLK_NONE)
1074 		return(VM_PAGER_FAIL);
1075 
1076 	/*
1077 	 * Get a swap buffer header to perform the IO
1078 	 */
1079 
1080 	bp = getpbuf(&nsw_rcount);
1081 	kva = (vm_offset_t) bp->b_data;
1082 
1083 	/*
1084 	 * map our page(s) into kva for input
1085 	 *
1086 	 * NOTE: B_PAGING is set by pbgetvp()
1087 	 */
1088 
1089 	pmap_qenter(kva, m + i, j - i);
1090 
1091 	bp->b_iocmd = BIO_READ;
1092 	bp->b_iodone = swp_pager_async_iodone;
1093 	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
1094 	bp->b_data = (caddr_t) kva;
1095 	crhold(bp->b_rcred);
1096 	crhold(bp->b_wcred);
1097 	bp->b_blkno = blk - (reqpage - i);
1098 	bp->b_bcount = PAGE_SIZE * (j - i);
1099 	bp->b_bufsize = PAGE_SIZE * (j - i);
1100 	bp->b_pager.pg_reqpage = reqpage - i;
1101 
1102 	{
1103 		int k;
1104 
1105 		for (k = i; k < j; ++k) {
1106 			bp->b_pages[k - i] = m[k];
1107 			vm_page_flag_set(m[k], PG_SWAPINPROG);
1108 		}
1109 	}
1110 	bp->b_npages = j - i;
1111 
1112 	pbgetvp(swapdev_vp, bp);
1113 
1114 	cnt.v_swapin++;
1115 	cnt.v_swappgsin += bp->b_npages;
1116 
1117 	/*
1118 	 * We still hold the lock on mreq, and our automatic completion routine
1119 	 * does not remove it.
1120 	 */
1121 
1122 	vm_object_pip_add(mreq->object, bp->b_npages);
1123 	lastpindex = m[j-1]->pindex;
1124 
1125 	/*
1126 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1127 	 * this point because we automatically release it on completion.
1128 	 * Instead, we look at the one page we are interested in which we
1129 	 * still hold a lock on even through the I/O completion.
1130 	 *
1131 	 * The other pages in our m[] array are also released on completion,
1132 	 * so we cannot assume they are valid anymore either.
1133 	 *
1134 	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1135 	 */
1136 
1137 	BUF_KERNPROC(bp);
1138 	BUF_STRATEGY(bp);
1139 
1140 	/*
1141 	 * wait for the page we want to complete.  PG_SWAPINPROG is always
1142 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1143 	 * is set in the meta-data.
1144 	 */
1145 
1146 	s = splvm();
1147 
1148 	while ((mreq->flags & PG_SWAPINPROG) != 0) {
1149 		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1150 		cnt.v_intrans++;
1151 		if (tsleep(mreq, PSWP, "swread", hz*20)) {
1152 			printf(
1153 			    "swap_pager: indefinite wait buffer: device:"
1154 				" %s, blkno: %ld, size: %ld\n",
1155 			    devtoname(bp->b_dev), (long)bp->b_blkno,
1156 			    bp->b_bcount
1157 			);
1158 		}
1159 	}
1160 
1161 	splx(s);
1162 
1163 	/*
1164 	 * mreq is left busied after completion, but all the other pages
1165 	 * are freed.  If we had an unrecoverable read error the page will
1166 	 * not be valid.
1167 	 */
1168 
1169 	if (mreq->valid != VM_PAGE_BITS_ALL) {
1170 		return(VM_PAGER_ERROR);
1171 	} else {
1172 		return(VM_PAGER_OK);
1173 	}
1174 
1175 	/*
1176 	 * A final note: in a low swap situation, we cannot deallocate swap
1177 	 * and mark a page dirty here because the caller is likely to mark
1178 	 * the page clean when we return, causing the page to possibly revert
1179 	 * to all-zero's later.
1180 	 */
1181 }
1182 
1183 /*
1184  *	swap_pager_putpages:
1185  *
1186  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1187  *
1188  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1189  *	are automatically converted to SWAP objects.
1190  *
1191  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
1192  *	vm_page reservation system coupled with properly written VFS devices
1193  *	should ensure that no low-memory deadlock occurs.  This is an area
1194  *	which needs work.
1195  *
1196  *	The parent has N vm_object_pip_add() references prior to
1197  *	calling us and will remove references for rtvals[] that are
1198  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1199  *	completion.
1200  *
1201  *	The parent has soft-busy'd the pages it passes us and will unbusy
1202  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1203  *	We need to unbusy the rest on I/O completion.
1204  */
1205 
1206 void
1207 swap_pager_putpages(object, m, count, sync, rtvals)
1208 	vm_object_t object;
1209 	vm_page_t *m;
1210 	int count;
1211 	boolean_t sync;
1212 	int *rtvals;
1213 {
1214 	int i;
1215 	int n = 0;
1216 
1217 	if (count && m[0]->object != object) {
1218 		panic("swap_pager_putpages: object mismatch %p/%p",
1219 		    object,
1220 		    m[0]->object
1221 		);
1222 	}
1223 	/*
1224 	 * Step 1
1225 	 *
1226 	 * Turn object into OBJT_SWAP
1227 	 * check for bogus sysops
1228 	 * force sync if not pageout process
1229 	 */
1230 
1231 	if (object->type != OBJT_SWAP)
1232 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1233 
1234 	if (curproc != pageproc)
1235 		sync = TRUE;
1236 
1237 	/*
1238 	 * Step 2
1239 	 *
1240 	 * Update nsw parameters from swap_async_max sysctl values.
1241 	 * Do not let the sysop crash the machine with bogus numbers.
1242 	 */
1243 
1244 	if (swap_async_max != nsw_wcount_async_max) {
1245 		int n;
1246 		int s;
1247 
1248 		/*
1249 		 * limit range
1250 		 */
1251 		if ((n = swap_async_max) > nswbuf / 2)
1252 			n = nswbuf / 2;
1253 		if (n < 1)
1254 			n = 1;
1255 		swap_async_max = n;
1256 
1257 		/*
1258 		 * Adjust difference ( if possible ).  If the current async
1259 		 * count is too low, we may not be able to make the adjustment
1260 		 * at this time.
1261 		 */
1262 		s = splvm();
1263 		n -= nsw_wcount_async_max;
1264 		if (nsw_wcount_async + n >= 0) {
1265 			nsw_wcount_async += n;
1266 			nsw_wcount_async_max += n;
1267 			wakeup(&nsw_wcount_async);
1268 		}
1269 		splx(s);
1270 	}
1271 
1272 	/*
1273 	 * Step 3
1274 	 *
1275 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1276 	 * The page is left dirty until the pageout operation completes
1277 	 * successfully.
1278 	 */
1279 
1280 	for (i = 0; i < count; i += n) {
1281 		int s;
1282 		int j;
1283 		struct buf *bp;
1284 		daddr_t blk;
1285 
1286 		/*
1287 		 * Maximum I/O size is limited by a number of factors.
1288 		 */
1289 
1290 		n = min(BLIST_MAX_ALLOC, count - i);
1291 		n = min(n, nsw_cluster_max);
1292 
1293 		s = splvm();
1294 
1295 		/*
1296 		 * Get biggest block of swap we can.  If we fail, fall
1297 		 * back and try to allocate a smaller block.  Don't go
1298 		 * overboard trying to allocate space if it would overly
1299 		 * fragment swap.
1300 		 */
1301 		while (
1302 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1303 		    n > 4
1304 		) {
1305 			n >>= 1;
1306 		}
1307 		if (blk == SWAPBLK_NONE) {
1308 			for (j = 0; j < n; ++j)
1309 				rtvals[i+j] = VM_PAGER_FAIL;
1310 			splx(s);
1311 			continue;
1312 		}
1313 
1314 		/*
1315 		 * The I/O we are constructing cannot cross a physical
1316 		 * disk boundary in the swap stripe.  Note: we are still
1317 		 * at splvm().
1318 		 */
1319 		if ((blk ^ (blk + n)) & dmmax_mask) {
1320 			j = ((blk + dmmax) & dmmax_mask) - blk;
1321 			swp_pager_freeswapspace(blk + j, n - j);
1322 			n = j;
1323 		}
1324 
1325 		/*
1326 		 * All I/O parameters have been satisfied, build the I/O
1327 		 * request and assign the swap space.
1328 		 *
1329 		 * NOTE: B_PAGING is set by pbgetvp()
1330 		 */
1331 
1332 		if (sync == TRUE) {
1333 			bp = getpbuf(&nsw_wcount_sync);
1334 		} else {
1335 			bp = getpbuf(&nsw_wcount_async);
1336 			bp->b_flags = B_ASYNC;
1337 		}
1338 		bp->b_iocmd = BIO_WRITE;
1339 		bp->b_spc = NULL;	/* not used, but NULL-out anyway */
1340 
1341 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1342 
1343 		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
1344 		bp->b_bcount = PAGE_SIZE * n;
1345 		bp->b_bufsize = PAGE_SIZE * n;
1346 		bp->b_blkno = blk;
1347 
1348 		crhold(bp->b_rcred);
1349 		crhold(bp->b_wcred);
1350 
1351 		pbgetvp(swapdev_vp, bp);
1352 
1353 		for (j = 0; j < n; ++j) {
1354 			vm_page_t mreq = m[i+j];
1355 
1356 			swp_pager_meta_build(
1357 			    mreq->object,
1358 			    mreq->pindex,
1359 			    blk + j
1360 			);
1361 			vm_page_dirty(mreq);
1362 			rtvals[i+j] = VM_PAGER_OK;
1363 
1364 			vm_page_flag_set(mreq, PG_SWAPINPROG);
1365 			bp->b_pages[j] = mreq;
1366 		}
1367 		bp->b_npages = n;
1368 		/*
1369 		 * Must set dirty range for NFS to work.
1370 		 */
1371 		bp->b_dirtyoff = 0;
1372 		bp->b_dirtyend = bp->b_bcount;
1373 
1374 		cnt.v_swapout++;
1375 		cnt.v_swappgsout += bp->b_npages;
1376 		swapdev_vp->v_numoutput++;
1377 
1378 		splx(s);
1379 
1380 		/*
1381 		 * asynchronous
1382 		 *
1383 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1384 		 */
1385 
1386 		if (sync == FALSE) {
1387 			bp->b_iodone = swp_pager_async_iodone;
1388 			BUF_KERNPROC(bp);
1389 			BUF_STRATEGY(bp);
1390 
1391 			for (j = 0; j < n; ++j)
1392 				rtvals[i+j] = VM_PAGER_PEND;
1393 			continue;
1394 		}
1395 
1396 		/*
1397 		 * synchronous
1398 		 *
1399 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1400 		 */
1401 
1402 		bp->b_iodone = swp_pager_sync_iodone;
1403 		BUF_STRATEGY(bp);
1404 
1405 		/*
1406 		 * Wait for the sync I/O to complete, then update rtvals.
1407 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1408 		 * our async completion routine at the end, thus avoiding a
1409 		 * double-free.
1410 		 */
1411 		s = splbio();
1412 
1413 		while ((bp->b_flags & B_DONE) == 0) {
1414 			tsleep(bp, PVM, "swwrt", 0);
1415 		}
1416 
1417 		for (j = 0; j < n; ++j)
1418 			rtvals[i+j] = VM_PAGER_PEND;
1419 
1420 		/*
1421 		 * Now that we are through with the bp, we can call the
1422 		 * normal async completion, which frees everything up.
1423 		 */
1424 
1425 		swp_pager_async_iodone(bp);
1426 
1427 		splx(s);
1428 	}
1429 }
1430 
1431 /*
1432  *	swp_pager_sync_iodone:
1433  *
1434  *	Completion routine for synchronous reads and writes from/to swap.
1435  *	We just mark the bp as complete and wake up anyone waiting on it.
1436  *
1437  *	This routine may not block.  This routine is called at splbio() or better.
1438  */
1439 
1440 static void
1441 swp_pager_sync_iodone(bp)
1442 	struct buf *bp;
1443 {
1444 	bp->b_flags |= B_DONE;
1445 	bp->b_flags &= ~B_ASYNC;
1446 	wakeup(bp);
1447 }
1448 
1449 /*
1450  *	swp_pager_async_iodone:
1451  *
1452  *	Completion routine for asynchronous reads and writes from/to swap.
1453  *	Also called manually by synchronous code to finish up a bp.
1454  *
1455  *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
1456  *	the pages are vm_page_t->busy'd.  For READ operations, we clear
1457  *	PG_BUSY on all pages except the 'main' request page.  For WRITE
1458  *	operations, we drop the busy count on all pages ( we can do this
1459  *	because we marked them all VM_PAGER_PEND on return from putpages ).
1460  *
1461  *	This routine may not block.
1462  *	This routine is called at splbio() or better
1463  *
1464  *	We up ourselves to splvm() as required for various vm_page related
1465  *	calls.
1466  */
1467 
1468 static void
1469 swp_pager_async_iodone(bp)
1470 	register struct buf *bp;
1471 {
1472 	int s;
1473 	int i;
1474 	vm_object_t object = NULL;
1475 
1476 	bp->b_flags |= B_DONE;
1477 
1478 	/*
1479 	 * report error
1480 	 */
1481 
1482 	if (bp->b_ioflags & BIO_ERROR) {
1483 		printf(
1484 		    "swap_pager: I/O error - %s failed; blkno %ld,"
1485 			"size %ld, error %d\n",
1486 		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1487 		    (long)bp->b_blkno,
1488 		    (long)bp->b_bcount,
1489 		    bp->b_error
1490 		);
1491 	}
1492 
1493 	/*
1494 	 * set object, raise to splvm().
1495 	 */
1496 
1497 	if (bp->b_npages)
1498 		object = bp->b_pages[0]->object;
1499 	s = splvm();
1500 
1501 	/*
1502 	 * remove the mapping for kernel virtual
1503 	 */
1504 
1505 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1506 
1507 	/*
1508 	 * cleanup pages.  If an error occurs writing to swap, we are in
1509 	 * very serious trouble.  If it happens to be a disk error, though,
1510 	 * we may be able to recover by reassigning the swap later on.  So
1511 	 * in this case we remove the m->swapblk assignment for the page
1512 	 * but do not free it in the rlist.  The erroneous block(s) are thus
1513 	 * never reallocated as swap.  Redirty the page and continue.
1514 	 */
1515 
1516 	for (i = 0; i < bp->b_npages; ++i) {
1517 		vm_page_t m = bp->b_pages[i];
1518 
1519 		vm_page_flag_clear(m, PG_SWAPINPROG);
1520 
1521 		if (bp->b_ioflags & BIO_ERROR) {
1522 			/*
1523 			 * If an error occurs I'd love to throw the swapblk
1524 			 * away without freeing it back to swapspace, so it
1525 			 * can never be used again.  But I can't from an
1526 			 * interrupt.
1527 			 */
1528 
1529 			if (bp->b_iocmd == BIO_READ) {
1530 				/*
1531 				 * When reading, reqpage needs to stay
1532 				 * locked for the parent, but all other
1533 				 * pages can be freed.  We still want to
1534 				 * wakeup the parent waiting on the page,
1535 				 * though.  ( also: pg_reqpage can be -1 and
1536 				 * not match anything ).
1537 				 *
1538 				 * We have to wake specifically requested pages
1539 				 * up too because we cleared PG_SWAPINPROG and
1540 				 * someone may be waiting for that.
1541 				 *
1542 				 * NOTE: for reads, m->dirty will probably
1543 				 * be overridden by the original caller of
1544 				 * getpages so don't play cute tricks here.
1545 				 *
1546 				 * XXX it may not be legal to free the page
1547 				 * here as this messes with the object->memq's.
1548 				 */
1549 
1550 				m->valid = 0;
1551 				vm_page_flag_clear(m, PG_ZERO);
1552 
1553 				if (i != bp->b_pager.pg_reqpage)
1554 					vm_page_free(m);
1555 				else
1556 					vm_page_flash(m);
1557 				/*
1558 				 * If i == bp->b_pager.pg_reqpage, do not wake
1559 				 * the page up.  The caller needs to.
1560 				 */
1561 			} else {
1562 				/*
1563 				 * If a write error occurs, reactivate page
1564 				 * so it doesn't clog the inactive list,
1565 				 * then finish the I/O.
1566 				 */
1567 				vm_page_dirty(m);
1568 				vm_page_activate(m);
1569 				vm_page_io_finish(m);
1570 			}
1571 		} else if (bp->b_iocmd == BIO_READ) {
1572 			/*
1573 			 * For read success, clear dirty bits.  Nobody should
1574 			 * have this page mapped but don't take any chances,
1575 			 * make sure the pmap modify bits are also cleared.
1576 			 *
1577 			 * NOTE: for reads, m->dirty will probably be
1578 			 * overridden by the original caller of getpages so
1579 			 * we cannot set them in order to free the underlying
1580 			 * swap in a low-swap situation.  I don't think we'd
1581 			 * want to do that anyway, but it was an optimization
1582 			 * that existed in the old swapper for a time before
1583 			 * it got ripped out due to precisely this problem.
1584 			 *
1585 			 * clear PG_ZERO in page.
1586 			 *
1587 			 * If not the requested page then deactivate it.
1588 			 *
1589 			 * Note that the requested page, reqpage, is left
1590 			 * busied, but we still have to wake it up.  The
1591 			 * other pages are released (unbusied) by
1592 			 * vm_page_wakeup().  We do not set reqpage's
1593 			 * valid bits here, it is up to the caller.
1594 			 */
1595 
1596 			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1597 			m->valid = VM_PAGE_BITS_ALL;
1598 			vm_page_undirty(m);
1599 			vm_page_flag_clear(m, PG_ZERO);
1600 
1601 			/*
1602 			 * We have to wake specifically requested pages
1603 			 * up too because we cleared PG_SWAPINPROG and
1604 			 * could be waiting for it in getpages.  However,
1605 			 * be sure to not unbusy getpages specifically
1606 			 * requested page - getpages expects it to be
1607 			 * left busy.
1608 			 */
1609 			if (i != bp->b_pager.pg_reqpage) {
1610 				vm_page_deactivate(m);
1611 				vm_page_wakeup(m);
1612 			} else {
1613 				vm_page_flash(m);
1614 			}
1615 		} else {
1616 			/*
1617 			 * For write success, clear the modify and dirty
1618 			 * status, then finish the I/O ( which decrements the
1619 			 * busy count and possibly wakes waiter's up ).
1620 			 */
1621 			vm_page_protect(m, VM_PROT_READ);
1622 			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
1623 			vm_page_undirty(m);
1624 			vm_page_io_finish(m);
1625 		}
1626 	}
1627 
1628 	/*
1629 	 * adjust pip.  NOTE: the original parent may still have its own
1630 	 * pip refs on the object.
1631 	 */
1632 
1633 	if (object)
1634 		vm_object_pip_wakeupn(object, bp->b_npages);
1635 
1636 	/*
1637 	 * release the physical I/O buffer
1638 	 */
1639 
1640 	relpbuf(
1641 	    bp,
1642 	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1643 		((bp->b_flags & B_ASYNC) ?
1644 		    &nsw_wcount_async :
1645 		    &nsw_wcount_sync
1646 		)
1647 	    )
1648 	);
1649 	splx(s);
1650 }
1651 
1652 /************************************************************************
1653  *				SWAP META DATA 				*
1654  ************************************************************************
1655  *
1656  *	These routines manipulate the swap metadata stored in the
1657  *	OBJT_SWAP object.  All swp_*() routines must be called at
1658  *	splvm() because swap can be freed up by the low level vm_page
1659  *	code which might be called from interrupts beyond what splbio() covers.
1660  *
1661  *	Swap metadata is implemented with a global hash and not directly
1662  *	linked into the object.  Instead the object simply contains
1663  *	appropriate tracking counters.
1664  */
1665 
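/*
 * Layout example (editor's note; field names are from vm/swap_pager.h
 * and SWAP_META_PAGES is assumed to be 16): an object with swap
 * assigned at pindex 3 and pindex 70 owns two struct swblock entries
 * in the global hash: one with swb_index 0 holding its block in
 * swb_pages[3], and one with swb_index 64 holding its block in
 * swb_pages[6].  The object's swp_bcount is 2.
 */
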
1666 /*
1667  * SWP_PAGER_HASH() -	hash swap meta data
1668  *
1669  *	This is an inline helper function which hashes the swapblk given
1670  *	the object and page index.  It returns a pointer to a pointer
1671  *	to the swblock, or a pointer to a NULL pointer if it could not
1672  *	find a swblock.
1673  *
1674  *	This routine must be called at splvm().
1675  */
1676 
1677 static __inline struct swblock **
1678 swp_pager_hash(vm_object_t object, vm_pindex_t index)
1679 {
1680 	struct swblock **pswap;
1681 	struct swblock *swap;
1682 
1683 	index &= ~SWAP_META_MASK;
1684 	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
1685 
1686 	while ((swap = *pswap) != NULL) {
1687 		if (swap->swb_object == object &&
1688 		    swap->swb_index == index
1689 		) {
1690 			break;
1691 		}
1692 		pswap = &swap->swb_hnext;
1693 	}
1694 	return(pswap);
1695 }
1696 
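/*
 * Example (editor's note, assuming SWAP_META_PAGES == 16 and thus
 * SWAP_META_MASK == 15): a lookup for pindex 37 first rounds the
 * index down to 32, then walks the chain at
 * swhash[(32 ^ (int)(intptr_t)object) & swhash_mask] for a swblock
 * with swb_object == object and swb_index == 32; slot 37 & 15 == 5
 * of its swb_pages[] holds the block, if any.
 */
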
1697 /*
1698  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
1699  *
1700  *	We first convert the object to a swap object if it is a default
1701  *	object.
1702  *
1703  *	The specified swapblk is added to the object's swap metadata.  If
1704  *	the swapblk is not valid, it is freed instead.  Any previously
1705  *	assigned swapblk is freed.
1706  *
1707  *	This routine must be called at splvm(), except when used to convert
1708  *	an OBJT_DEFAULT object into an OBJT_SWAP object.
1709  *
1710  */
1711 
1712 static void
1713 swp_pager_meta_build(
1714 	vm_object_t object,
1715 	vm_pindex_t index,
1716 	daddr_t swapblk
1717 ) {
1718 	struct swblock *swap;
1719 	struct swblock **pswap;
1720 
1721 	/*
1722 	 * Convert default object to swap object if necessary
1723 	 */
1724 
1725 	if (object->type != OBJT_SWAP) {
1726 		object->type = OBJT_SWAP;
1727 		object->un_pager.swp.swp_bcount = 0;
1728 
1729 		if (object->handle != NULL) {
1730 			TAILQ_INSERT_TAIL(
1731 			    NOBJLIST(object->handle),
1732 			    object,
1733 			    pager_object_list
1734 			);
1735 		} else {
1736 			TAILQ_INSERT_TAIL(
1737 			    &swap_pager_un_object_list,
1738 			    object,
1739 			    pager_object_list
1740 			);
1741 		}
1742 	}
1743 
1744 	/*
1745 	 * Locate the hash entry; create it if not found, unless we are not
1746 	 * adding anything, in which case we just return.  If the zone runs out
1747 	 * of space we wait and, since the hash table may have changed, retry.
1748 	 */
1749 
1750 retry:
1751 	pswap = swp_pager_hash(object, index);
1752 
1753 	if ((swap = *pswap) == NULL) {
1754 		int i;
1755 
1756 		if (swapblk == SWAPBLK_NONE)
1757 			return;
1758 
1759 		swap = *pswap = zalloc(swap_zone);
1760 		if (swap == NULL) {
1761 			VM_WAIT;
1762 			goto retry;
1763 		}
1764 		swap->swb_hnext = NULL;
1765 		swap->swb_object = object;
1766 		swap->swb_index = index & ~SWAP_META_MASK;
1767 		swap->swb_count = 0;
1768 
1769 		++object->un_pager.swp.swp_bcount;
1770 
1771 		for (i = 0; i < SWAP_META_PAGES; ++i)
1772 			swap->swb_pages[i] = SWAPBLK_NONE;
1773 	}
1774 
1775 	/*
1776 	 * Delete prior contents of metadata
1777 	 */
1778 
1779 	index &= SWAP_META_MASK;
1780 
1781 	if (swap->swb_pages[index] != SWAPBLK_NONE) {
1782 		swp_pager_freeswapspace(swap->swb_pages[index], 1);
1783 		--swap->swb_count;
1784 	}
1785 
1786 	/*
1787 	 * Enter block into metadata
1788 	 */
1789 
1790 	swap->swb_pages[index] = swapblk;
1791 	if (swapblk != SWAPBLK_NONE)
1792 		++swap->swb_count;
1793 }
1794 
1795 /*
1796  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1797  *
1798  *	The requested range of blocks is freed, with any associated swap
1799  *	returned to the swap bitmap.
1800  *
1801  *	This routine will free swap metadata structures as they are cleaned
1802  *	out.  This routine does *NOT* operate on swap metadata associated
1803  *	with resident pages.
1804  *
1805  *	This routine must be called at splvm()
1806  */
1807 
1808 static void
1809 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1810 {
1811 	if (object->type != OBJT_SWAP)
1812 		return;
1813 
1814 	while (count > 0) {
1815 		struct swblock **pswap;
1816 		struct swblock *swap;
1817 
1818 		pswap = swp_pager_hash(object, index);
1819 
1820 		if ((swap = *pswap) != NULL) {
1821 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1822 
1823 			if (v != SWAPBLK_NONE) {
1824 				swp_pager_freeswapspace(v, 1);
1825 				swap->swb_pages[index & SWAP_META_MASK] =
1826 					SWAPBLK_NONE;
1827 				if (--swap->swb_count == 0) {
1828 					*pswap = swap->swb_hnext;
1829 					zfree(swap_zone, swap);
1830 					--object->un_pager.swp.swp_bcount;
1831 				}
1832 			}
1833 			--count;
1834 			++index;
1835 		} else {
1836 			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1837 			count -= n;
1838 			index += n;
1839 		}
1840 	}
1841 }
1842 
1843 /*
1844  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1845  *
1846  *	This routine locates and destroys all swap metadata associated with
1847  *	an object.
1848  *
1849  *	This routine must be called at splvm()
1850  */
1851 
1852 static void
1853 swp_pager_meta_free_all(vm_object_t object)
1854 {
1855 	daddr_t index = 0;
1856 
1857 	if (object->type != OBJT_SWAP)
1858 		return;
1859 
1860 	while (object->un_pager.swp.swp_bcount) {
1861 		struct swblock **pswap;
1862 		struct swblock *swap;
1863 
1864 		pswap = swp_pager_hash(object, index);
1865 		if ((swap = *pswap) != NULL) {
1866 			int i;
1867 
1868 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1869 				daddr_t v = swap->swb_pages[i];
1870 				if (v != SWAPBLK_NONE) {
1871 					--swap->swb_count;
1872 					swp_pager_freeswapspace(v, 1);
1873 				}
1874 			}
1875 			if (swap->swb_count != 0)
1876 				panic("swp_pager_meta_free_all: swb_count != 0");
1877 			*pswap = swap->swb_hnext;
1878 			zfree(swap_zone, swap);
1879 			--object->un_pager.swp.swp_bcount;
1880 		}
1881 		index += SWAP_META_PAGES;
1882 		if (index > 0x20000000)
1883 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1884 	}
1885 }
1886 
1887 /*
1888  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
1889  *
1890  *	This routine is capable of looking up, popping, or freeing
1891  *	swapblk assignments in the swap meta data or in the vm_page_t.
1892  *	The routine typically returns the swapblk being looked-up, or popped,
1893  *	or SWAPBLK_NONE if the block was freed or if the block was
1894  *	invalid.  This routine will automatically free any invalid
1895  *	meta-data swapblks.
1896  *
1897  *	It is not possible to store invalid swapblks in the swap meta data
1898  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1899  *
1900  *	When acting on a busy resident page and paging is in progress, we
1901  *	have to wait until paging is complete but otherwise can act on the
1902  *	busy page.
1903  *
1904  *	This routine must be called at splvm().
1905  *
1906  *	SWM_FREE	remove and free swap block from metadata
1907  *	SWM_POP		remove from meta data but do not free.. pop it out
1908  */
1909 
1910 static daddr_t
1911 swp_pager_meta_ctl(
1912 	vm_object_t object,
1913 	vm_pindex_t index,
1914 	int flags
1915 ) {
1916 	struct swblock **pswap;
1917 	struct swblock *swap;
1918 	daddr_t r1;
1919 
1920 	/*
1921 	 * The meta data only exists if the object is OBJT_SWAP
1922 	 * and even then might not be allocated yet.
1923 	 */
1924 
1925 	if (object->type != OBJT_SWAP)
1926 		return(SWAPBLK_NONE);
1927 
1928 	r1 = SWAPBLK_NONE;
1929 	pswap = swp_pager_hash(object, index);
1930 
1931 	if ((swap = *pswap) != NULL) {
1932 		index &= SWAP_META_MASK;
1933 		r1 = swap->swb_pages[index];
1934 
1935 		if (r1 != SWAPBLK_NONE) {
1936 			if (flags & SWM_FREE) {
1937 				swp_pager_freeswapspace(r1, 1);
1938 				r1 = SWAPBLK_NONE;
1939 			}
1940 			if (flags & (SWM_FREE|SWM_POP)) {
1941 				swap->swb_pages[index] = SWAPBLK_NONE;
1942 				if (--swap->swb_count == 0) {
1943 					*pswap = swap->swb_hnext;
1944 					zfree(swap_zone, swap);
1945 					--object->un_pager.swp.swp_bcount;
1946 				}
1947 			}
1948 		}
1949 	}
1950 	return(r1);
1951 }
1952 
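/*
 * Usage example (editor's note, taken from swap_pager_copy() above):
 * SWM_POP detaches a block from one object's metadata without freeing
 * it, so the block can be re-attached to another object:
 *
 *	srcaddr = swp_pager_meta_ctl(srcobject, i + offset, SWM_POP);
 *	if (srcaddr != SWAPBLK_NONE)
 *		swp_pager_meta_build(dstobject, i, srcaddr);
 */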
1953