/*
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
 * $Id: swap_pager.c,v 1.71 1996/09/08 20:44:33 dyson Exp $
 */

/*
 * Quick hack to page to dedicated partition(s).
 * TODO:
 *	Add multiprocessor locks
 *	Deal with async writes in a better fashion
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>

#include <miscfs/specfs/specdev.h>
#include <sys/rlist.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#ifndef NPENDINGIO
#define NPENDINGIO	10
#endif

static int nswiodone;
int swap_pager_full;
extern int vm_swap_size;
static int no_swap_space = 1;
struct rlisthdr swaplist;

#define MAX_PAGEOUT_CLUSTER 16

TAILQ_HEAD(swpclean, swpagerclean);

typedef struct swpagerclean *swp_clean_t;

static struct swpagerclean {
	TAILQ_ENTRY(swpagerclean) spc_list;
	int spc_flags;
	struct buf *spc_bp;
	vm_object_t spc_object;
	vm_offset_t spc_kva;
	int spc_count;
	vm_page_t spc_m[MAX_PAGEOUT_CLUSTER];
} swcleanlist[NPENDINGIO];


/* spc_flags values */
#define SPC_ERROR	0x01

#define SWB_EMPTY (-1)

/* list of completed page cleans */
static struct swpclean swap_pager_done;

/* list of pending page cleans */
static struct swpclean swap_pager_inuse;

/* list of free pager clean structs */
static struct swpclean swap_pager_free;
int swap_pager_free_count;

/* list of "named" anon region objects */
static struct pagerlst swap_pager_object_list;

/* list of "unnamed" anon region objects */
struct pagerlst swap_pager_un_object_list;

#define	SWAP_FREE_NEEDED	0x1	/* need a swap block */
#define SWAP_FREE_NEEDED_BY_PAGEOUT 0x2
static int swap_pager_needflags;

static struct pagerlst *swp_qs[] = {
	&swap_pager_object_list, &swap_pager_un_object_list, (struct pagerlst *) 0
};

/*
 * pagerops for OBJT_SWAP - "swap pager".
 */
static vm_object_t
		swap_pager_alloc __P((void *handle, vm_size_t size,
				      vm_prot_t prot, vm_ooffset_t offset));
static void	swap_pager_dealloc __P((vm_object_t object));
static boolean_t
		swap_pager_haspage __P((vm_object_t object, vm_pindex_t pindex,
					int *before, int *after));
static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void	swap_pager_init __P((void));
static void	swap_pager_sync __P((void));

struct pagerops swappagerops = {
	swap_pager_init,
	swap_pager_alloc,
	swap_pager_dealloc,
	swap_pager_getpages,
	swap_pager_putpages,
	swap_pager_haspage,
	swap_pager_sync
};

static int npendingio = NPENDINGIO;
static int dmmin;
int dmmax;

static int	swap_pager_block_index __P((vm_pindex_t pindex));
static int	swap_pager_block_offset __P((vm_pindex_t pindex));
static daddr_t *swap_pager_diskaddr __P((vm_object_t object,
					  vm_pindex_t pindex, int *valid));
static void	swap_pager_finish __P((swp_clean_t spc));
static void	swap_pager_freepage __P((vm_page_t m));
static void	swap_pager_free_swap __P((vm_object_t object));
static void	swap_pager_freeswapspace __P((vm_object_t object,
					      unsigned int from,
					      unsigned int to));
static int	swap_pager_getswapspace __P((vm_object_t object,
					     unsigned int amount,
					     daddr_t *rtval));
static void	swap_pager_iodone __P((struct buf *));
static void	swap_pager_iodone1 __P((struct buf *bp));
static void	swap_pager_reclaim __P((void));
static void	swap_pager_ridpages __P((vm_page_t *m, int count,
					 int reqpage));
static void	swap_pager_setvalid __P((vm_object_t object,
					 vm_offset_t offset, int valid));
static void	swapsizecheck __P((void));

#define SWAPLOW (vm_swap_size < (512 * btodb(PAGE_SIZE)))

static inline void
swapsizecheck()
{
	if (vm_swap_size < 128 * btodb(PAGE_SIZE)) {
		if (swap_pager_full == 0)
			printf("swap_pager: out of swap space\n");
		swap_pager_full = 1;
	} else if (vm_swap_size > 192 * btodb(PAGE_SIZE))
		swap_pager_full = 0;
}
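
/*
 * Worked example of the thresholds above (illustrative only, assuming
 * PAGE_SIZE == 4096 and DEV_BSIZE == 512, so btodb(PAGE_SIZE) == 8
 * disk blocks per page):
 *
 *	low-water mark:  128 * 8 = 1024 blocks, i.e. 512KB of free swap
 *	high-water mark: 192 * 8 = 1536 blocks, i.e. 768KB of free swap
 *
 * swap_pager_full is set below the low-water mark and cleared only above
 * the high-water mark; the gap provides hysteresis so the "out of swap
 * space" state does not flap.  SWAPLOW (512 * 8 = 4096 blocks, i.e. 2MB)
 * is a separate, earlier warning level used by the pagein path.
 */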

static void
swap_pager_init()
{
	TAILQ_INIT(&swap_pager_object_list);
	TAILQ_INIT(&swap_pager_un_object_list);

	/*
	 * Initialize clean lists
	 */
	TAILQ_INIT(&swap_pager_inuse);
	TAILQ_INIT(&swap_pager_done);
	TAILQ_INIT(&swap_pager_free);
	swap_pager_free_count = 0;

	/*
	 * Calculate the swap allocation constants.
	 */
	dmmin = PAGE_SIZE / DEV_BSIZE;
	dmmax = btodb(SWB_NPAGES * PAGE_SIZE) * 2;
}
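
/*
 * Example values for the constants above (illustrative only, assuming
 * PAGE_SIZE == 4096, DEV_BSIZE == 512 and SWB_NPAGES == 8):
 *
 *	dmmin = 4096 / 512          = 8 blocks   (one page)
 *	dmmax = btodb(8 * 4096) * 2 = 128 blocks (64KB)
 *
 * dmmax bounds a contiguous swap run; the clustering code below never
 * lets a single I/O cross a dmmax-sized disk region (see the
 * "reqdskregion" checks in swap_pager_getpages and the matching test in
 * swap_pager_putpages).
 */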

void
swap_pager_swap_init()
{
	swp_clean_t spc;
	struct buf *bp;
	int i;

	/*
	 * kva's are allocated here so that we don't need to keep doing
	 * kmem_alloc pageables at runtime
	 */
	for (i = 0, spc = swcleanlist; i < npendingio; i++, spc++) {
		spc->spc_kva = kmem_alloc_pageable(pager_map, PAGE_SIZE * MAX_PAGEOUT_CLUSTER);
		if (!spc->spc_kva) {
			break;
		}
		spc->spc_bp = malloc(sizeof(*bp), M_TEMP, M_KERNEL);
		if (!spc->spc_bp) {
			/* release the full cluster-sized kva mapping allocated above */
			kmem_free_wakeup(pager_map, spc->spc_kva, PAGE_SIZE * MAX_PAGEOUT_CLUSTER);
			break;
		}
		spc->spc_flags = 0;
		TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
		swap_pager_free_count++;
	}
}

int
swap_pager_swp_alloc(object, wait)
	vm_object_t object;
	int wait;
{
	sw_blk_t swb;
	int nblocks;
	int i, j;

	nblocks = (object->size + SWB_NPAGES - 1) / SWB_NPAGES;
	swb = malloc(nblocks * sizeof(*swb), M_VMPGDATA, wait);
	if (swb == NULL)
		return 1;

	for (i = 0; i < nblocks; i++) {
		swb[i].swb_valid = 0;
		swb[i].swb_locked = 0;
		for (j = 0; j < SWB_NPAGES; j++)
			swb[i].swb_block[j] = SWB_EMPTY;
	}

	object->un_pager.swp.swp_nblocks = nblocks;
	object->un_pager.swp.swp_allocsize = 0;
	object->un_pager.swp.swp_blocks = swb;
	object->un_pager.swp.swp_poip = 0;

	if (object->handle != NULL) {
		TAILQ_INSERT_TAIL(&swap_pager_object_list, object, pager_object_list);
	} else {
		TAILQ_INSERT_TAIL(&swap_pager_un_object_list, object, pager_object_list);
	}

	return 0;
}
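
/*
 * Sizing example for the block table above (illustrative only, with
 * SWB_NPAGES == 8 assumed): an object of 17 pages needs
 * (17 + 8 - 1) / 8 = 3 sw_blk entries covering pages 0-7, 8-15 and
 * 16-23; all 24 slots start out SWB_EMPTY with no valid bits set.
 */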

/*
 * Allocate an object and associated resources.
 * Note that if we are called from the pageout daemon (handle == NULL)
 * we should not wait for memory as that could result in deadlock.
 */
static vm_object_t
swap_pager_alloc(handle, size, prot, offset)
	void *handle;
	register vm_size_t size;
	vm_prot_t prot;
	vm_ooffset_t offset;
{
	vm_object_t object;

	/*
	 * If this is a "named" anonymous region, look it up and use the
	 * object if it exists, otherwise allocate a new one.
	 */
	if (handle) {
		object = vm_pager_object_lookup(&swap_pager_object_list, handle);
		if (object != NULL) {
			vm_object_reference(object);
		} else {
			/*
			 * XXX - there is a race condition here.  Two processes
			 * can request the same named object simultaneously,
			 * and if one blocks for memory, the result is a disaster.
			 * Probably quite rare, but it is yet another reason to just
			 * rip support of "named anonymous regions" out altogether.
			 */
			object = vm_object_allocate(OBJT_SWAP,
				OFF_TO_IDX(offset + PAGE_MASK) + size);
			object->handle = handle;
			(void) swap_pager_swp_alloc(object, M_WAITOK);
		}
	} else {
		object = vm_object_allocate(OBJT_SWAP,
			OFF_TO_IDX(offset + PAGE_MASK) + size);
		(void) swap_pager_swp_alloc(object, M_WAITOK);
	}

	return (object);
}
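
/*
 * The object size computation above, by example (illustrative only,
 * 4096-byte pages assumed): for offset 0x1800 and a size of 10 pages,
 * OFF_TO_IDX(0x1800 + PAGE_MASK) == OFF_TO_IDX(0x27ff) == 2, so a
 * 12-page object is created; rounding the byte offset up this way makes
 * the object large enough to cover the partially-overlapped leading
 * pages as well.
 */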

/*
 * returns the disk block associated with a pager and offset;
 * additionally, as a side effect, returns a flag indicating
 * whether the block has been written (is valid)
 */

inline static daddr_t *
swap_pager_diskaddr(object, pindex, valid)
	vm_object_t object;
	vm_pindex_t pindex;
	int *valid;
{
	register sw_blk_t swb;
	int ix;

	if (valid)
		*valid = 0;
	ix = pindex / SWB_NPAGES;
	if ((ix >= object->un_pager.swp.swp_nblocks) ||
	    (pindex >= object->size)) {
		return (NULL);
	}
	swb = &object->un_pager.swp.swp_blocks[ix];
	ix = pindex % SWB_NPAGES;
	if (valid)
		*valid = swb->swb_valid & (1 << ix);
	return &swb->swb_block[ix];
}
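
/*
 * Index arithmetic above, by example (illustrative only, SWB_NPAGES == 8
 * assumed): pindex 11 selects swp_blocks[11 / 8] == swp_blocks[1] and
 * slot 11 % 8 == 3, so the routine returns &swb->swb_block[3] and, via
 * *valid, bit 3 of swb->swb_valid.
 */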

/*
 * Utility routine to set the valid (written) bit for
 * a block associated with a pager and offset
 */
static void
swap_pager_setvalid(object, offset, valid)
	vm_object_t object;
	vm_offset_t offset;
	int valid;
{
	register sw_blk_t swb;
	int ix;

	ix = offset / SWB_NPAGES;
	if (ix >= object->un_pager.swp.swp_nblocks)
		return;

	swb = &object->un_pager.swp.swp_blocks[ix];
	ix = offset % SWB_NPAGES;
	if (valid)
		swb->swb_valid |= (1 << ix);
	else
		swb->swb_valid &= ~(1 << ix);
	return;
}

/*
 * this routine allocates swap space with a fragmentation
 * minimization policy.
 */
static int
swap_pager_getswapspace(object, amount, rtval)
	vm_object_t object;
	unsigned int amount;
	daddr_t *rtval;
{
	unsigned location;

	vm_swap_size -= amount;
	if (!rlist_alloc(&swaplist, amount, &location)) {
		vm_swap_size += amount;
		return 0;
	} else {
		swapsizecheck();
		object->un_pager.swp.swp_allocsize += amount;
		*rtval = location;
		return 1;
	}
}

/*
 * this routine frees swap space with a fragmentation
 * minimization policy.
 */
static void
swap_pager_freeswapspace(object, from, to)
	vm_object_t object;
	unsigned int from;
	unsigned int to;
{
	rlist_free(&swaplist, from, to);
	vm_swap_size += (to - from) + 1;
	object->un_pager.swp.swp_allocsize -= (to - from) + 1;
	swapsizecheck();
}
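
/*
 * A minimal usage sketch of the two routines above (illustrative only):
 *
 *	daddr_t blk;
 *
 *	if (swap_pager_getswapspace(object, btodb(PAGE_SIZE), &blk)) {
 *		... page lives at disk blocks [blk, blk + btodb(PAGE_SIZE) - 1] ...
 *		swap_pager_freeswapspace(object, blk, blk + btodb(PAGE_SIZE) - 1);
 *	}
 *
 * Both ends of the range given to swap_pager_freeswapspace are
 * inclusive, which is why freeing one page passes blk through
 * blk + btodb(PAGE_SIZE) - 1.
 */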
/*
 * this routine frees swap blocks from a specified pager
 */
void
swap_pager_freespace(object, start, size)
	vm_object_t object;
	vm_pindex_t start;
	vm_size_t size;
{
	vm_pindex_t i;
	int s;

	s = splbio();
	for (i = start; i < start + size; i += 1) {
		int valid;
		daddr_t *addr = swap_pager_diskaddr(object, i, &valid);

		if (addr && *addr != SWB_EMPTY) {
			swap_pager_freeswapspace(object, *addr, *addr + btodb(PAGE_SIZE) - 1);
			if (valid) {
				swap_pager_setvalid(object, i, 0);
			}
			*addr = SWB_EMPTY;
		}
	}
	splx(s);
}

/*
 * same as freespace, but don't free, just force a DMZ next time
 */
void
swap_pager_dmzspace(object, start, size)
	vm_object_t object;
	vm_pindex_t start;
	vm_size_t size;
{
	vm_pindex_t i;
	int s;

	s = splbio();
	for (i = start; i < start + size; i += 1) {
		int valid;
		daddr_t *addr = swap_pager_diskaddr(object, i, &valid);

		if (addr && *addr != SWB_EMPTY) {
			if (valid) {
				swap_pager_setvalid(object, i, 0);
			}
		}
	}
	splx(s);
}

static void
swap_pager_free_swap(object)
	vm_object_t object;
{
	register int i, j;
	register sw_blk_t swb;
	int first_block = 0, block_count = 0;
	int s;

	/*
	 * Free left over swap blocks
	 */
	s = splbio();
	for (i = 0, swb = object->un_pager.swp.swp_blocks;
	    i < object->un_pager.swp.swp_nblocks; i++, swb++) {
		for (j = 0; j < SWB_NPAGES; j++) {
			if (swb->swb_block[j] != SWB_EMPTY) {
				/*
				 * initially the length of the run is zero
				 */
				if (block_count == 0) {
					first_block = swb->swb_block[j];
					block_count = btodb(PAGE_SIZE);
					swb->swb_block[j] = SWB_EMPTY;
				/*
				 * if the new block can be included in the current run
				 */
				} else if (swb->swb_block[j] == first_block + block_count) {
					block_count += btodb(PAGE_SIZE);
					swb->swb_block[j] = SWB_EMPTY;
				/*
				 * terminate the previous run, and start a new one
				 */
				} else {
					swap_pager_freeswapspace(object, first_block,
					    (unsigned) first_block + block_count - 1);
					first_block = swb->swb_block[j];
					block_count = btodb(PAGE_SIZE);
					swb->swb_block[j] = SWB_EMPTY;
				}
			}
		}
	}

	if (block_count) {
		swap_pager_freeswapspace(object, first_block,
		    (unsigned) first_block + block_count - 1);
	}
	splx(s);
}
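
/*
 * Run coalescing above, by example (illustrative only, with
 * btodb(PAGE_SIZE) == 8 assumed): for per-page block addresses
 * 100, 108, 116, 300 the first three extend one run (first_block == 100,
 * block_count grows 8 -> 16 -> 24), 300 breaks the run, so blocks
 * 100-123 are freed with a single rlist operation and a new run starts
 * at 300.  Batching contiguous pages keeps the resource-list traffic low.
 */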


/*
 * swap_pager_reclaim frees up over-allocated space from all pagers;
 * this eliminates internal fragmentation due to allocation of space
 * for segments that are never swapped to.  It has been written so that
 * it does not block until the rlist_free operation occurs; it keeps
 * the queues consistent.
 */

/*
 * Maximum number of blocks (pages) to reclaim per pass
 */
#define MAXRECLAIM 128

static void
swap_pager_reclaim()
{
	vm_object_t object;
	int i, j, k;
	int s;
	int reclaimcount;
	static struct {
		int address;
		vm_object_t object;
	} reclaims[MAXRECLAIM];
	static int in_reclaim;

	/*
	 * allow only one process to be in the swap_pager_reclaim subroutine
	 */
	s = splbio();
	if (in_reclaim) {
		tsleep(&in_reclaim, PSWP, "swrclm", 0);
		splx(s);
		return;
	}
	in_reclaim = 1;
	reclaimcount = 0;

	/* for each pager queue */
	for (k = 0; swp_qs[k]; k++) {

		object = TAILQ_FIRST(swp_qs[k]);
		while (object && (reclaimcount < MAXRECLAIM)) {

			/*
			 * see if any blocks associated with a pager have been
			 * allocated but not used (written)
			 */
			if ((object->flags & OBJ_DEAD) == 0 &&
				(object->paging_in_progress == 0)) {
				for (i = 0; i < object->un_pager.swp.swp_nblocks; i++) {
					sw_blk_t swb = &object->un_pager.swp.swp_blocks[i];

					if (swb->swb_locked)
						continue;
					for (j = 0; j < SWB_NPAGES; j++) {
						if (swb->swb_block[j] != SWB_EMPTY &&
						    (swb->swb_valid & (1 << j)) == 0) {
							reclaims[reclaimcount].address = swb->swb_block[j];
							reclaims[reclaimcount++].object = object;
							swb->swb_block[j] = SWB_EMPTY;
							if (reclaimcount >= MAXRECLAIM)
								goto rfinished;
						}
					}
				}
			}
			object = TAILQ_NEXT(object, pager_object_list);
		}
	}

rfinished:

	/*
	 * free the blocks that have been added to the reclaim list
	 */
	for (i = 0; i < reclaimcount; i++) {
		swap_pager_freeswapspace(reclaims[i].object,
		    reclaims[i].address, reclaims[i].address + btodb(PAGE_SIZE) - 1);
	}
	splx(s);
	in_reclaim = 0;
	wakeup(&in_reclaim);
}


/*
 * swap_pager_copy copies blocks from one pager to another and
 * destroys the source pager
 */

void
swap_pager_copy(srcobject, srcoffset, dstobject, dstoffset, offset)
	vm_object_t srcobject;
	vm_pindex_t srcoffset;
	vm_object_t dstobject;
	vm_pindex_t dstoffset;
	vm_pindex_t offset;
{
	vm_pindex_t i;
	int origsize;
	int s;

	if (vm_swap_size)
		no_swap_space = 0;

	origsize = srcobject->un_pager.swp.swp_allocsize;

	/*
	 * remove the source object from the swap_pager internal queue
	 */
	if (srcobject->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, srcobject, pager_object_list);
	} else {
		TAILQ_REMOVE(&swap_pager_object_list, srcobject, pager_object_list);
	}

	s = splbio();
	while (srcobject->un_pager.swp.swp_poip) {
		tsleep(srcobject, PVM, "spgout", 0);
	}
	splx(s);

	/*
	 * clean all of the pages that are currently active and finished
	 */
	swap_pager_sync();

	s = splbio();
	/*
	 * transfer source to destination
	 */
	for (i = 0; i < dstobject->size; i += 1) {
		int srcvalid, dstvalid;
		daddr_t *srcaddrp = swap_pager_diskaddr(srcobject, i + offset + srcoffset,
						    &srcvalid);
		daddr_t *dstaddrp;

		/*
		 * see if the source has space allocated
		 */
		if (srcaddrp && *srcaddrp != SWB_EMPTY) {
			/*
			 * if the source is valid and the dest has no space,
			 * then copy the allocation from the source to the
			 * dest.
			 */
			if (srcvalid) {
				dstaddrp = swap_pager_diskaddr(dstobject, i + dstoffset,
							&dstvalid);
				/*
				 * if the dest has an allocated but invalid
				 * block, free it so the source block can be
				 * moved over; a dest block that is already
				 * valid makes us deallocate the source block
				 * below without copying.
				 */
				if (!dstvalid && dstaddrp && *dstaddrp != SWB_EMPTY) {
					swap_pager_freeswapspace(dstobject, *dstaddrp,
						*dstaddrp + btodb(PAGE_SIZE) - 1);
					*dstaddrp = SWB_EMPTY;
				}
				if (dstaddrp && *dstaddrp == SWB_EMPTY) {
					*dstaddrp = *srcaddrp;
					*srcaddrp = SWB_EMPTY;
					dstobject->un_pager.swp.swp_allocsize += btodb(PAGE_SIZE);
					srcobject->un_pager.swp.swp_allocsize -= btodb(PAGE_SIZE);
					swap_pager_setvalid(dstobject, i + dstoffset, 1);
				}
			}
			/*
			 * if the source is not empty at this point, then
			 * deallocate the space.
			 */
			if (*srcaddrp != SWB_EMPTY) {
				swap_pager_freeswapspace(srcobject, *srcaddrp,
					*srcaddrp + btodb(PAGE_SIZE) - 1);
				*srcaddrp = SWB_EMPTY;
			}
		}
	}
	splx(s);

	/*
	 * Free left over swap blocks
	 */
	swap_pager_free_swap(srcobject);

	if (srcobject->un_pager.swp.swp_allocsize) {
		printf("swap_pager_copy: *warning* pager with %d blocks (orig: %d)\n",
		    srcobject->un_pager.swp.swp_allocsize, origsize);
	}

	free(srcobject->un_pager.swp.swp_blocks, M_VMPGDATA);
	srcobject->un_pager.swp.swp_blocks = NULL;

	return;
}

static void
swap_pager_dealloc(object)
	vm_object_t object;
{
	int s;

	/*
	 * Remove from list right away so lookups will fail if we block for
	 * pageout completion.
	 */
	if (object->handle == NULL) {
		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
	} else {
		TAILQ_REMOVE(&swap_pager_object_list, object, pager_object_list);
	}

	/*
	 * Wait for all pageouts to finish and remove all entries from
	 * cleaning list.
	 */
	s = splbio();
	while (object->un_pager.swp.swp_poip) {
		tsleep(object, PVM, "swpout", 0);
	}
	splx(s);

	swap_pager_sync();

	/*
	 * Free left over swap blocks
	 */
	swap_pager_free_swap(object);

	if (object->un_pager.swp.swp_allocsize) {
		printf("swap_pager_dealloc: *warning* freeing pager with %d blocks\n",
		    object->un_pager.swp.swp_allocsize);
	}
	/*
	 * Free swap management resources
	 */
	free(object->un_pager.swp.swp_blocks, M_VMPGDATA);
	object->un_pager.swp.swp_blocks = NULL;
}

static inline int
swap_pager_block_index(pindex)
	vm_pindex_t pindex;
{
	return (pindex / SWB_NPAGES);
}

static inline int
swap_pager_block_offset(pindex)
	vm_pindex_t pindex;
{
	return (pindex % SWB_NPAGES);
}

/*
 * swap_pager_haspage returns TRUE if the pager has data that has
 * been written out.
 */
static boolean_t
swap_pager_haspage(object, pindex, before, after)
	vm_object_t object;
	vm_pindex_t pindex;
	int *before;
	int *after;
{
	register sw_blk_t swb;
	int ix;

	if (before != NULL)
		*before = 0;
	if (after != NULL)
		*after = 0;
	ix = pindex / SWB_NPAGES;
	if (ix >= object->un_pager.swp.swp_nblocks) {
		return (FALSE);
	}
	swb = &object->un_pager.swp.swp_blocks[ix];
	ix = pindex % SWB_NPAGES;

	if (swb->swb_block[ix] != SWB_EMPTY) {

		if (swb->swb_valid & (1 << ix)) {
			int tix;

			if (before) {
				for (tix = ix - 1; tix >= 0; --tix) {
					if ((swb->swb_valid & (1 << tix)) == 0)
						break;
					if ((swb->swb_block[tix] +
						(ix - tix) * (PAGE_SIZE/DEV_BSIZE)) !=
						swb->swb_block[ix])
						break;
					(*before)++;
				}
			}

			if (after) {
				for (tix = ix + 1; tix < SWB_NPAGES; tix++) {
					if ((swb->swb_valid & (1 << tix)) == 0)
						break;
					if ((swb->swb_block[tix] -
						(tix - ix) * (PAGE_SIZE/DEV_BSIZE)) !=
						swb->swb_block[ix])
						break;
					(*after)++;
				}
			}

			return TRUE;
		}
	}
	return (FALSE);
}
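
/*
 * Clustering example for the before/after scan above (illustrative
 * only, PAGE_SIZE/DEV_BSIZE == 8 assumed): with valid slots 2, 3, 4 at
 * disk blocks 80, 88, 96, a query for slot 3 returns TRUE with
 * *before == 1 and *after == 1, because slots 2 and 4 are both valid
 * and disk-contiguous with slot 3.  A valid neighbor at a
 * non-contiguous block would terminate the scan in that direction.
 */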

/*
 * swap_pager_freepage is a convenience routine that clears the busy
 * bit and deallocates a page.
 */
static void
swap_pager_freepage(m)
	vm_page_t m;
{
	PAGE_WAKEUP(m);
	vm_page_free(m);
}

/*
 * swap_pager_ridpages is a convenience routine that deallocates all
 * but the required page.  This is usually used in error returns that
 * need to invalidate the "extra" readahead pages.
 */
static void
swap_pager_ridpages(m, count, reqpage)
	vm_page_t *m;
	int count;
	int reqpage;
{
	int i;

	for (i = 0; i < count; i++)
		if (i != reqpage)
			swap_pager_freepage(m[i]);
}

/*
 * swap_pager_iodone1 is the completion routine for both reads and
 * sync writes (async writes complete through swap_pager_iodone)
 */
static void
swap_pager_iodone1(bp)
	struct buf *bp;
{
	bp->b_flags |= B_DONE;
	bp->b_flags &= ~B_ASYNC;
	wakeup(bp);
}

static int
swap_pager_getpages(object, m, count, reqpage)
	vm_object_t object;
	vm_page_t *m;
	int count, reqpage;
{
	register struct buf *bp;
	sw_blk_t swb[count];
	register int s;
	int i;
	boolean_t rv;
	vm_offset_t kva, off[count];
	swp_clean_t spc;
	vm_pindex_t paging_offset;
	int reqaddr[count];
	int sequential;

	int first, last;
	int failed;
	int reqdskregion;

	object = m[reqpage]->object;
	paging_offset = OFF_TO_IDX(object->paging_offset);
	sequential = (m[reqpage]->pindex == (object->last_read + 1));

	for (i = 0; i < count; i++) {
		vm_pindex_t fidx = m[i]->pindex + paging_offset;
		int ix = swap_pager_block_index(fidx);

		if (ix >= object->un_pager.swp.swp_nblocks) {
			int j;

			if (i <= reqpage) {
				swap_pager_ridpages(m, count, reqpage);
				return (VM_PAGER_FAIL);
			}
			for (j = i; j < count; j++) {
				swap_pager_freepage(m[j]);
			}
			count = i;
			break;
		}
		swb[i] = &object->un_pager.swp.swp_blocks[ix];
		off[i] = swap_pager_block_offset(fidx);
		reqaddr[i] = swb[i]->swb_block[off[i]];
	}

	/* make sure that our required input request exists */

	if (reqaddr[reqpage] == SWB_EMPTY ||
	    (swb[reqpage]->swb_valid & (1 << off[reqpage])) == 0) {
		swap_pager_ridpages(m, count, reqpage);
		return (VM_PAGER_FAIL);
	}
	reqdskregion = reqaddr[reqpage] / dmmax;

	/*
	 * search backwards for the first contiguous page to transfer
	 */
	failed = 0;
	first = 0;
	for (i = reqpage - 1; i >= 0; --i) {
		if (sequential || failed || (reqaddr[i] == SWB_EMPTY) ||
		    (swb[i]->swb_valid & (1 << off[i])) == 0 ||
		    (reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) ||
		    ((reqaddr[i] / dmmax) != reqdskregion)) {
			failed = 1;
			swap_pager_freepage(m[i]);
			if (first == 0)
				first = i + 1;
		}
	}
	/*
	 * search forwards for the last contiguous page to transfer
	 */
	failed = 0;
	last = count;
	for (i = reqpage + 1; i < count; i++) {
		if (failed || (reqaddr[i] == SWB_EMPTY) ||
		    (swb[i]->swb_valid & (1 << off[i])) == 0 ||
		    (reqaddr[i] != (reqaddr[reqpage] + (i - reqpage) * btodb(PAGE_SIZE))) ||
		    ((reqaddr[i] / dmmax) != reqdskregion)) {
			failed = 1;
			swap_pager_freepage(m[i]);
			if (last == count)
				last = i;
		}
	}

	count = last;
	if (first != 0) {
		for (i = first; i < count; i++) {
			m[i - first] = m[i];
			reqaddr[i - first] = reqaddr[i];
			off[i - first] = off[i];
		}
		count -= first;
		reqpage -= first;
	}
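
	/*
	 * Trimming example (illustrative only): with count == 5,
	 * reqpage == 2, pages 1-3 disk-contiguous but pages 0 and 4 not,
	 * the backward scan frees m[0] and sets first = 1, the forward
	 * scan frees m[4] and sets last = 4; after the shift above the
	 * cluster is m[0..2] (the old m[1..3]) with count == 3 and
	 * reqpage == 1.
	 */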
	++swb[reqpage]->swb_locked;

	/*
	 * at this point:
	 *	"m" is a pointer to the array of vm_page_t's for paging I/O,
	 *	"count" is the number of vm_page_t entries represented by "m",
	 *	"object" is the vm_object_t for I/O, and
	 *	"reqpage" is the index into "m" for the page actually faulted
	 */

	spc = NULL;
	if ((count == 1) && ((spc = TAILQ_FIRST(&swap_pager_free)) != NULL)) {
		TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
		swap_pager_free_count--;
		kva = spc->spc_kva;
		bp = spc->spc_bp;
		bzero(bp, sizeof *bp);
		bp->b_spc = spc;
		bp->b_vnbufs.le_next = NOLIST;
	} else {
		/*
		 * Get a swap buffer header to perform the IO
		 */
		bp = getpbuf();
		kva = (vm_offset_t) bp->b_data;
	}

	/*
	 * map our page(s) into kva for input
	 */
	pmap_qenter(kva, m, count);

	bp->b_flags = B_BUSY | B_READ | B_CALL | B_PAGING;
	bp->b_iodone = swap_pager_iodone1;
	bp->b_proc = &proc0;	/* XXX (but without B_PHYS set this is ok) */
	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
	crhold(bp->b_rcred);
	crhold(bp->b_wcred);
	bp->b_un.b_addr = (caddr_t) kva;
	bp->b_blkno = reqaddr[0];
	bp->b_bcount = PAGE_SIZE * count;
	bp->b_bufsize = PAGE_SIZE * count;

	pbgetvp(swapdev_vp, bp);

	cnt.v_swapin++;
	cnt.v_swappgsin += count;
	/*
	 * perform the I/O
	 */
	VOP_STRATEGY(bp);

	/*
	 * wait for the sync I/O to complete
	 */
	s = splbio();
	while ((bp->b_flags & B_DONE) == 0) {
		if (tsleep(bp, PVM, "swread", hz * 20)) {
			printf("swap_pager: indefinite wait buffer: device: %d, blkno: %d, size: %d\n",
				bp->b_dev, bp->b_blkno, bp->b_bcount);
		}
	}

	if (bp->b_flags & B_ERROR) {
		printf("swap_pager: I/O error - pagein failed; blkno %d, size %d, error %d\n",
		    bp->b_blkno, bp->b_bcount, bp->b_error);
		rv = VM_PAGER_ERROR;
	} else {
		rv = VM_PAGER_OK;
	}

	/*
	 * relpbuf does this, but we maintain our own buffer list also...
	 */
	if (bp->b_vp)
		pbrelvp(bp);

	splx(s);
	swb[reqpage]->swb_locked--;

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove(kva, count);

	if (spc) {
		m[reqpage]->object->last_read = m[reqpage]->pindex;
		if (bp->b_flags & B_WANTED)
			wakeup(bp);
		/*
		 * if we have used an spc, we need to free it.
		 */
		if (bp->b_rcred != NOCRED)
			crfree(bp->b_rcred);
		if (bp->b_wcred != NOCRED)
			crfree(bp->b_wcred);
		TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
		swap_pager_free_count++;
		if (swap_pager_needflags & SWAP_FREE_NEEDED) {
			wakeup(&swap_pager_free);
		}
		if (swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
			pagedaemon_wakeup();
		swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
		if (rv == VM_PAGER_OK) {
			pmap_clear_modify(VM_PAGE_TO_PHYS(m[reqpage]));
			m[reqpage]->valid = VM_PAGE_BITS_ALL;
			m[reqpage]->dirty = 0;
		}
	} else {
		/*
		 * release the physical I/O buffer
		 */
		relpbuf(bp);
		/*
		 * finish up input if everything is ok
		 */
		if (rv == VM_PAGER_OK) {
			for (i = 0; i < count; i++) {
				pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
				m[i]->dirty = 0;
				m[i]->flags &= ~PG_ZERO;
				if (i != reqpage) {
					/*
					 * whether or not to leave the page
					 * activated is up in the air, but we
					 * should put the page on a page queue
					 * somewhere (it already is in the
					 * object).  After some empirical
					 * results, it is best to deactivate
					 * the readahead pages.
					 */
					vm_page_deactivate(m[i]);

					/*
					 * just in case someone was asking for
					 * this page we now tell them that it
					 * is ok to use
					 */
					m[i]->valid = VM_PAGE_BITS_ALL;
					PAGE_WAKEUP(m[i]);
				}
			}

			m[reqpage]->object->last_read = m[count-1]->pindex;

			/*
			 * If we're out of swap space, then attempt to free
			 * some whenever multiple pages are brought in.  We
			 * must set the dirty bits so that the page contents
			 * will be preserved.
			 */
			if (SWAPLOW) {
				for (i = 0; i < count; i++) {
					m[i]->dirty = VM_PAGE_BITS_ALL;
				}
				swap_pager_freespace(object, m[0]->pindex + paging_offset, count);
			}
		} else {
			swap_pager_ridpages(m, count, reqpage);
		}
	}
	return (rv);
}

int
swap_pager_putpages(object, m, count, sync, rtvals)
	vm_object_t object;
	vm_page_t *m;
	int count;
	boolean_t sync;
	int *rtvals;
{
	register struct buf *bp;
	sw_blk_t swb[count];
	register int s;
	int i, j, ix;
	boolean_t rv;
	vm_offset_t kva, off, fidx;
	swp_clean_t spc;
	vm_pindex_t paging_pindex;
	int reqaddr[count];
	int failed;

	if (vm_swap_size)
		no_swap_space = 0;
	if (no_swap_space) {
		for (i = 0; i < count; i++)
			rtvals[i] = VM_PAGER_FAIL;
		return VM_PAGER_FAIL;
	}
	spc = NULL;

	object = m[0]->object;
	paging_pindex = OFF_TO_IDX(object->paging_offset);

	failed = 0;
	for (j = 0; j < count; j++) {
		fidx = m[j]->pindex + paging_pindex;
		ix = swap_pager_block_index(fidx);
		swb[j] = 0;
		if (ix >= object->un_pager.swp.swp_nblocks) {
			rtvals[j] = VM_PAGER_FAIL;
			failed = 1;
			continue;
		} else {
			rtvals[j] = VM_PAGER_OK;
		}
		swb[j] = &object->un_pager.swp.swp_blocks[ix];
		swb[j]->swb_locked++;
		if (failed) {
			rtvals[j] = VM_PAGER_FAIL;
			continue;
		}
		off = swap_pager_block_offset(fidx);
		reqaddr[j] = swb[j]->swb_block[off];
		if (reqaddr[j] == SWB_EMPTY) {
			daddr_t blk;
			int tries;
			int ntoget;

			tries = 0;
			s = splbio();

			/*
			 * if any other pages have been allocated in this
			 * block, we only try to get one page.
			 */
			for (i = 0; i < SWB_NPAGES; i++) {
				if (swb[j]->swb_block[i] != SWB_EMPTY)
					break;
			}

			ntoget = (i == SWB_NPAGES) ? SWB_NPAGES : 1;
			/*
			 * this code is a little conservative, but works (the
			 * intent of this code is to allocate small chunks for
			 * small objects)
			 */
			if ((off == 0) && ((fidx + ntoget) > object->size)) {
				ntoget = object->size - fidx;
			}
	retrygetspace:
			if (!swap_pager_full && ntoget > 1 &&
			    swap_pager_getswapspace(object, ntoget * btodb(PAGE_SIZE),
				&blk)) {

				for (i = 0; i < ntoget; i++) {
					swb[j]->swb_block[i] = blk + btodb(PAGE_SIZE) * i;
					swb[j]->swb_valid = 0;
				}

				reqaddr[j] = swb[j]->swb_block[off];
			} else if (!swap_pager_getswapspace(object, btodb(PAGE_SIZE),
				&swb[j]->swb_block[off])) {
				/*
				 * if the allocation has failed, we try to
				 * reclaim space and retry.
				 */
				if (++tries == 1) {
					swap_pager_reclaim();
					goto retrygetspace;
				}
				rtvals[j] = VM_PAGER_AGAIN;
				failed = 1;
				swap_pager_full = 1;
			} else {
				reqaddr[j] = swb[j]->swb_block[off];
				swb[j]->swb_valid &= ~(1 << off);
			}
			splx(s);
		}
	}
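
	/*
	 * Allocation policy above, by example (illustrative only,
	 * SWB_NPAGES == 8 assumed): a page mapping to a completely empty
	 * sw_blk entry sets ntoget to 8, so one swap_pager_getswapspace
	 * call reserves 8 * btodb(PAGE_SIZE) contiguous blocks for the
	 * whole entry; if any slot is already populated (or the object
	 * ends inside the entry), only a single page's worth of blocks is
	 * requested.  A failed allocation triggers one swap_pager_reclaim
	 * pass before giving up with VM_PAGER_AGAIN.
	 */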

	/*
	 * search forwards for the last contiguous page to transfer
	 */
	failed = 0;
	for (i = 0; i < count; i++) {
		if (failed ||
			(reqaddr[i] != reqaddr[0] + i * btodb(PAGE_SIZE)) ||
		    ((reqaddr[i] / dmmax) != (reqaddr[0] / dmmax)) ||
		    (rtvals[i] != VM_PAGER_OK)) {
			failed = 1;
			if (rtvals[i] == VM_PAGER_OK)
				rtvals[i] = VM_PAGER_AGAIN;
		}
	}

	for (i = 0; i < count; i++) {
		if (rtvals[i] != VM_PAGER_OK) {
			if (swb[i])
				--swb[i]->swb_locked;
		}
	}

	for (i = 0; i < count; i++)
		if (rtvals[i] != VM_PAGER_OK)
			break;

	if (i == 0) {
		return VM_PAGER_AGAIN;
	}
	count = i;
	for (i = 0; i < count; i++) {
		if (reqaddr[i] == SWB_EMPTY) {
			printf("I/O to empty block???? -- pindex: %d, i: %d\n",
				m[i]->pindex, i);
		}
	}

	/*
	 * For synchronous writes, we clean up all completed async pageouts.
	 */
	if (sync == TRUE) {
		swap_pager_sync();
	}
	kva = 0;

	/*
	 * get a swap pager clean data structure, block until we get it
	 */
	if (swap_pager_free_count <= 3) {
		s = splbio();
		if (curproc == pageproc) {
retryfree:
			/*
			 * pageout daemon needs a swap control block
			 */
			swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT|SWAP_FREE_NEEDED;
			/*
			 * if it does not get one within a short time, then
			 * there is a potential deadlock, so we go on trying
			 * to free pages.  It is important to block here as
			 * opposed to returning, thereby allowing the pageout
			 * daemon to continue.  It is likely that the pageout
			 * daemon will start suboptimally reclaiming vnode
			 * backed pages if we don't block.  Since the I/O
			 * subsystem is probably already fully utilized, we
			 * might as well wait.
			 */
			if (tsleep(&swap_pager_free, PVM, "swpfre", hz/5)) {
				swap_pager_sync();
				if (swap_pager_free_count <= 3) {
					splx(s);
					return VM_PAGER_AGAIN;
				}
			} else {
				/*
				 * we make sure that pageouts aren't taking up
				 * all of the free swap control blocks.
				 */
				swap_pager_sync();
				if (swap_pager_free_count <= 3) {
					goto retryfree;
				}
			}
		} else {
			pagedaemon_wakeup();
			while (swap_pager_free_count <= 3) {
				swap_pager_needflags |= SWAP_FREE_NEEDED;
				tsleep(&swap_pager_free, PVM, "swpfre", 0);
				pagedaemon_wakeup();
			}
		}
		splx(s);
	}
	spc = TAILQ_FIRST(&swap_pager_free);
	if (spc == NULL)
		panic("swap_pager_putpages: free queue is empty, %d expected\n", swap_pager_free_count);
	TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
	swap_pager_free_count--;

	kva = spc->spc_kva;

	/*
	 * map our page(s) into kva for I/O
	 */
	pmap_qenter(kva, m, count);

	/*
	 * get the base I/O offset into the swap file
	 */
	for (i = 0; i < count; i++) {
		fidx = m[i]->pindex + paging_pindex;
		off = swap_pager_block_offset(fidx);
		/*
		 * set the valid bit
		 */
		swb[i]->swb_valid |= (1 << off);
		/*
		 * and unlock the data structure
		 */
		swb[i]->swb_locked--;
	}

	/*
	 * Get a swap buffer header and perform the IO
	 */
	bp = spc->spc_bp;
	bzero(bp, sizeof *bp);
	bp->b_spc = spc;
	bp->b_vnbufs.le_next = NOLIST;

	bp->b_flags = B_BUSY | B_PAGING;
	bp->b_proc = &proc0;	/* XXX (but without B_PHYS set this is ok) */
	bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
	if (bp->b_rcred != NOCRED)
		crhold(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crhold(bp->b_wcred);
	bp->b_data = (caddr_t) kva;
	bp->b_blkno = reqaddr[0];
	pbgetvp(swapdev_vp, bp);

	bp->b_bcount = PAGE_SIZE * count;
	bp->b_bufsize = PAGE_SIZE * count;
	swapdev_vp->v_numoutput++;

	/*
	 * If this is an async write we set up additional buffer fields and
	 * place a "cleaning" entry on the inuse queue.
	 */
	s = splbio();
	if (sync == FALSE) {
		spc->spc_flags = 0;
		spc->spc_object = object;
		for (i = 0; i < count; i++)
			spc->spc_m[i] = m[i];
		spc->spc_count = count;
		/*
		 * the completion routine for async writes
		 */
		bp->b_flags |= B_CALL;
		bp->b_iodone = swap_pager_iodone;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bcount;
		object->un_pager.swp.swp_poip++;
		TAILQ_INSERT_TAIL(&swap_pager_inuse, spc, spc_list);
	} else {
		object->un_pager.swp.swp_poip++;
		bp->b_flags |= B_CALL;
		bp->b_iodone = swap_pager_iodone1;
	}

	cnt.v_swapout++;
	cnt.v_swappgsout += count;
	/*
	 * perform the I/O
	 */
	VOP_STRATEGY(bp);
	if (sync == FALSE) {
		if ((bp->b_flags & B_DONE) == B_DONE) {
			swap_pager_sync();
		}
		splx(s);
		for (i = 0; i < count; i++) {
			rtvals[i] = VM_PAGER_PEND;
		}
		return VM_PAGER_PEND;
	}
	/*
	 * wait for the sync I/O to complete
	 */
	while ((bp->b_flags & B_DONE) == 0) {
		tsleep(bp, PVM, "swwrt", 0);
	}
	if (bp->b_flags & B_ERROR) {
		printf("swap_pager: I/O error - pageout failed; blkno %d, size %d, error %d\n",
		    bp->b_blkno, bp->b_bcount, bp->b_error);
		rv = VM_PAGER_ERROR;
	} else {
		rv = VM_PAGER_OK;
	}

	object->un_pager.swp.swp_poip--;
	if (object->un_pager.swp.swp_poip == 0)
		wakeup(object);

	if (bp->b_vp)
		pbrelvp(bp);
	if (bp->b_flags & B_WANTED)
		wakeup(bp);

	splx(s);

	/*
	 * remove the mapping for kernel virtual
	 */
	pmap_qremove(kva, count);

	/*
	 * if we have written the page, then indicate that the page is clean.
	 */
	if (rv == VM_PAGER_OK) {
		for (i = 0; i < count; i++) {
			if (rtvals[i] == VM_PAGER_OK) {
				pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
				m[i]->dirty = 0;
				/*
				 * optimization, if a page has been read
				 * during the pageout process, we activate it.
				 */
				if ((m[i]->queue != PQ_ACTIVE) &&
				    ((m[i]->flags & (PG_WANTED|PG_REFERENCED)) ||
				    pmap_is_referenced(VM_PAGE_TO_PHYS(m[i])))) {
					vm_page_activate(m[i]);
				}
			}
		}
	} else {
		for (i = 0; i < count; i++) {
			rtvals[i] = rv;
		}
	}

	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
	TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
	swap_pager_free_count++;
	if (swap_pager_needflags & SWAP_FREE_NEEDED) {
		wakeup(&swap_pager_free);
	}
	if (swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
		pagedaemon_wakeup();
	swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
	return (rv);
}
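
/*
 * A minimal calling sketch for swap_pager_putpages (illustrative only;
 * the array name "ma" and count "n" are hypothetical, and the real
 * callers live in the pageout daemon and vm_pager code):
 *
 *	int rtvals[MAX_PAGEOUT_CLUSTER];
 *	int rv = swap_pager_putpages(object, ma, n, FALSE, rtvals);
 *
 * An async call returns VM_PAGER_PEND for every page actually queued;
 * completion is reported through swap_pager_iodone, and the done list
 * is drained by swap_pager_sync, which is also where the pages are
 * finally unbusied via swap_pager_finish.
 */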

static void
swap_pager_sync()
{
	register swp_clean_t spc, tspc;
	register int s;

	tspc = NULL;
	if (TAILQ_FIRST(&swap_pager_done) == NULL)
		return;
	for (;;) {
		s = splbio();
		/*
		 * Lookup and removal from the done list must be done at
		 * splbio() to avoid conflicts with swap_pager_iodone.
		 */
		while ((spc = TAILQ_FIRST(&swap_pager_done)) != 0) {
			pmap_qremove(spc->spc_kva, spc->spc_count);
			swap_pager_finish(spc);
			TAILQ_REMOVE(&swap_pager_done, spc, spc_list);
			goto doclean;
		}

		/*
		 * No operations done, that's all we can do for now.
		 */

		splx(s);
		break;

		/*
		 * The desired page was found to be busy earlier in the scan
		 * but has since completed.
		 */
doclean:
		if (tspc && tspc == spc) {
			tspc = NULL;
		}
		spc->spc_flags = 0;
		TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
		swap_pager_free_count++;
		if (swap_pager_needflags & SWAP_FREE_NEEDED) {
			wakeup(&swap_pager_free);
		}
		if (swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
			pagedaemon_wakeup();
		swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
		splx(s);
	}

	return;
}

void
swap_pager_finish(spc)
	register swp_clean_t spc;
{
	vm_object_t object = spc->spc_m[0]->object;
	int i;

	object->paging_in_progress -= spc->spc_count;
	if ((object->paging_in_progress == 0) &&
	    (object->flags & OBJ_PIPWNT)) {
		object->flags &= ~OBJ_PIPWNT;
		wakeup(object);
	}

	/*
	 * If no error, mark as clean and inform the pmap system.  If error,
	 * mark as dirty so we will try again.  (XXX could get stuck doing
	 * this, should give up after a while)
	 */
	if (spc->spc_flags & SPC_ERROR) {
		for (i = 0; i < spc->spc_count; i++) {
			printf("swap_pager_finish: I/O error, clean of page %lx failed\n",
			    (u_long) VM_PAGE_TO_PHYS(spc->spc_m[i]));
		}
	} else {
		for (i = 0; i < spc->spc_count; i++) {
			pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i]));
			spc->spc_m[i]->dirty = 0;
			if ((spc->spc_m[i]->queue != PQ_ACTIVE) &&
			    ((spc->spc_m[i]->flags & PG_WANTED) || pmap_is_referenced(VM_PAGE_TO_PHYS(spc->spc_m[i]))))
				vm_page_activate(spc->spc_m[i]);
		}
	}

	for (i = 0; i < spc->spc_count; i++) {
		/*
		 * we wake up any processes that are waiting on these pages.
		 */
		PAGE_WAKEUP(spc->spc_m[i]);
	}
	nswiodone -= spc->spc_count;

	return;
}

/*
 * swap_pager_iodone
 */
static void
swap_pager_iodone(bp)
	register struct buf *bp;
{
	register swp_clean_t spc;
	int s;

	s = splbio();
	spc = (swp_clean_t) bp->b_spc;
	TAILQ_REMOVE(&swap_pager_inuse, spc, spc_list);
	TAILQ_INSERT_TAIL(&swap_pager_done, spc, spc_list);
	if (bp->b_flags & B_ERROR) {
		spc->spc_flags |= SPC_ERROR;
		printf("swap_pager: I/O error - async %s failed; blkno %lu, size %ld, error %d\n",
		    (bp->b_flags & B_READ) ? "pagein" : "pageout",
		    (u_long) bp->b_blkno, bp->b_bcount, bp->b_error);
	}

	if (bp->b_vp)
		pbrelvp(bp);

/*
	if (bp->b_flags & B_WANTED)
*/
	wakeup(bp);

	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);

	nswiodone += spc->spc_count;
	if (--spc->spc_object->un_pager.swp.swp_poip == 0) {
		wakeup(spc->spc_object);
	}
	if ((swap_pager_needflags & SWAP_FREE_NEEDED) ||
	    TAILQ_FIRST(&swap_pager_inuse) == 0) {
		swap_pager_needflags &= ~SWAP_FREE_NEEDED;
		wakeup(&swap_pager_free);
	}

	if (swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) {
		swap_pager_needflags &= ~SWAP_FREE_NEEDED_BY_PAGEOUT;
		pagedaemon_wakeup();
	}

	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	if ((TAILQ_FIRST(&swap_pager_inuse) == NULL) ||
	    ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min &&
	    nswiodone + cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min)) {
		pagedaemon_wakeup();
	}
	splx(s);
}
1663