xref: /freebsd/sys/vm/swap_pager.c (revision 6990ffd8a95caaba6858ad44ff1b3157d1efba8f)
1 /*
2  * Copyright (c) 1998 Matthew Dillon,
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1990 University of Utah.
5  * Copyright (c) 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *				New Swap System
41  *				Matthew Dillon
42  *
43  * Radix Bitmap 'blists'.
44  *
45  *	- The new swapper uses the new radix bitmap code.  This should scale
46  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
47  *	  arbitrary degree of fragmentation.
48  *
49  * Features:
50  *
51  *	- on the fly reallocation of swap during putpages.  The new system
52  *	  does not try to keep previously allocated swap blocks for dirty
53  *	  pages.
54  *
55  *	- on the fly deallocation of swap
56  *
57  *	- No more garbage collection required.  Unnecessarily allocated swap
58  *	  blocks only exist for dirty vm_page_t's now and these are already
59  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
60  *	  removal of invalidated swap blocks when a page is destroyed
61  *	  or renamed.
62  *
63  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64  *
65  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
66  *
67  * $FreeBSD$
68  */
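
/*
 * A minimal sketch (comment only, not compiled) of how the radix
 * bitmap 'blist' interface described above is driven.  The calls
 * mirror <sys/blist.h> as believed to exist at this revision, and
 * 'nblks' / 'npages' are hypothetical counts:
 *
 *	struct blist *bl;
 *	daddr_t blk;
 *
 *	bl = blist_create(nblks);		(one bit per page of swap)
 *	blk = blist_alloc(bl, npages);		(SWAPBLK_NONE on failure)
 *	if (blk != SWAPBLK_NONE)
 *		blist_free(bl, blk, npages);	(return the run to the map)
 *	blist_destroy(bl);
 */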
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/conf.h>
73 #include <sys/kernel.h>
74 #include <sys/proc.h>
75 #include <sys/bio.h>
76 #include <sys/buf.h>
77 #include <sys/vnode.h>
78 #include <sys/malloc.h>
79 #include <sys/vmmeter.h>
80 #include <sys/sysctl.h>
81 #include <sys/blist.h>
82 #include <sys/lock.h>
83 #include <sys/sx.h>
85 
86 #ifndef MAX_PAGEOUT_CLUSTER
87 #define MAX_PAGEOUT_CLUSTER 16
88 #endif
89 
90 #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
91 
92 #include "opt_swap.h"
93 #include <vm/vm.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vm_object.h>
98 #include <vm/vm_page.h>
99 #include <vm/vm_pager.h>
100 #include <vm/vm_pageout.h>
101 #include <vm/vm_zone.h>
102 #include <vm/swap_pager.h>
103 #include <vm/vm_extern.h>
104 
105 #define SWM_FREE	0x02	/* free, period			*/
106 #define SWM_POP		0x04	/* pop out			*/
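
/*
 * A minimal sketch of what the SWM_ flags select in
 * swp_pager_meta_ctl() (defined toward the end of this file);
 * hypothetical calls shown only to illustrate the flag semantics:
 *
 *	blk = swp_pager_meta_ctl(object, pindex, 0);		(peek only)
 *	blk = swp_pager_meta_ctl(object, pindex, SWM_POP);	(caller owns blk)
 *	(void) swp_pager_meta_ctl(object, pindex, SWM_FREE);	(block is freed)
 */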
107 
108 /*
109  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
110  * in the old system.
111  */
112 
113 extern int vm_swap_size;	/* number of free swap blocks, in pages */
114 
115 int swap_pager_full;		/* swap space exhaustion (task killing) */
116 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
117 static int nsw_rcount;		/* free read buffers			*/
118 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
119 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
120 static int nsw_wcount_async_max;/* assigned maximum			*/
121 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
122 
123 struct blist *swapblist;
124 static struct swblock **swhash;
125 static int swhash_mask;
126 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
127 static struct sx sw_alloc_sx;
128 
129 /* from vm_swap.c */
130 extern struct vnode *swapdev_vp;
131 extern struct swdevt *swdevt;
132 extern int nswdev;
133 
134 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
135         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
136 
137 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)
138 
139 /*
140  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
141  * of searching a named list by hashing it just a little.
142  */
143 
144 #define NOBJLISTS		8
145 
146 #define NOBJLIST(handle)	\
147 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
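
/*
 * Worked example of the bucket selection above, assuming NOBJLISTS
 * remains 8: a handle of 0x12345670 is shifted right 4 bits to drop
 * alignment zeroes, giving 0x01234567, which masked with 7 selects
 * bucket 7.  Handles differing only in their low 4 bits share a
 * list.
 */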
148 
149 static struct mtx sw_alloc_mtx;	/* protect list manipulation */
150 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
151 struct pagerlst		swap_pager_un_object_list;
152 vm_zone_t		swap_zone;
153 
154 /*
155  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
156  * calls hooked from other parts of the VM system and do not appear here.
157  * (see vm/swap_pager.h).
158  */
159 
160 static vm_object_t
161 		swap_pager_alloc __P((void *handle, vm_ooffset_t size,
162 				      vm_prot_t prot, vm_ooffset_t offset));
163 static void	swap_pager_dealloc __P((vm_object_t object));
164 static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
165 static void	swap_pager_init __P((void));
166 static void	swap_pager_unswapped __P((vm_page_t));
167 static void	swap_pager_strategy __P((vm_object_t, struct bio *));
168 
169 struct pagerops swappagerops = {
170 	swap_pager_init,	/* early system initialization of pager	*/
171 	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
172 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
173 	swap_pager_getpages,	/* pagein				*/
174 	swap_pager_putpages,	/* pageout				*/
175 	swap_pager_haspage,	/* get backing store status for page	*/
176 	swap_pager_unswapped,	/* remove swap related to page		*/
177 	swap_pager_strategy	/* pager strategy call			*/
178 };
179 
180 static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
181 static void flushchainbuf(struct buf *nbp);
182 static void waitchainbuf(struct bio *bp, int count, int done);
183 
184 /*
185  * dmmax is in page-sized chunks with the new swap system.  It was
186  * dev-bsized chunks in the old.  dmmax is always a power of 2.
187  *
188  * swap_*() routines are externally accessible.  swp_*() routines are
189  * internal.
190  */
191 
192 int dmmax;
193 static int dmmax_mask;
194 int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
195 int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
196 
197 SYSCTL_INT(_vm, OID_AUTO, dmmax,
198 	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");
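
/*
 * Worked example of the interleave arithmetic, assuming the default
 * SWB_NPAGES of 16 (so dmmax == 32) and nswdev == 2: BLK2DEVIDX()
 * computes blk / dmmax % nswdev, sending blocks 0..31 to device 0,
 * 32..63 to device 1, 64..95 back to device 0, and so on.
 */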
199 
200 static __inline void	swp_sizecheck __P((void));
201 static void	swp_pager_sync_iodone __P((struct buf *bp));
202 static void	swp_pager_async_iodone __P((struct buf *bp));
203 
204 /*
205  * Swap bitmap functions
206  */
207 
208 static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
209 static __inline daddr_t	swp_pager_getswapspace __P((int npages));
210 
211 /*
212  * Metadata functions
213  */
214 
215 static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
216 static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
217 static void swp_pager_meta_free_all __P((vm_object_t));
218 static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
219 
220 /*
221  * SWP_SIZECHECK() -	update swap_pager_full indication
222  *
223  *	update the swap_pager_almost_full indication and warn when we are
224  *	about to run out of swap space, using lowat/hiwat hysteresis.
225  *
226  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
227  *
228  *	No restrictions on call
229  *	This routine may not block.
230  *	This routine must be called at splvm()
231  */
232 
233 static __inline void
234 swp_sizecheck()
235 {
236 	GIANT_REQUIRED;
237 
238 	if (vm_swap_size < nswap_lowat) {
239 		if (swap_pager_almost_full == 0) {
240 			printf("swap_pager: out of swap space\n");
241 			swap_pager_almost_full = 1;
242 		}
243 	} else {
244 		swap_pager_full = 0;
245 		if (vm_swap_size > nswap_hiwat)
246 			swap_pager_almost_full = 0;
247 	}
248 }
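
/*
 * Worked example of the hysteresis above, using the default
 * watermarks (nswap_lowat == 128, nswap_hiwat == 512): the
 * almost-full warning latches when free swap drops below 128 pages
 * and clears only once free swap climbs back above 512 pages, so a
 * system hovering near the low watermark does not flap the warning.
 */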
249 
250 /*
251  * SWAP_PAGER_INIT() -	initialize the swap pager!
252  *
253  *	Expected to be started from system init.  NOTE:  This code is run
254  *	before much else so be careful what you depend on.  Most of the VM
255  *	system has yet to be initialized at this point.
256  */
257 
258 static void
259 swap_pager_init()
260 {
261 	/*
262 	 * Initialize object lists
263 	 */
264 	int i;
265 
266 	for (i = 0; i < NOBJLISTS; ++i)
267 		TAILQ_INIT(&swap_pager_object_list[i]);
268 	TAILQ_INIT(&swap_pager_un_object_list);
269 	mtx_init(&sw_alloc_mtx, "swap_pager list", MTX_DEF);
270 
271 	/*
272 	 * Device Stripe, in PAGE_SIZE'd blocks
273 	 */
274 
275 	dmmax = SWB_NPAGES * 2;
276 	dmmax_mask = ~(dmmax - 1);
277 }
278 
279 /*
280  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
281  *
282  *	Expected to be started from pageout process once, prior to entering
283  *	its main loop.
284  */
285 
286 void
287 swap_pager_swap_init()
288 {
289 	int n, n2;
290 
291 	/*
292 	 * Number of in-transit swap bp operations.  Don't
293 	 * exhaust the pbufs completely.  Make sure we
294 	 * initialize workable values (0 will work for hysteresis
295 	 * but it isn't very efficient).
296 	 *
297 	 * The nsw_cluster_max is constrained by the bp->b_pages[]
298 	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
299 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
300 	 * constrained by the swap device interleave stripe size.
301 	 *
302 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
303 	 * designed to prevent other I/O from having high latencies due to
304 	 * our pageout I/O.  The value 4 works well for one or two active swap
305 	 * devices but is probably a little low if you have more.  Even so,
306 	 * a higher value would probably generate only a limited improvement
307 	 * with three or four active swap devices since the system does not
308 	 * typically have to pageout at extreme bandwidths.   We will want
309 	 * at least 2 per swap devices, and 4 is a pretty good value if you
310 	 * at least 2 per swap device, and 4 is a pretty good value if you
311 	 * So it all works out pretty well.
312 	 */
313 
314 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
315 
316 	mtx_lock(&pbuf_mtx);
317 	nsw_rcount = (nswbuf + 1) / 2;
318 	nsw_wcount_sync = (nswbuf + 3) / 4;
319 	nsw_wcount_async = 4;
320 	nsw_wcount_async_max = nsw_wcount_async;
321 	mtx_unlock(&pbuf_mtx);
322 
323 	/*
324 	 * Initialize our zone.  Right now I'm just guessing on the number
325 	 * we need based on the number of pages in the system.  Each swblock
326 	 * can hold 16 pages, so this is probably overkill.  This reservation
327 	 * is typically limited to around 70MB by default.
328 	 */
329 
330 	n = cnt.v_page_count;
331 	if (maxswzone && n > maxswzone / sizeof(struct swblock))
332 		n = maxswzone / sizeof(struct swblock);
333 	n2 = n;
334 
335 	do {
336 		swap_zone = zinit(
337 		       "SWAPMETA",
338 		       sizeof(struct swblock),
339 		       n,
340 		       ZONE_INTERRUPT,
341 		       1
342 		       );
343 		if (swap_zone != NULL)
344 			break;
345 		/*
346 		 * if the allocation failed, try a zone two thirds the
347 		 * size of the previous attempt.
348 		 */
349 		n -= ((n + 2) / 3);
350 	} while (n > 0);
351 
352 	if (swap_zone == NULL)
353 		panic("failed to zinit swap_zone.");
354 	if (n2 != n)
355 		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
356 	n2 = n;
357 
358 	/*
359 	 * Initialize our meta-data hash table.  The swapper does not need to
360 	 * be quite as efficient as the VM system, so we do not use an
361 	 * oversized hash table.
362 	 *
363 	 * 	n: 		size of hash table, must be power of 2
364 	 *	swhash_mask:	hash table index mask
365 	 */
366 
367 	for (n = 1; n < n2 / 8; n *= 2)
368 		;
369 
370 	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
371 
372 	swhash_mask = n - 1;
373 }
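
/*
 * Worked example of the hash sizing above: with n2 == 65536 usable
 * swblock entries, n2 / 8 is 8192, so the power-of-2 search stops at
 * n == 8192 buckets and swhash_mask becomes 8191 (0x1fff).  A fully
 * populated table then averages about eight swblocks per bucket.
 */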
374 
375 /*
376  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
377  *			its metadata structures.
378  *
379  *	This routine is called from the mmap and fork code to create a new
380  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
381  *	and then converting it with swp_pager_meta_build().
382  *
383  *	This routine may block in vm_object_allocate() and create a named
384  *	object lookup race, so we must interlock.   We must also run at
385  *	splvm() for the object lookup to handle races with interrupts, but
386  *	we do not have to maintain splvm() in between the lookup and the
387  *	add because (I believe) it is not possible to attempt to create
388  *	a new swap object w/handle when a default object with that handle
389  *	already exists.
390  */
391 
392 static vm_object_t
393 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
394 		 vm_ooffset_t offset)
395 {
396 	vm_object_t object;
397 
398 	GIANT_REQUIRED;
399 
400 	if (handle) {
401 		/*
402 		 * Reference existing named region or allocate new one.  There
403 		 * should not be a race here against swp_pager_meta_build()
404 		 * as called from vm_page_remove() in regards to the lookup
405 		 * of the handle.
406 		 */
407 		sx_xlock(&sw_alloc_sx);
408 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
409 
410 		if (object != NULL) {
411 			vm_object_reference(object);
412 		} else {
413 			object = vm_object_allocate(OBJT_DEFAULT,
414 				OFF_TO_IDX(offset + PAGE_MASK + size));
415 			object->handle = handle;
416 
417 			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
418 		}
419 		sx_xunlock(&sw_alloc_sx);
420 	} else {
421 		object = vm_object_allocate(OBJT_DEFAULT,
422 			OFF_TO_IDX(offset + PAGE_MASK + size));
423 
424 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
425 	}
426 
427 	return (object);
428 }
429 
430 /*
431  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
432  *
433  *	The swap backing for the object is destroyed.  The code is
434  *	designed such that we can reinstantiate it later, but this
435  *	routine is typically called only when the entire object is
436  *	about to be destroyed.
437  *
438  *	This routine may block, but no longer does.
439  *	This routine formerly blocked, but no longer does.
440  *	The object must be locked or unreferenceable.
441  */
442 
443 static void
444 swap_pager_dealloc(object)
445 	vm_object_t object;
446 {
447 	int s;
448 
449 	GIANT_REQUIRED;
450 
451 	/*
452 	 * Remove from list right away so lookups will fail if we block for
453 	 * pageout completion.
454 	 */
455 	mtx_lock(&sw_alloc_mtx);
456 	if (object->handle == NULL) {
457 		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
458 	} else {
459 		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
460 	}
461 	mtx_unlock(&sw_alloc_mtx);
462 
463 	vm_object_pip_wait(object, "swpdea");
464 
465 	/*
466 	 * Free all remaining metadata.  We only bother to free it from
467 	 * the swap meta data.  We do not attempt to free swapblk's still
468 	 * associated with vm_page_t's for this object.  We do not care
469 	 * if paging is still in progress on some objects.
470 	 */
471 	s = splvm();
472 	swp_pager_meta_free_all(object);
473 	splx(s);
474 }
475 
476 /************************************************************************
477  *			SWAP PAGER BITMAP ROUTINES			*
478  ************************************************************************/
479 
480 /*
481  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
482  *
483  *	Allocate swap for the requested number of pages.  The starting
484  *	swap block number (a page index) is returned or SWAPBLK_NONE
485  *	if the allocation failed.
486  *
487  *	Also has the side effect of advising that somebody made a mistake
488  *	when they configured swap and didn't configure enough.
489  *
490  *	Must be called at splvm() to avoid races with bitmap frees from
491  *	vm_page_remove() aka swap_pager_page_removed().
492  *
493  *	This routine may not block
494  *	This routine must be called at splvm().
495  */
496 
497 static __inline daddr_t
498 swp_pager_getswapspace(npages)
499 	int npages;
500 {
501 	daddr_t blk;
502 
503 	GIANT_REQUIRED;
504 
505 	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
506 		if (swap_pager_full != 2) {
507 			printf("swap_pager_getswapspace: failed\n");
508 			swap_pager_full = 2;
509 			swap_pager_almost_full = 1;
510 		}
511 	} else {
512 		vm_swap_size -= npages;
513 		/* per-swap area stats */
514 		swdevt[BLK2DEVIDX(blk)].sw_used += npages;
515 		swp_sizecheck();
516 	}
517 	return(blk);
518 }
519 
520 /*
521  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
522  *
523  *	This routine returns the specified swap blocks back to the bitmap.
524  *
525  *	Note:  This routine may not block (it could in the old swap code),
526  *	and through the use of the new blist routines it does not block.
527  *
528  *	We must be called at splvm() to avoid races with bitmap frees from
529  *	vm_page_remove() aka swap_pager_page_removed().
530  *
531  *	This routine may not block
532  *	This routine must be called at splvm().
533  */
534 
535 static __inline void
536 swp_pager_freeswapspace(blk, npages)
537 	daddr_t blk;
538 	int npages;
539 {
540 	GIANT_REQUIRED;
541 
542 	blist_free(swapblist, blk, npages);
543 	vm_swap_size += npages;
544 	/* per-swap area stats */
545 	swdevt[BLK2DEVIDX(blk)].sw_used -= npages;
546 	swp_sizecheck();
547 }
548 
549 /*
550  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
551  *				range within an object.
552  *
553  *	This is a globally accessible routine.
554  *
555  *	This routine removes swapblk assignments from swap metadata.
556  *
557  *	The external callers of this routine typically have already destroyed
558  *	or renamed vm_page_t's associated with this range in the object so
559  *	we should be ok.
560  *
561  *	This routine may be called at any spl.  We up our spl to splvm temporarily
562  *	in order to perform the metadata removal.
563  */
564 
565 void
566 swap_pager_freespace(object, start, size)
567 	vm_object_t object;
568 	vm_pindex_t start;
569 	vm_size_t size;
570 {
571 	int s = splvm();
572 
573 	GIANT_REQUIRED;
574 	swp_pager_meta_free(object, start, size);
575 	splx(s);
576 }
577 
578 /*
579  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
580  *
581  *	Assigns swap blocks to the specified range within the object.  The
582  *	swap blocks are not zerod.  Any previous swap assignment is destroyed.
583  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
584  *	Returns 0 on success, -1 on failure.
585  */
586 
587 int
588 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
589 {
590 	int s;
591 	int n = 0;
592 	daddr_t blk = SWAPBLK_NONE;
593 	vm_pindex_t beg = start;	/* save start index */
594 
595 	s = splvm();
596 	while (size) {
597 		if (n == 0) {
598 			n = BLIST_MAX_ALLOC;
599 			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
600 				n >>= 1;
601 				if (n == 0) {
602 					swp_pager_meta_free(object, beg, start - beg);
603 					splx(s);
604 					return(-1);
605 				}
606 			}
607 		}
608 		swp_pager_meta_build(object, start, blk);
609 		--size;
610 		++start;
611 		++blk;
612 		--n;
613 	}
614 	swp_pager_meta_free(object, start, n);
615 	splx(s);
616 	return(0);
617 }
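
/*
 * A minimal usage sketch for swap_pager_reserve(); the caller and
 * 'npages' below are hypothetical, in the style of a swap-backed
 * block device that pre-assigns backing store for a whole object:
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(OBJT_DEFAULT, npages);
 *	if (swap_pager_reserve(obj, 0, npages) != 0)
 *		return (ENOSPC);	(swap is overcommitted)
 */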
618 
619 /*
620  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
621  *			and destroy the source.
622  *
623  *	Copy any valid swapblks from the source to the destination.  In
624  *	cases where both the source and destination have a valid swapblk,
625  *	we keep the destination's.
626  *
627  *	This routine is allowed to block.  It may block allocating metadata
628  *	indirectly through swp_pager_meta_build() or if paging is still in
629  *	progress on the source.
630  *
631  *	This routine can be called at any spl
632  *
633  *	XXX vm_object_collapse() kinda expects us not to block because we
634  *	supposedly do not need to allocate memory, but for the moment we
635  *	*may* have to get a little memory from the zone allocator, but
636  *	it is taken from the interrupt memory.  We should be ok.
637  *
638  *	The source object contains no vm_page_t's (which is just as well)
639  *
640  *	The source object is of type OBJT_SWAP.
641  *
642  *	The source and destination objects must be locked or
643  *	inaccessible (XXX are they ?)
644  */
645 
646 void
647 swap_pager_copy(srcobject, dstobject, offset, destroysource)
648 	vm_object_t srcobject;
649 	vm_object_t dstobject;
650 	vm_pindex_t offset;
651 	int destroysource;
652 {
653 	vm_pindex_t i;
654 	int s;
655 
656 	GIANT_REQUIRED;
657 
658 	s = splvm();
659 	/*
660 	 * If destroysource is set, we remove the source object from the
661 	 * swap_pager internal queue now.
662 	 */
663 
664 	if (destroysource) {
665 		mtx_lock(&sw_alloc_mtx);
666 		if (srcobject->handle == NULL) {
667 			TAILQ_REMOVE(
668 			    &swap_pager_un_object_list,
669 			    srcobject,
670 			    pager_object_list
671 			);
672 		} else {
673 			TAILQ_REMOVE(
674 			    NOBJLIST(srcobject->handle),
675 			    srcobject,
676 			    pager_object_list
677 			);
678 		}
679 		mtx_unlock(&sw_alloc_mtx);
680 	}
681 
682 	/*
683 	 * transfer source to destination.
684 	 */
685 
686 	for (i = 0; i < dstobject->size; ++i) {
687 		daddr_t dstaddr;
688 
689 		/*
690 		 * Locate (without changing) the swapblk on the destination,
691 		 * unless it is invalid in which case free it silently, or
692 		 * if the destination is a resident page, in which case the
693 		 * source is thrown away.
694 		 */
695 
696 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
697 
698 		if (dstaddr == SWAPBLK_NONE) {
699 			/*
700 			 * Destination has no swapblk and is not resident,
701 			 * copy source.
702 			 */
703 			daddr_t srcaddr;
704 
705 			srcaddr = swp_pager_meta_ctl(
706 			    srcobject,
707 			    i + offset,
708 			    SWM_POP
709 			);
710 
711 			if (srcaddr != SWAPBLK_NONE)
712 				swp_pager_meta_build(dstobject, i, srcaddr);
713 		} else {
714 			/*
715 			 * Destination has valid swapblk or it is represented
716 			 * by a resident page.  We destroy the sourceblock.
717 			 */
718 
719 			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
720 		}
721 	}
722 
723 	/*
724 	 * Free left over swap blocks in source.
725 	 *
726 	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
727 	 * double-remove the object from the swap queues.
728 	 */
729 
730 	if (destroysource) {
731 		swp_pager_meta_free_all(srcobject);
732 		/*
733 		 * Reverting the type is not necessary, the caller is going
734 		 * to destroy srcobject directly, but I'm doing it here
735 		 * for consistency since we've removed the object from its
736 		 * queues.
737 		 */
738 		srcobject->type = OBJT_DEFAULT;
739 	}
740 	splx(s);
741 }
742 
743 /*
744  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
745  *				the requested page.
746  *
747  *	We determine whether good backing store exists for the requested
748  *	page and return TRUE if it does, FALSE if it doesn't.
749  *
750  *	If TRUE, we also try to determine how much valid, contiguous backing
751  *	store exists before and after the requested page within a reasonable
752  *	distance.  We do not try to restrict it to the swap device stripe
753  *	(that is handled in getpages/putpages).  It probably isn't worth
754  *	doing here.
755  */
756 
757 boolean_t
758 swap_pager_haspage(object, pindex, before, after)
759 	vm_object_t object;
760 	vm_pindex_t pindex;
761 	int *before;
762 	int *after;
763 {
764 	daddr_t blk0;
765 	int s;
766 
767 	/*
768 	 * do we have good backing store at the requested index ?
769 	 */
770 
771 	s = splvm();
772 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
773 
774 	if (blk0 == SWAPBLK_NONE) {
775 		splx(s);
776 		if (before)
777 			*before = 0;
778 		if (after)
779 			*after = 0;
780 		return (FALSE);
781 	}
782 
783 	/*
784 	 * find backwards-looking contiguous good backing store
785 	 */
786 
787 	if (before != NULL) {
788 		int i;
789 
790 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
791 			daddr_t blk;
792 
793 			if (i > pindex)
794 				break;
795 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
796 			if (blk != blk0 - i)
797 				break;
798 		}
799 		*before = (i - 1);
800 	}
801 
802 	/*
803 	 * find forward-looking contiguous good backing store
804 	 */
805 
806 	if (after != NULL) {
807 		int i;
808 
809 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
810 			daddr_t blk;
811 
812 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
813 			if (blk != blk0 + i)
814 				break;
815 		}
816 		*after = (i - 1);
817 	}
818 	splx(s);
819 	return (TRUE);
820 }
821 
822 /*
823  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
824  *
825  *	This removes any associated swap backing store, whether valid or
826  *	not, from the page.
827  *
828  *	This routine is typically called when a page is made dirty, at
829  *	which point any associated swap can be freed.  MADV_FREE also
830  *	calls us in a special-case situation
831  *
832  *	NOTE!!!  If the page is clean and the swap was valid, the caller
833  *	should make the page dirty before calling this routine.  This routine
834  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
835  *	depends on it.
836  *
837  *	This routine may not block
838  *	This routine must be called at splvm()
839  */
840 
841 static void
842 swap_pager_unswapped(m)
843 	vm_page_t m;
844 {
845 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
846 }
847 
848 /*
849  * SWAP_PAGER_STRATEGY() - read, write, free blocks
850  *
851  *	This implements the vm_pager_strategy() interface to swap and allows
852  *	other parts of the system to directly access swap as backing store
853  *	through vm_objects of type OBJT_SWAP.  This is intended to be a
854  *	cacheless interface ( i.e. caching occurs at higher levels ).
855  *	Therefore we do not maintain any resident pages.  All I/O goes
856  *	directly to and from the swap device.
857  *
858  *	Note that b_blkno is scaled for PAGE_SIZE
859  *
860  *	We currently attempt to run I/O synchronously or asynchronously as
861  *	the caller requests.  This isn't perfect because we lose error
862  *	sequencing when we run multiple ops in parallel to satisfy a request.
863  *	But this is swap, so we let it all hang out.
864  */
865 
866 static void
867 swap_pager_strategy(vm_object_t object, struct bio *bp)
868 {
869 	vm_pindex_t start;
870 	int count;
871 	int s;
872 	char *data;
873 	struct buf *nbp = NULL;
874 
875 	GIANT_REQUIRED;
876 
877 	/* XXX: KASSERT instead ? */
878 	if (bp->bio_bcount & PAGE_MASK) {
879 		biofinish(bp, NULL, EINVAL);
880 		printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
881 		return;
882 	}
883 
884 	/*
885 	 * Clear error indication, initialize page index, count, data pointer.
886 	 */
887 
888 	bp->bio_error = 0;
889 	bp->bio_flags &= ~BIO_ERROR;
890 	bp->bio_resid = bp->bio_bcount;
891 
892 	start = bp->bio_pblkno;
893 	count = howmany(bp->bio_bcount, PAGE_SIZE);
894 	data = bp->bio_data;
895 
896 	s = splvm();
897 
898 	/*
899 	 * Deal with BIO_DELETE
900 	 */
901 
902 	if (bp->bio_cmd == BIO_DELETE) {
903 		/*
904 		 * FREE PAGE(s) - destroy underlying swap that is no longer
905 		 *		  needed.
906 		 */
907 		swp_pager_meta_free(object, start, count);
908 		splx(s);
909 		bp->bio_resid = 0;
910 		biodone(bp);
911 		return;
912 	}
913 
914 	/*
915 	 * Execute read or write
916 	 */
917 	while (count > 0) {
918 		daddr_t blk;
919 
920 		/*
921 		 * Obtain block.  If block not found and writing, allocate a
922 		 * new block and build it into the object.
923 		 */
924 
925 		blk = swp_pager_meta_ctl(object, start, 0);
926 		if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
927 			blk = swp_pager_getswapspace(1);
928 			if (blk == SWAPBLK_NONE) {
929 				bp->bio_error = ENOMEM;
930 				bp->bio_flags |= BIO_ERROR;
931 				break;
932 			}
933 			swp_pager_meta_build(object, start, blk);
934 		}
935 
936 		/*
937 		 * Do we have to flush our current collection?  Yes if:
938 		 *
939 		 *	- no swap block at this index
940 		 *	- swap block is not contiguous
941 		 *	- we cross a physical disk boundary in the
942 		 *	  stripe.
943 		 */
944 
945 		if (
946 		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
947 		     ((nbp->b_blkno ^ blk) & dmmax_mask)
948 		    )
949 		) {
950 			splx(s);
951 			if (bp->bio_cmd == BIO_READ) {
952 				++cnt.v_swapin;
953 				cnt.v_swappgsin += btoc(nbp->b_bcount);
954 			} else {
955 				++cnt.v_swapout;
956 				cnt.v_swappgsout += btoc(nbp->b_bcount);
957 				nbp->b_dirtyend = nbp->b_bcount;
958 			}
959 			flushchainbuf(nbp);
960 			s = splvm();
961 			nbp = NULL;
962 		}
963 
964 		/*
965 		 * Add new swapblk to nbp, instantiating nbp if necessary.
966 		 * Zero-fill reads are able to take a shortcut.
967 		 */
968 
969 		if (blk == SWAPBLK_NONE) {
970 			/*
971 			 * We can only get here if we are reading.  Since
972 			 * we are at splvm() we can safely modify b_resid,
973 			 * even if chain ops are in progress.
974 			 */
975 			bzero(data, PAGE_SIZE);
976 			bp->bio_resid -= PAGE_SIZE;
977 		} else {
978 			if (nbp == NULL) {
979 				nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
980 				nbp->b_blkno = blk;
981 				nbp->b_bcount = 0;
982 				nbp->b_data = data;
983 			}
984 			nbp->b_bcount += PAGE_SIZE;
985 		}
986 		--count;
987 		++start;
988 		data += PAGE_SIZE;
989 	}
990 
991 	/*
992 	 *  Flush out last buffer
993 	 */
994 
995 	splx(s);
996 
997 	if (nbp) {
998 		if (nbp->b_iocmd == BIO_READ) {
999 			++cnt.v_swapin;
1000 			cnt.v_swappgsin += btoc(nbp->b_bcount);
1001 		} else {
1002 			++cnt.v_swapout;
1003 			cnt.v_swappgsout += btoc(nbp->b_bcount);
1004 			nbp->b_dirtyend = nbp->b_bcount;
1005 		}
1006 		flushchainbuf(nbp);
1007 		/* nbp = NULL; */
1008 	}
1009 	/*
1010 	 * Wait for completion.
1011 	 */
1012 
1013 	waitchainbuf(bp, 0, 1);
1014 }
1015 
1016 /*
1017  * SWAP_PAGER_GETPAGES() - bring pages in from swap
1018  *
1019  *	Attempt to retrieve (m, count) pages from backing store, but make
1020  *	sure we retrieve at least m[reqpage].  We try to load in as large
1021  *	a chunk surrounding m[reqpage] as is contiguous in swap and which
1022  *	belongs to the same object.
1023  *
1024  *	The code is designed for asynchronous operation and
1025  *	immediate-notification of 'reqpage' but tends not to be
1026  *	used that way.  Please do not optimize-out this algorithmic
1027  *	feature, I intend to improve on it in the future.
1028  *
1029  *	The parent has a single vm_object_pip_add() reference prior to
1030  *	calling us and we should return with the same.
1031  *
1032  *	The parent has BUSY'd the pages.  We should return with 'm'
1033  *	left busy, but the others adjusted.
1034  */
1035 
1036 static int
1037 swap_pager_getpages(object, m, count, reqpage)
1038 	vm_object_t object;
1039 	vm_page_t *m;
1040 	int count, reqpage;
1041 {
1042 	struct buf *bp;
1043 	vm_page_t mreq;
1044 	int s;
1045 	int i;
1046 	int j;
1047 	daddr_t blk;
1048 	vm_offset_t kva;
1049 	vm_pindex_t lastpindex;
1050 
1051 	GIANT_REQUIRED;
1052 
1053 	mreq = m[reqpage];
1054 
1055 	if (mreq->object != object) {
1056 		panic("swap_pager_getpages: object mismatch %p/%p",
1057 		    object,
1058 		    mreq->object
1059 		);
1060 	}
1061 	/*
1062 	 * Calculate range to retrieve.  The pages have already been assigned
1063 	 * their swapblks.  We require a *contiguous* range that falls entirely
1064 	 * within a single device stripe.   If we do not supply it, bad things
1065 	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1066 	 * loops are set up such that the case(s) are handled implicitly.
1067 	 *
1068 	 * The swp_*() calls must be made at splvm().  vm_page_free() does
1069 	 * not need to be, but it will go a little faster if it is.
1070 	 */
1071 
1072 	s = splvm();
1073 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1074 
1075 	for (i = reqpage - 1; i >= 0; --i) {
1076 		daddr_t iblk;
1077 
1078 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1079 		if (blk != iblk + (reqpage - i))
1080 			break;
1081 		if ((blk ^ iblk) & dmmax_mask)
1082 			break;
1083 	}
1084 	++i;
1085 
1086 	for (j = reqpage + 1; j < count; ++j) {
1087 		daddr_t jblk;
1088 
1089 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1090 		if (blk != jblk - (j - reqpage))
1091 			break;
1092 		if ((blk ^ jblk) & dmmax_mask)
1093 			break;
1094 	}
1095 
1096 	/*
1097 	 * free pages outside our collection range.   Note: we never free
1098 	 * mreq, it must remain busy throughout.
1099 	 */
1100 
1101 	{
1102 		int k;
1103 
1104 		for (k = 0; k < i; ++k)
1105 			vm_page_free(m[k]);
1106 		for (k = j; k < count; ++k)
1107 			vm_page_free(m[k]);
1108 	}
1109 	splx(s);
1110 
1111 
1112 	/*
1113 	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
1114 	 * still busy, but the others unbusied.
1115 	 */
1116 
1117 	if (blk == SWAPBLK_NONE)
1118 		return(VM_PAGER_FAIL);
1119 
1120 	/*
1121 	 * Get a swap buffer header to perform the IO
1122 	 */
1123 
1124 	bp = getpbuf(&nsw_rcount);
1125 	kva = (vm_offset_t) bp->b_data;
1126 
1127 	/*
1128 	 * map our page(s) into kva for input
1129 	 *
1130 	 * NOTE: B_PAGING is set by pbgetvp()
1131 	 */
1132 
1133 	pmap_qenter(kva, m + i, j - i);
1134 
1135 	bp->b_iocmd = BIO_READ;
1136 	bp->b_iodone = swp_pager_async_iodone;
1137 	bp->b_rcred = bp->b_wcred = proc0.p_ucred;
1138 	bp->b_data = (caddr_t) kva;
1139 	crhold(bp->b_rcred);
1140 	crhold(bp->b_wcred);
1141 	bp->b_blkno = blk - (reqpage - i);
1142 	bp->b_bcount = PAGE_SIZE * (j - i);
1143 	bp->b_bufsize = PAGE_SIZE * (j - i);
1144 	bp->b_pager.pg_reqpage = reqpage - i;
1145 
1146 	{
1147 		int k;
1148 
1149 		for (k = i; k < j; ++k) {
1150 			bp->b_pages[k - i] = m[k];
1151 			vm_page_flag_set(m[k], PG_SWAPINPROG);
1152 		}
1153 	}
1154 	bp->b_npages = j - i;
1155 
1156 	pbgetvp(swapdev_vp, bp);
1157 
1158 	cnt.v_swapin++;
1159 	cnt.v_swappgsin += bp->b_npages;
1160 
1161 	/*
1162 	 * We still hold the lock on mreq, and our automatic completion routine
1163 	 * does not remove it.
1164 	 */
1165 
1166 	vm_object_pip_add(mreq->object, bp->b_npages);
1167 	lastpindex = m[j-1]->pindex;
1168 
1169 	/*
1170 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1171 	 * this point because we automatically release it on completion.
1172 	 * Instead, we look at the one page we are interested in which we
1173 	 * still hold a lock on even through the I/O completion.
1174 	 *
1175 	 * The other pages in our m[] array are also released on completion,
1176 	 * so we cannot assume they are valid anymore either.
1177 	 *
1178 	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1179 	 */
1180 	BUF_KERNPROC(bp);
1181 	BUF_STRATEGY(bp);
1182 
1183 	/*
1184 	 * wait for the page we want to complete.  PG_SWAPINPROG is always
1185 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1186 	 * is set in the meta-data.
1187 	 */
1188 
1189 	s = splvm();
1190 
1191 	while ((mreq->flags & PG_SWAPINPROG) != 0) {
1192 		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1193 		cnt.v_intrans++;
1194 		if (tsleep(mreq, PSWP, "swread", hz*20)) {
1195 			printf(
1196 			    "swap_pager: indefinite wait buffer: device:"
1197 				" %s, blkno: %ld, size: %ld\n",
1198 			    devtoname(bp->b_dev), (long)bp->b_blkno,
1199 			    bp->b_bcount
1200 			);
1201 		}
1202 	}
1203 
1204 	splx(s);
1205 
1206 	/*
1207 	 * mreq is left busied after completion, but all the other pages
1208 	 * are freed.  If we had an unrecoverable read error the page will
1209 	 * not be valid.
1210 	 */
1211 
1212 	if (mreq->valid != VM_PAGE_BITS_ALL) {
1213 		return(VM_PAGER_ERROR);
1214 	} else {
1215 		return(VM_PAGER_OK);
1216 	}
1217 
1218 	/*
1219 	 * A final note: in a low swap situation, we cannot deallocate swap
1220 	 * and mark a page dirty here because the caller is likely to mark
1221 	 * the page clean when we return, causing the page to possibly revert
1222 	 * to all-zero's later.
1223 	 */
1224 }
1225 
1226 /*
1227  *	swap_pager_putpages:
1228  *
1229  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1230  *
1231  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1232  *	are automatically converted to SWAP objects.
1233  *
1234  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
1235  *	vm_page reservation system coupled with properly written VFS devices
1236  *	should ensure that no low-memory deadlock occurs.  This is an area
1237  *	which needs work.
1238  *
1239  *	The parent has N vm_object_pip_add() references prior to
1240  *	calling us and will remove references for rtvals[] that are
1241  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1242  *	completion.
1243  *
1244  *	The parent has soft-busy'd the pages it passes us and will unbusy
1245  *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1246  *	We need to unbusy the rest on I/O completion.
1247  */
1248 
1249 void
1250 swap_pager_putpages(object, m, count, sync, rtvals)
1251 	vm_object_t object;
1252 	vm_page_t *m;
1253 	int count;
1254 	boolean_t sync;
1255 	int *rtvals;
1256 {
1257 	int i;
1258 	int n = 0;
1259 
1260 	GIANT_REQUIRED;
1261 	if (count && m[0]->object != object) {
1262 		panic("swap_pager_putpages: object mismatch %p/%p",
1263 		    object,
1264 		    m[0]->object
1265 		);
1266 	}
1267 	/*
1268 	 * Step 1
1269 	 *
1270 	 * Turn object into OBJT_SWAP
1271 	 * check for bogus sysops
1272 	 * force sync if not pageout process
1273 	 */
1274 
1275 	if (object->type != OBJT_SWAP)
1276 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1277 
1278 	if (curproc != pageproc)
1279 		sync = TRUE;
1280 
1281 	/*
1282 	 * Step 2
1283 	 *
1284 	 * Update nsw parameters from swap_async_max sysctl values.
1285 	 * Do not let the sysop crash the machine with bogus numbers.
1286 	 */
1287 
1288 	mtx_lock(&pbuf_mtx);
1289 	if (swap_async_max != nsw_wcount_async_max) {
1290 		int n;
1291 		int s;
1292 
1293 		/*
1294 		 * limit range
1295 		 */
1296 		if ((n = swap_async_max) > nswbuf / 2)
1297 			n = nswbuf / 2;
1298 		if (n < 1)
1299 			n = 1;
1300 		swap_async_max = n;
1301 
1302 		/*
1303 		 * Adjust difference ( if possible ).  If the current async
1304 		 * count is too low, we may not be able to make the adjustment
1305 		 * at this time.
1306 		 */
1307 		s = splvm();
1308 		n -= nsw_wcount_async_max;
1309 		if (nsw_wcount_async + n >= 0) {
1310 			nsw_wcount_async += n;
1311 			nsw_wcount_async_max += n;
1312 			wakeup(&nsw_wcount_async);
1313 		}
1314 		splx(s);
1315 	}
1316 	mtx_unlock(&pbuf_mtx);
1317 
1318 	/*
1319 	 * Step 3
1320 	 *
1321 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1322 	 * The page is left dirty until the pageout operation completes
1323 	 * successfully.
1324 	 */
1325 
1326 	for (i = 0; i < count; i += n) {
1327 		int s;
1328 		int j;
1329 		struct buf *bp;
1330 		daddr_t blk;
1331 
1332 		/*
1333 		 * Maximum I/O size is limited by a number of factors.
1334 		 */
1335 
1336 		n = min(BLIST_MAX_ALLOC, count - i);
1337 		n = min(n, nsw_cluster_max);
1338 
1339 		s = splvm();
1340 
1341 		/*
1342 		 * Get biggest block of swap we can.  If we fail, fall
1343 		 * back and try to allocate a smaller block.  Don't go
1344 		 * overboard trying to allocate space if it would overly
1345 		 * fragment swap.
1346 		 */
1347 		while (
1348 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1349 		    n > 4
1350 		) {
1351 			n >>= 1;
1352 		}
1353 		if (blk == SWAPBLK_NONE) {
1354 			for (j = 0; j < n; ++j)
1355 				rtvals[i+j] = VM_PAGER_FAIL;
1356 			splx(s);
1357 			continue;
1358 		}
1359 
1360 		/*
1361 		 * The I/O we are constructing cannot cross a physical
1362 		 * disk boundary in the swap stripe.  Note: we are still
1363 		 * at splvm().
1364 		 */
1365 		if ((blk ^ (blk + n)) & dmmax_mask) {
1366 			j = ((blk + dmmax) & dmmax_mask) - blk;
1367 			swp_pager_freeswapspace(blk + j, n - j);
1368 			n = j;
1369 		}
1370 
1371 		/*
1372 		 * All I/O parameters have been satisfied, build the I/O
1373 		 * request and assign the swap space.
1374 		 *
1375 		 * NOTE: B_PAGING is set by pbgetvp()
1376 		 */
1377 
1378 		if (sync == TRUE) {
1379 			bp = getpbuf(&nsw_wcount_sync);
1380 		} else {
1381 			bp = getpbuf(&nsw_wcount_async);
1382 			bp->b_flags = B_ASYNC;
1383 		}
1384 		bp->b_iocmd = BIO_WRITE;
1385 		bp->b_spc = NULL;	/* not used, but NULL-out anyway */
1386 
1387 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1388 
1389 		bp->b_rcred = bp->b_wcred = proc0.p_ucred;
1390 		bp->b_bcount = PAGE_SIZE * n;
1391 		bp->b_bufsize = PAGE_SIZE * n;
1392 		bp->b_blkno = blk;
1393 
1394 		crhold(bp->b_rcred);
1395 		crhold(bp->b_wcred);
1396 
1397 		pbgetvp(swapdev_vp, bp);
1398 
1399 		for (j = 0; j < n; ++j) {
1400 			vm_page_t mreq = m[i+j];
1401 
1402 			swp_pager_meta_build(
1403 			    mreq->object,
1404 			    mreq->pindex,
1405 			    blk + j
1406 			);
1407 			vm_page_dirty(mreq);
1408 			rtvals[i+j] = VM_PAGER_OK;
1409 
1410 			vm_page_flag_set(mreq, PG_SWAPINPROG);
1411 			bp->b_pages[j] = mreq;
1412 		}
1413 		bp->b_npages = n;
1414 		/*
1415 		 * Must set dirty range for NFS to work.
1416 		 */
1417 		bp->b_dirtyoff = 0;
1418 		bp->b_dirtyend = bp->b_bcount;
1419 
1420 		cnt.v_swapout++;
1421 		cnt.v_swappgsout += bp->b_npages;
1422 		swapdev_vp->v_numoutput++;
1423 
1424 		splx(s);
1425 
1426 		/*
1427 		 * asynchronous
1428 		 *
1429 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1430 		 */
1431 
1432 		if (sync == FALSE) {
1433 			bp->b_iodone = swp_pager_async_iodone;
1434 			BUF_KERNPROC(bp);
1435 			BUF_STRATEGY(bp);
1436 
1437 			for (j = 0; j < n; ++j)
1438 				rtvals[i+j] = VM_PAGER_PEND;
1439 			/* restart outer loop */
1440 			continue;
1441 		}
1442 
1443 		/*
1444 		 * synchronous
1445 		 *
1446 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1447 		 */
1448 
1449 		bp->b_iodone = swp_pager_sync_iodone;
1450 		BUF_STRATEGY(bp);
1451 
1452 		/*
1453 		 * Wait for the sync I/O to complete, then update rtvals.
1454 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1455 		 * our async completion routine at the end, thus avoiding a
1456 		 * double-free.
1457 		 */
1458 		s = splbio();
1459 
1460 		while ((bp->b_flags & B_DONE) == 0) {
1461 			tsleep(bp, PVM, "swwrt", 0);
1462 		}
1463 
1464 		for (j = 0; j < n; ++j)
1465 			rtvals[i+j] = VM_PAGER_PEND;
1466 
1467 		/*
1468 		 * Now that we are through with the bp, we can call the
1469 		 * normal async completion, which frees everything up.
1470 		 */
1471 
1472 		swp_pager_async_iodone(bp);
1473 		splx(s);
1474 	}
1475 }
1476 
1477 /*
1478  *	swp_pager_sync_iodone:
1479  *
1480  *	Completion routine for synchronous reads and writes from/to swap.
1481  *	We just mark the bp as complete and wake up anyone waiting on it.
1482  *
1483  *	This routine may not block.  This routine is called at splbio() or better.
1484  */
1485 
1486 static void
1487 swp_pager_sync_iodone(bp)
1488 	struct buf *bp;
1489 {
1490 	bp->b_flags |= B_DONE;
1491 	bp->b_flags &= ~B_ASYNC;
1492 	wakeup(bp);
1493 }
1494 
1495 /*
1496  *	swp_pager_async_iodone:
1497  *
1498  *	Completion routine for asynchronous reads and writes from/to swap.
1499  *	Also called manually by synchronous code to finish up a bp.
1500  *
1501  *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
1502  *	the pages are vm_page_t->busy'd.  For READ operations, we PG_BUSY
1503  *	unbusy all pages except the 'main' request page.  For WRITE
1504  *	operations, we vm_page_t->busy unbusy all pages ( we can do this
1505  *	because we marked them all VM_PAGER_PEND on return from putpages ).
1506  *
1507  *	This routine may not block.
1508  *	This routine is called at splbio() or better
1509  *
1510  *	We up ourselves to splvm() as required for various vm_page related
1511  *	calls.
1512  */
1513 
1514 static void
1515 swp_pager_async_iodone(bp)
1516 	struct buf *bp;
1517 {
1518 	int s;
1519 	int i;
1520 	vm_object_t object = NULL;
1521 
1522 	GIANT_REQUIRED;
1523 
1524 	bp->b_flags |= B_DONE;
1525 
1526 	/*
1527 	 * report error
1528 	 */
1529 
1530 	if (bp->b_ioflags & BIO_ERROR) {
1531 		printf(
1532 		    "swap_pager: I/O error - %s failed; blkno %ld,"
1533 			" size %ld, error %d\n",
1534 		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1535 		    (long)bp->b_blkno,
1536 		    (long)bp->b_bcount,
1537 		    bp->b_error
1538 		);
1539 	}
1540 
1541 	/*
1542 	 * set object, raise to splvm().
1543 	 */
1544 
1545 	if (bp->b_npages)
1546 		object = bp->b_pages[0]->object;
1547 	s = splvm();
1548 
1549 	/*
1550 	 * remove the mapping for kernel virtual
1551 	 */
1552 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1553 
1554 	/*
1555 	 * cleanup pages.  If an error occurs writing to swap, we are in
1556 	 * very serious trouble.  If it happens to be a disk error, though,
1557 	 * we may be able to recover by reassigning the swap later on.  So
1558 	 * in this case we remove the m->swapblk assignment for the page
1559 	 * but do not free it in the rlist.  The erroneous block(s) are thus
1560 	 * never reallocated as swap.  Redirty the page and continue.
1561 	 */
1562 
1563 	for (i = 0; i < bp->b_npages; ++i) {
1564 		vm_page_t m = bp->b_pages[i];
1565 
1566 		vm_page_flag_clear(m, PG_SWAPINPROG);
1567 
1568 		if (bp->b_ioflags & BIO_ERROR) {
1569 			/*
1570 			 * If an error occurs I'd love to throw the swapblk
1571 			 * away without freeing it back to swapspace, so it
1572 			 * can never be used again.  But I can't from an
1573 			 * interrupt.
1574 			 */
1575 
1576 			if (bp->b_iocmd == BIO_READ) {
1577 				/*
1578 				 * When reading, reqpage needs to stay
1579 				 * locked for the parent, but all other
1580 				 * pages can be freed.  We still want to
1581 				 * wakeup the parent waiting on the page,
1582 				 * though.  ( also: pg_reqpage can be -1 and
1583 				 * not match anything ).
1584 				 *
1585 				 * We have to wake specifically requested pages
1586 				 * up too because we cleared PG_SWAPINPROG and
1587 				 * someone may be waiting for that.
1588 				 *
1589 				 * NOTE: for reads, m->dirty will probably
1590 				 * be overridden by the original caller of
1591 				 * getpages so don't play cute tricks here.
1592 				 *
1593 				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
1594 				 * AS THIS MESSES WITH object->memq, and it is
1595 				 * not legal to mess with object->memq from an
1596 				 * interrupt.
1597 				 */
1598 
1599 				m->valid = 0;
1600 				vm_page_flag_clear(m, PG_ZERO);
1601 
1602 				if (i != bp->b_pager.pg_reqpage)
1603 					vm_page_free(m);
1604 				else
1605 					vm_page_flash(m);
1606 				/*
1607 				 * If i == bp->b_pager.pg_reqpage, do not wake
1608 				 * the page up.  The caller needs to.
1609 				 */
1610 			} else {
1611 				/*
1612 				 * If a write error occurs, reactivate page
1613 				 * so it doesn't clog the inactive list,
1614 				 * then finish the I/O.
1615 				 */
1616 				vm_page_dirty(m);
1617 				vm_page_activate(m);
1618 				vm_page_io_finish(m);
1619 			}
1620 		} else if (bp->b_iocmd == BIO_READ) {
1621 			/*
1622 			 * For read success, clear dirty bits.  Nobody should
1623 			 * have this page mapped but don't take any chances,
1624 			 * make sure the pmap modify bits are also cleared.
1625 			 *
1626 			 * NOTE: for reads, m->dirty will probably be
1627 			 * overridden by the original caller of getpages so
1628 			 * we cannot set them in order to free the underlying
1629 			 * swap in a low-swap situation.  I don't think we'd
1630 			 * want to do that anyway, but it was an optimization
1631 			 * that existed in the old swapper for a time before
1632 			 * it got ripped out due to precisely this problem.
1633 			 *
1634 			 * clear PG_ZERO in page.
1635 			 *
1636 			 * If not the requested page then deactivate it.
1637 			 *
1638 			 * Note that the requested page, reqpage, is left
1639 			 * busied, but we still have to wake it up.  The
1640 			 * other pages are released (unbusied) by
1641 			 * vm_page_wakeup().  We do not set reqpage's
1642 			 * valid bits here, it is up to the caller.
1643 			 */
1644 
1645 			pmap_clear_modify(m);
1646 			m->valid = VM_PAGE_BITS_ALL;
1647 			vm_page_undirty(m);
1648 			vm_page_flag_clear(m, PG_ZERO);
1649 
1650 			/*
1651 			 * We have to wake specifically requested pages
1652 			 * up too because we cleared PG_SWAPINPROG and
1653 			 * could be waiting for it in getpages.  However,
1654 			 * be sure to not unbusy getpages specifically
1655 			 * requested page - getpages expects it to be
1656 			 * left busy.
1657 			 */
1658 			if (i != bp->b_pager.pg_reqpage) {
1659 				vm_page_deactivate(m);
1660 				vm_page_wakeup(m);
1661 			} else {
1662 				vm_page_flash(m);
1663 			}
1664 		} else {
1665 			/*
1666 			 * For write success, clear the modify and dirty
1667 			 * status, then finish the I/O ( which decrements the
1668 			 * busy count and possibly wakes waiter's up ).
1669 			 */
1670 			pmap_clear_modify(m);
1671 			vm_page_undirty(m);
1672 			vm_page_io_finish(m);
1673 			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1674 				vm_page_protect(m, VM_PROT_READ);
1675 		}
1676 	}
1677 
1678 	/*
1679 	 * adjust pip.  NOTE: the original parent may still have its own
1680 	 * pip refs on the object.
1681 	 */
1682 
1683 	if (object)
1684 		vm_object_pip_wakeupn(object, bp->b_npages);
1685 
1686 	/*
1687 	 * release the physical I/O buffer
1688 	 */
1689 
1690 	relpbuf(
1691 	    bp,
1692 	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1693 		((bp->b_flags & B_ASYNC) ?
1694 		    &nsw_wcount_async :
1695 		    &nsw_wcount_sync
1696 		)
1697 	    )
1698 	);
1699 	splx(s);
1700 }
1701 
1702 /************************************************************************
1703  *				SWAP META DATA 				*
1704  ************************************************************************
1705  *
1706  *	These routines manipulate the swap metadata stored in the
1707  *	OBJT_SWAP object.  All swp_*() routines must be called at
1708  *	splvm() because swap can be freed up by the low level vm_page
1709  *	code which might be called from interrupts beyond what splbio() covers.
1710  *
1711  *	Swap metadata is implemented with a global hash and not directly
1712  *	linked into the object.  Instead the object simply contains
1713  *	appropriate tracking counters.
1714  */
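
/*
 * Worked example of the meta-data granularity, taking the swblock
 * capacity of 16 pages from the zone comment in
 * swap_pager_swap_init() (i.e. SWAP_META_PAGES == 16 and
 * SWAP_META_MASK == 15): page indices 32..47 of an object all live
 * in the swblock whose swb_index is 32 (index & ~SWAP_META_MASK),
 * at slots swb_pages[0..15] (index & SWAP_META_MASK).
 */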
1715 
1716 /*
1717  * SWP_PAGER_HASH() -	hash swap meta data
1718  *
1719  *	This is an inline helper function which hashes the swapblk given
1720  *	This is an inline helper function which hashes the given object
1721  *	and page index.  It returns a pointer to the hash-chain pointer
1722  *	that references the matching swblock, or a pointer to a NULL
1723  *	pointer if it could not find a swapblk.
1724  *	This routine must be called at splvm().
1725  */
1726 
1727 static __inline struct swblock **
1728 swp_pager_hash(vm_object_t object, vm_pindex_t index)
1729 {
1730 	struct swblock **pswap;
1731 	struct swblock *swap;
1732 
1733 	index &= ~SWAP_META_MASK;
1734 	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
1735 
1736 	while ((swap = *pswap) != NULL) {
1737 		if (swap->swb_object == object &&
1738 		    swap->swb_index == index
1739 		) {
1740 			break;
1741 		}
1742 		pswap = &swap->swb_hnext;
1743 	}
1744 	return(pswap);
1745 }
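
/*
 * A minimal sketch of the pointer-to-pointer idiom this helper
 * enables, mirroring its callers below: since the caller is handed
 * the address of the link that references the swblock, unlinking
 * needs no back-pointer or second list walk.
 *
 *	pswap = swp_pager_hash(object, index);
 *	if ((swap = *pswap) != NULL) {
 *		*pswap = swap->swb_hnext;	(unlink from hash chain)
 *		zfree(swap_zone, swap);
 *	}
 */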
1746 
1747 /*
1748  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
1749  *
1750  *	We first convert the object to a swap object if it is a default
1751  *	object.
1752  *
1753  *	The specified swapblk is added to the object's swap metadata.  If
1754  *	the swapblk is not valid, it is freed instead.  Any previously
1755  *	assigned swapblk is freed.
1756  *
1757  *	This routine must be called at splvm(), except when used to convert
1758  *	an OBJT_DEFAULT object into an OBJT_SWAP object.
1759  */
1760 
1761 static void
1762 swp_pager_meta_build(
1763 	vm_object_t object,
1764 	vm_pindex_t index,
1765 	daddr_t swapblk
1766 ) {
1767 	struct swblock *swap;
1768 	struct swblock **pswap;
1769 
1770 	GIANT_REQUIRED;
1771 	/*
1772 	 * Convert default object to swap object if necessary
1773 	 */
1774 
1775 	if (object->type != OBJT_SWAP) {
1776 		object->type = OBJT_SWAP;
1777 		object->un_pager.swp.swp_bcount = 0;
1778 
1779 		mtx_lock(&sw_alloc_mtx);
1780 		if (object->handle != NULL) {
1781 			TAILQ_INSERT_TAIL(
1782 			    NOBJLIST(object->handle),
1783 			    object,
1784 			    pager_object_list
1785 			);
1786 		} else {
1787 			TAILQ_INSERT_TAIL(
1788 			    &swap_pager_un_object_list,
1789 			    object,
1790 			    pager_object_list
1791 			);
1792 		}
1793 		mtx_unlock(&sw_alloc_mtx);
1794 	}
1795 
1796 	/*
1797 	 * Locate hash entry.  If not found create, but if we aren't adding
1798 	 * anything just return.  If we run out of space in the map we wait
1799 	 * and, since the hash table may have changed, retry.
1800 	 */
1801 
1802 retry:
1803 	pswap = swp_pager_hash(object, index);
1804 
1805 	if ((swap = *pswap) == NULL) {
1806 		int i;
1807 
1808 		if (swapblk == SWAPBLK_NONE)
1809 			return;
1810 
1811 		swap = *pswap = zalloc(swap_zone);
1812 		if (swap == NULL) {
1813 			VM_WAIT;
1814 			goto retry;
1815 		}
1816 		swap->swb_hnext = NULL;
1817 		swap->swb_object = object;
1818 		swap->swb_index = index & ~SWAP_META_MASK;
1819 		swap->swb_count = 0;
1820 
1821 		++object->un_pager.swp.swp_bcount;
1822 
1823 		for (i = 0; i < SWAP_META_PAGES; ++i)
1824 			swap->swb_pages[i] = SWAPBLK_NONE;
1825 	}
1826 
1827 	/*
1828 	 * Delete prior contents of metadata
1829 	 */
1830 
1831 	index &= SWAP_META_MASK;
1832 
1833 	if (swap->swb_pages[index] != SWAPBLK_NONE) {
1834 		swp_pager_freeswapspace(swap->swb_pages[index], 1);
1835 		--swap->swb_count;
1836 	}
1837 
1838 	/*
1839 	 * Enter block into metadata
1840 	 */
1841 
1842 	swap->swb_pages[index] = swapblk;
1843 	if (swapblk != SWAPBLK_NONE)
1844 		++swap->swb_count;
1845 }
1846 
1847 /*
1848  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1849  *
1850  *	The requested range of blocks is freed, with any associated swap
1851  *	returned to the swap bitmap.
1852  *
1853  *	This routine will free swap metadata structures as they are cleaned
1854  *	out.  This routine does *NOT* operate on swap metadata associated
1855  *	with resident pages.
1856  *
1857  *	This routine must be called at splvm()
1858  */
1859 
1860 static void
1861 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1862 {
1863 	GIANT_REQUIRED;
1864 
1865 	if (object->type != OBJT_SWAP)
1866 		return;
1867 
1868 	while (count > 0) {
1869 		struct swblock **pswap;
1870 		struct swblock *swap;
1871 
1872 		pswap = swp_pager_hash(object, index);
1873 
1874 		if ((swap = *pswap) != NULL) {
1875 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1876 
1877 			if (v != SWAPBLK_NONE) {
1878 				swp_pager_freeswapspace(v, 1);
1879 				swap->swb_pages[index & SWAP_META_MASK] =
1880 					SWAPBLK_NONE;
1881 				if (--swap->swb_count == 0) {
1882 					*pswap = swap->swb_hnext;
1883 					zfree(swap_zone, swap);
1884 					--object->un_pager.swp.swp_bcount;
1885 				}
1886 			}
1887 			--count;
1888 			++index;
1889 		} else {
1890 			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1891 			count -= n;
1892 			index += n;
1893 		}
1894 	}
1895 }
1896 
1897 /*
1898  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1899  *
1900  *	This routine locates and destroys all swap metadata associated with
1901  *	an object.
1902  *
1903  *	This routine must be called at splvm()
1904  */
1905 
1906 static void
1907 swp_pager_meta_free_all(vm_object_t object)
1908 {
1909 	daddr_t index = 0;
1910 
1911 	GIANT_REQUIRED;
1912 
1913 	if (object->type != OBJT_SWAP)
1914 		return;
1915 
1916 	while (object->un_pager.swp.swp_bcount) {
1917 		struct swblock **pswap;
1918 		struct swblock *swap;
1919 
1920 		pswap = swp_pager_hash(object, index);
1921 		if ((swap = *pswap) != NULL) {
1922 			int i;
1923 
1924 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1925 				daddr_t v = swap->swb_pages[i];
1926 				if (v != SWAPBLK_NONE) {
1927 					--swap->swb_count;
1928 					swp_pager_freeswapspace(v, 1);
1929 				}
1930 			}
1931 			if (swap->swb_count != 0)
1932 				panic("swp_pager_meta_free_all: swb_count != 0");
1933 			*pswap = swap->swb_hnext;
1934 			zfree(swap_zone, swap);
1935 			--object->un_pager.swp.swp_bcount;
1936 		}
1937 		index += SWAP_META_PAGES;
1938 		if (index > 0x20000000)
1939 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1940 	}
1941 }
1942 
1943 /*
1944  * SWP_PAGER_META_CTL() -  misc control of swap and vm_page_t meta data.
1945  *
1946  *	This routine is capable of looking up, popping, or freeing
1947  *	swapblk assignments in the swap meta data or in the vm_page_t.
1948  *	The routine typically returns the swapblk being looked up or
1949  *	popped, or SWAPBLK_NONE if the block was freed or was invalid.
1950  *	This routine will automatically free any invalid
1951  *	meta-data swapblks.
1952  *
1953  *	It is not possible to store invalid swapblks in the swap meta data
1954  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1955  *
1956  *	When acting on a busy resident page and paging is in progress, we
1957  *	have to wait until paging is complete but otherwise can act on the
1958  *	busy page.
1959  *
1960  *	This routine must be called at splvm().
1961  *
1962  *	SWM_FREE	remove and free swap block from metadata
1963  *	SWM_POP		remove from meta data but do not free.. pop it out
1964  */
1965 
1966 static daddr_t
1967 swp_pager_meta_ctl(
1968 	vm_object_t object,
1969 	vm_pindex_t index,
1970 	int flags
1971 ) {
1972 	struct swblock **pswap;
1973 	struct swblock *swap;
1974 	daddr_t r1;
1975 
1976 	GIANT_REQUIRED;
1977 	/*
1978 	 * The meta data only exists if the object is OBJT_SWAP
1979 	 * and even then might not be allocated yet.
1980 	 */
1981 
1982 	if (object->type != OBJT_SWAP)
1983 		return(SWAPBLK_NONE);
1984 
1985 	r1 = SWAPBLK_NONE;
1986 	pswap = swp_pager_hash(object, index);
1987 
1988 	if ((swap = *pswap) != NULL) {
1989 		index &= SWAP_META_MASK;
1990 		r1 = swap->swb_pages[index];
1991 
1992 		if (r1 != SWAPBLK_NONE) {
1993 			if (flags & SWM_FREE) {
1994 				swp_pager_freeswapspace(r1, 1);
1995 				r1 = SWAPBLK_NONE;
1996 			}
1997 			if (flags & (SWM_FREE|SWM_POP)) {
1998 				swap->swb_pages[index] = SWAPBLK_NONE;
1999 				if (--swap->swb_count == 0) {
2000 					*pswap = swap->swb_hnext;
2001 					zfree(swap_zone, swap);
2002 					--object->un_pager.swp.swp_bcount;
2003 				}
2004 			}
2005 		}
2006 	}
2007 	return(r1);
2008 }
2009 
2010 /********************************************************
2011  *		CHAINING FUNCTIONS			*
2012  ********************************************************
2013  *
2014  *	These functions support recursion of I/O operations
2015  *	on bp's, typically by chaining one or more 'child' bp's
2016  *	to the parent.  Synchronous, asynchronous, and semi-synchronous
2017  *	chaining is possible.
2018  */
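
/*
 * A minimal sketch of the chaining pattern these routines implement,
 * as exercised by swap_pager_strategy() above: each contiguous run
 * of swap blocks becomes one child buf chained to the parent bio.
 *
 *	nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
 *	nbp->b_blkno = blk;
 *	nbp->b_bcount = run * PAGE_SIZE;
 *	flushchainbuf(nbp);		(start I/O; bufdone if empty)
 *	...
 *	waitchainbuf(bp, 0, 1);		(drain children, biodone parent)
 */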
2019 
2020 /*
2021  *	vm_pager_chain_iodone:
2022  *
2023  *	io completion routine for child bp.  Currently we fudge a bit
2024  *	on dealing with b_resid.   Since users of these routines may issue
2025  *	multiple children simultaneously, sequencing of the error can be lost.
2026  */
2027 
2028 static void
2029 vm_pager_chain_iodone(struct buf *nbp)
2030 {
2031 	struct bio *bp;
2032 	u_int *count;
2033 
2034 	bp = nbp->b_caller1;
2035 	count = (u_int *)&(bp->bio_caller1);
2036 	if (bp != NULL) {
2037 		if (nbp->b_ioflags & BIO_ERROR) {
2038 			bp->bio_flags |= BIO_ERROR;
2039 			bp->bio_error = nbp->b_error;
2040 		} else if (nbp->b_resid != 0) {
2041 			bp->bio_flags |= BIO_ERROR;
2042 			bp->bio_error = EINVAL;
2043 		} else {
2044 			bp->bio_resid -= nbp->b_bcount;
2045 		}
2046 		nbp->b_caller1 = NULL;
2047 		--(*count);
2048 		if (bp->bio_flags & BIO_FLAG1) {
2049 			bp->bio_flags &= ~BIO_FLAG1;
2050 			wakeup(bp);
2051 		}
2052 	}
2053 	nbp->b_flags |= B_DONE;
2054 	nbp->b_flags &= ~B_ASYNC;
2055 	relpbuf(nbp, NULL);
2056 }
2057 
2058 /*
2059  *	getchainbuf:
2060  *
2061  *	Obtain a physical buffer and chain it to its parent buffer.  When
2062  *	I/O completes, waiters on the parent bio are awakened ( via the
2063  *	BIO_FLAG1 handshake ).  Errors are automatically propagated to the parent
2064  */
2065 
2066 struct buf *
2067 getchainbuf(struct bio *bp, struct vnode *vp, int flags)
2068 {
2069 	struct buf *nbp;
2070 	u_int *count;
2071 
2072 	GIANT_REQUIRED;
2073 	nbp = getpbuf(NULL);
2074 	count = (u_int *)&(bp->bio_caller1);
2075 
2076 	nbp->b_caller1 = bp;
2077 	++(*count);
2078 
2079 	if (*count > 4)
2080 		waitchainbuf(bp, 4, 0);
2081 
2082 	nbp->b_iocmd = bp->bio_cmd;
2083 	nbp->b_ioflags = bp->bio_flags & BIO_ORDERED;
2084 	nbp->b_flags = flags;
2085 	nbp->b_rcred = nbp->b_wcred = proc0.p_ucred;
2086 	nbp->b_iodone = vm_pager_chain_iodone;
2087 
2088 	crhold(nbp->b_rcred);
2089 	crhold(nbp->b_wcred);
2090 
2091 	if (vp)
2092 		pbgetvp(vp, nbp);
2093 	return(nbp);
2094 }
2095 
2096 void
2097 flushchainbuf(struct buf *nbp)
2098 {
2099 	GIANT_REQUIRED;
2100 	if (nbp->b_bcount) {
2101 		nbp->b_bufsize = nbp->b_bcount;
2102 		if (nbp->b_iocmd == BIO_WRITE)
2103 			nbp->b_dirtyend = nbp->b_bcount;
2104 		BUF_KERNPROC(nbp);
2105 		BUF_STRATEGY(nbp);
2106 	} else {
2107 		bufdone(nbp);
2108 	}
2109 }
2110 
2111 static void
2112 waitchainbuf(struct bio *bp, int limit, int done)
2113 {
2114 	int s;
2115 	u_int *count;
2116 
2117 	GIANT_REQUIRED;
2118 	count = (u_int *)&(bp->bio_caller1);
2119 	s = splbio();
2120 	while (*count > limit) {
2121 		bp->bio_flags |= BIO_FLAG1;
2122 		tsleep(bp, PRIBIO + 4, "bpchain", 0);
2123 	}
2124 	if (done) {
2125 		if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
2126 			bp->bio_flags |= BIO_ERROR;
2127 			bp->bio_error = EINVAL;
2128 		}
2129 		biodone(bp);
2130 	}
2131 	splx(s);
2132 }
2133 
2134