xref: /freebsd/sys/vm/swap_pager.c (revision 17d6c636720d00f77e5d098daf4c278f89d84f7b)
1 /*
2  * Copyright (c) 1998 Matthew Dillon,
3  * Copyright (c) 1994 John S. Dyson
4  * Copyright (c) 1990 University of Utah.
5  * Copyright (c) 1991, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * This code is derived from software contributed to Berkeley by
9  * the Systems Programming Group of the University of Utah Computer
10  * Science Department.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. All advertising materials mentioning features or use of this software
21  *    must display the following acknowledgement:
22  *	This product includes software developed by the University of
23  *	California, Berkeley and its contributors.
24  * 4. Neither the name of the University nor the names of its contributors
25  *    may be used to endorse or promote products derived from this software
26  *    without specific prior written permission.
27  *
28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38  * SUCH DAMAGE.
39  *
40  *				New Swap System
41  *				Matthew Dillon
42  *
43  * Radix Bitmap 'blists'.
44  *
45  *	- The new swapper uses the new radix bitmap code.  This should scale
46  *	  to arbitrarily small or arbitrarily large swap spaces and an almost
47  *	  arbitrary degree of fragmentation.
48  *
49  * Features:
50  *
51  *	- on the fly reallocation of swap during putpages.  The new system
52  *	  does not try to keep previously allocated swap blocks for dirty
53  *	  pages.
54  *
55  *	- on the fly deallocation of swap
56  *
57  *	- No more garbage collection required.  Unnecessarily allocated swap
58  *	  blocks only exist for dirty vm_page_t's now and these are already
59  *	  cycled (in a high-load system) by the pager.  We also do on-the-fly
60  *	  removal of invalidated swap blocks when a page is destroyed
61  *	  or renamed.
62  *
63  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
64  *
65  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
66  *
67  * $FreeBSD$
68  */
69 
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/conf.h>
73 #include <sys/kernel.h>
74 #include <sys/proc.h>
75 #include <sys/bio.h>
76 #include <sys/buf.h>
77 #include <sys/vnode.h>
78 #include <sys/malloc.h>
79 #include <sys/vmmeter.h>
80 #include <sys/sysctl.h>
81 #include <sys/blist.h>
82 #include <sys/lock.h>
83 #include <sys/sx.h>
85 
86 #ifndef MAX_PAGEOUT_CLUSTER
87 #define MAX_PAGEOUT_CLUSTER 16
88 #endif
89 
90 #define SWB_NPAGES	MAX_PAGEOUT_CLUSTER
91 
92 #include "opt_swap.h"
93 #include <vm/vm.h>
94 #include <vm/pmap.h>
95 #include <vm/vm_map.h>
96 #include <vm/vm_kern.h>
97 #include <vm/vm_object.h>
98 #include <vm/vm_page.h>
99 #include <vm/vm_pager.h>
100 #include <vm/vm_pageout.h>
101 #include <vm/vm_zone.h>
102 #include <vm/swap_pager.h>
103 #include <vm/vm_extern.h>
104 
105 #define SWM_FREE	0x02	/* free, period			*/
106 #define SWM_POP		0x04	/* pop out			*/
107 
108 /*
109  * vm_swap_size is in page-sized chunks now.  It was DEV_BSIZE'd chunks
110  * in the old system.
111  */
112 
113 extern int vm_swap_size;	/* number of free swap blocks, in pages */
114 
115 int swap_pager_full;		/* swap space exhaustion (task killing) */
116 static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/
117 static int nsw_rcount;		/* free read buffers			*/
118 static int nsw_wcount_sync;	/* limit write buffers / synchronous	*/
119 static int nsw_wcount_async;	/* limit write buffers / asynchronous	*/
120 static int nsw_wcount_async_max;/* assigned maximum			*/
121 static int nsw_cluster_max;	/* maximum VOP I/O allowed		*/
122 
123 struct blist *swapblist;
124 static struct swblock **swhash;
125 static int swhash_mask;
126 static int swap_async_max = 4;	/* maximum in-progress async I/O's	*/
127 static struct sx sw_alloc_sx;
128 
129 /* from vm_swap.c */
130 extern struct vnode *swapdev_vp;
131 extern struct swdevt *swdevt;
132 extern int nswdev;
133 
134 SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
135         CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops");
136 
137 #define BLK2DEVIDX(blk) (nswdev > 1 ? blk / dmmax % nswdev : 0)
138 
139 /*
140  * "named" and "unnamed" anon region objects.  Try to reduce the overhead
141  * of searching a named list by hashing it just a little.
142  */
143 
144 #define NOBJLISTS		8
145 
146 #define NOBJLIST(handle)	\
147 	(&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)])
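
/*
 * For illustration: a handle value of 0x1230 hashes to
 * ((0x1230 >> 4) & 7) == 3, so that object lives on
 * swap_pager_object_list[3].  The >> 4 discards low-order bits that
 * tend to be identical from one handle to the next.
 */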
148 
149 static struct mtx sw_alloc_mtx;	/* protect list manipulation */
150 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
151 struct pagerlst		swap_pager_un_object_list;
152 vm_zone_t		swap_zone;
153 
154 /*
155  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
156  * calls hooked from other parts of the VM system and do not appear here.
157  * (see vm/swap_pager.h).
158  */
159 
160 static vm_object_t
161 		swap_pager_alloc __P((void *handle, vm_ooffset_t size,
162 				      vm_prot_t prot, vm_ooffset_t offset));
163 static void	swap_pager_dealloc __P((vm_object_t object));
164 static int	swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
165 static void	swap_pager_init __P((void));
166 static void	swap_pager_unswapped __P((vm_page_t));
167 static void	swap_pager_strategy __P((vm_object_t, struct bio *));
168 
169 struct pagerops swappagerops = {
170 	swap_pager_init,	/* early system initialization of pager	*/
171 	swap_pager_alloc,	/* allocate an OBJT_SWAP object		*/
172 	swap_pager_dealloc,	/* deallocate an OBJT_SWAP object	*/
173 	swap_pager_getpages,	/* pagein				*/
174 	swap_pager_putpages,	/* pageout				*/
175 	swap_pager_haspage,	/* get backing store status for page	*/
176 	swap_pager_unswapped,	/* remove swap related to page		*/
177 	swap_pager_strategy	/* pager strategy call			*/
178 };
179 
180 static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags);
181 static void flushchainbuf(struct buf *nbp);
182 static void waitchainbuf(struct bio *bp, int count, int done);
183 
184 /*
185  * dmmax is in page-sized chunks with the new swap system.  It was
186  * DEV_BSIZE'd chunks in the old system.  dmmax is always a power of 2.
187  *
188  * swap_*() routines are externally accessible.  swp_*() routines are
189  * internal.
190  */
191 
192 int dmmax;
193 static int dmmax_mask;
194 int nswap_lowat = 128;		/* in pages, swap_pager_almost_full warn */
195 int nswap_hiwat = 512;		/* in pages, swap_pager_almost_full warn */
196 
197 SYSCTL_INT(_vm, OID_AUTO, dmmax,
198 	CTLFLAG_RD, &dmmax, 0, "Maximum size of a swap block");
199 
200 static __inline void	swp_sizecheck __P((void));
201 static void	swp_pager_sync_iodone __P((struct buf *bp));
202 static void	swp_pager_async_iodone __P((struct buf *bp));
203 
204 /*
205  * Swap bitmap functions
206  */
207 
208 static __inline void	swp_pager_freeswapspace __P((daddr_t blk, int npages));
209 static __inline daddr_t	swp_pager_getswapspace __P((int npages));
210 
211 /*
212  * Metadata functions
213  */
214 
215 static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t));
216 static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t));
217 static void swp_pager_meta_free_all __P((vm_object_t));
218 static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int));
219 
220 /*
221  * SWP_SIZECHECK() -	update swap_pager_full indication
222  *
223  *	update the swap_pager_almost_full indication and warn when we are
224  *	about to run out of swap space, using lowat/hiwat hysteresis.
225  *
226  *	Clear swap_pager_full ( task killing ) indication when lowat is met.
227  *
228  *	No restrictions on call
229  *	This routine may not block.
230  *	This routine must be called at splvm()
231  */
232 
233 static __inline void
234 swp_sizecheck()
235 {
236 	GIANT_REQUIRED;
237 
238 	if (vm_swap_size < nswap_lowat) {
239 		if (swap_pager_almost_full == 0) {
240 			printf("swap_pager: out of swap space\n");
241 			swap_pager_almost_full = 1;
242 		}
243 	} else {
244 		swap_pager_full = 0;
245 		if (vm_swap_size > nswap_hiwat)
246 			swap_pager_almost_full = 0;
247 	}
248 }
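
/*
 * For illustration, with the default watermarks (nswap_lowat 128,
 * nswap_hiwat 512): the "out of swap space" warning fires once when free
 * swap drops below 128 pages, and swap_pager_almost_full is not cleared
 * until free swap climbs back above 512 pages, so the indication cannot
 * flap while free swap hovers near the low watermark.
 */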
249 
250 /*
251  * SWAP_PAGER_INIT() -	initialize the swap pager!
252  *
253  *	Expected to be started from system init.  NOTE:  This code is run
254  *	before much else so be careful what you depend on.  Most of the VM
255  *	system has yet to be initialized at this point.
256  */
257 
258 static void
259 swap_pager_init()
260 {
261 	/*
262 	 * Initialize object lists
263 	 */
264 	int i;
265 
266 	for (i = 0; i < NOBJLISTS; ++i)
267 		TAILQ_INIT(&swap_pager_object_list[i]);
268 	TAILQ_INIT(&swap_pager_un_object_list);
269 	mtx_init(&sw_alloc_mtx, "swap_pager list", MTX_DEF);
270 
271 	/*
272 	 * Device Stripe, in PAGE_SIZE'd blocks
273 	 */
274 
275 	dmmax = SWB_NPAGES * 2;
276 	dmmax_mask = ~(dmmax - 1);
277 }
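
/*
 * For illustration, with MAX_PAGEOUT_CLUSTER at its default of 16 the
 * initialization above yields dmmax == 32 pages.  On a system with two
 * swap devices, BLK2DEVIDX() then interleaves the block space in 32 page
 * stripes: blocks 0-31 map to device 0, 32-63 to device 1, 64-95 back
 * to device 0, and so on.
 */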
278 
279 /*
280  * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process
281  *
282  *	Expected to be started from pageout process once, prior to entering
283  *	its main loop.
284  */
285 
286 void
287 swap_pager_swap_init()
288 {
289 	int n, n2;
290 
291 	/*
292 	 * Number of in-transit swap bp operations.  Don't
293 	 * exhaust the pbufs completely.  Make sure we
294 	 * initialize workable values (0 will work for hysteresis
295 	 * but it isn't very efficient).
296 	 *
297 	 * The nsw_cluster_max is constrained by the bp->b_pages[]
298 	 * array (MAXPHYS/PAGE_SIZE) and our locally defined
299 	 * MAX_PAGEOUT_CLUSTER.   Also be aware that swap ops are
300 	 * constrained by the swap device interleave stripe size.
301 	 *
302 	 * Currently we hardwire nsw_wcount_async to 4.  This limit is
303 	 * designed to prevent other I/O from having high latencies due to
304 	 * our pageout I/O.  The value 4 works well for one or two active swap
305 	 * devices but is probably a little low if you have more.  Even so,
306 	 * a higher value would probably generate only a limited improvement
307 	 * with three or four active swap devices since the system does not
308 	 * typically have to pageout at extreme bandwidths.   We will want
309 	 * at least 2 per swap devices, and 4 is a pretty good value if you
310 	 * at least 2 per swap device, and 4 is a pretty good value if you
311 	 * So it all works out pretty well.
312 	 */
313 
314 	nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER);
315 
316 	mtx_lock(&pbuf_mtx);
317 	nsw_rcount = (nswbuf + 1) / 2;
318 	nsw_wcount_sync = (nswbuf + 3) / 4;
319 	nsw_wcount_async = 4;
320 	nsw_wcount_async_max = nsw_wcount_async;
321 	mtx_unlock(&pbuf_mtx);
322 
323 	/*
324 	 * Initialize our zone.  Right now I'm just guessing on the number
325 	 * we need based on the number of pages in the system.  Each swblock
326 	 * can hold 16 pages, so this is probably overkill.  This reservation
327 	 * is typically limited to around 70MB by default.
328 	 */
329 
330 	n = cnt.v_page_count;
331 	if (maxswzone && n > maxswzone / sizeof(struct swblock))
332 		n = maxswzone / sizeof(struct swblock);
333 	n2 = n;
334 
335 	do {
336 		swap_zone = zinit(
337 		       "SWAPMETA",
338 		       sizeof(struct swblock),
339 		       n,
340 		       ZONE_INTERRUPT,
341 		       1
342 		       );
343 		if (swap_zone != NULL)
344 			break;
345 		/*
346 		 * if the allocation failed, try a zone two thirds the
347 		 * size of the previous attempt.
348 		 */
349 		n -= ((n + 2) / 3);
350 	} while (n > 0);
351 
352 	if (swap_zone == NULL)
353 		panic("failed to zinit swap_zone.");
354 	if (n2 != n)
355 		printf("Swap zone entries reduced from %d to %d.\n", n2, n);
356 	n2 = n;
357 
358 	/*
359 	 * Initialize our meta-data hash table.  The swapper does not need to
360 	 * be quite as efficient as the VM system, so we do not use an
361 	 * oversized hash table.
362 	 *
363 	 * 	n: 		size of hash table, must be power of 2
364 	 *	swhash_mask:	hash table index mask
365 	 */
366 
367 	for (n = 1; n < n2 / 8; n *= 2)
368 		;
369 
370 	swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK | M_ZERO);
371 
372 	swhash_mask = n - 1;
373 }
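
/*
 * For illustration of the sizing above: with n2 == 65536 swblock entries
 * the loop stops at n == 8192 hash buckets (swhash_mask 0x1fff), i.e.
 * roughly eight swblocks per bucket before the hash chains grow.
 */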
374 
375 /*
376  * SWAP_PAGER_ALLOC() -	allocate a new OBJT_SWAP VM object and instantiate
377  *			its metadata structures.
378  *
379  *	This routine is called from the mmap and fork code to create a new
380  *	OBJT_SWAP object.  We do this by creating an OBJT_DEFAULT object
381  *	and then converting it with swp_pager_meta_build().
382  *
383  *	This routine may block in vm_object_allocate() and create a named
384  *	object lookup race, so we must interlock.   We must also run at
385  *	splvm() for the object lookup to handle races with interrupts, but
386  *	we do not have to maintain splvm() in between the lookup and the
387  *	add because (I believe) it is not possible to attempt to create
388  *	a new swap object w/handle when a default object with that handle
389  *	already exists.
390  */
391 
392 static vm_object_t
393 swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
394 		 vm_ooffset_t offset)
395 {
396 	vm_object_t object;
397 
398 	GIANT_REQUIRED;
399 
400 	if (handle) {
401 		/*
402 		 * Reference existing named region or allocate new one.  There
403 		 * should not be a race here against swp_pager_meta_build()
404 		 * as called from vm_page_remove() in regards to the lookup
405 		 * of the handle.
406 		 */
407 		sx_xlock(&sw_alloc_sx);
408 		object = vm_pager_object_lookup(NOBJLIST(handle), handle);
409 
410 		if (object != NULL) {
411 			vm_object_reference(object);
412 		} else {
413 			object = vm_object_allocate(OBJT_DEFAULT,
414 				OFF_TO_IDX(offset + PAGE_MASK + size));
415 			object->handle = handle;
416 
417 			swp_pager_meta_build(object, 0, SWAPBLK_NONE);
418 		}
419 		sx_xunlock(&sw_alloc_sx);
420 	} else {
421 		object = vm_object_allocate(OBJT_DEFAULT,
422 			OFF_TO_IDX(offset + PAGE_MASK + size));
423 
424 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
425 	}
426 
427 	return (object);
428 }
429 
430 /*
431  * SWAP_PAGER_DEALLOC() -	remove swap metadata from object
432  *
433  *	The swap backing for the object is destroyed.  The code is
434  *	designed such that we can reinstantiate it later, but this
435  *	routine is typically called only when the entire object is
436  *	about to be destroyed.
437  *
438  *	This routine is allowed to block, but it no longer does.
439  *
440  *	The object must be locked or unreferenceable.
441  */
442 
443 static void
444 swap_pager_dealloc(object)
445 	vm_object_t object;
446 {
447 	int s;
448 
449 	GIANT_REQUIRED;
450 
451 	/*
452 	 * Remove from list right away so lookups will fail if we block for
453 	 * pageout completion.
454 	 */
455 	mtx_lock(&sw_alloc_mtx);
456 	if (object->handle == NULL) {
457 		TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list);
458 	} else {
459 		TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list);
460 	}
461 	mtx_unlock(&sw_alloc_mtx);
462 
463 	vm_object_pip_wait(object, "swpdea");
464 
465 	/*
466 	 * Free all remaining metadata.  We only bother to free it from
467 	 * the swap meta data.  We do not attempt to free swapblk's still
468 	 * associated with vm_page_t's for this object.  We do not care
469 	 * if paging is still in progress on some objects.
470 	 */
471 	s = splvm();
472 	swp_pager_meta_free_all(object);
473 	splx(s);
474 }
475 
476 /************************************************************************
477  *			SWAP PAGER BITMAP ROUTINES			*
478  ************************************************************************/
479 
480 /*
481  * SWP_PAGER_GETSWAPSPACE() -	allocate raw swap space
482  *
483  *	Allocate swap for the requested number of pages.  The starting
484  *	swap block number (a page index) is returned or SWAPBLK_NONE
485  *	if the allocation failed.
486  *
487  *	Also has the side effect of advising that somebody made a mistake
488  *	when they configured swap and didn't configure enough.
489  *
490  *	Must be called at splvm() to avoid races with bitmap frees from
491  *	vm_page_remove() aka swap_pager_page_removed().
492  *
493  *	This routine may not block
494  *	This routine must be called at splvm().
495  */
496 
497 static __inline daddr_t
498 swp_pager_getswapspace(npages)
499 	int npages;
500 {
501 	daddr_t blk;
502 
503 	GIANT_REQUIRED;
504 
505 	if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) {
506 		if (swap_pager_full != 2) {
507 			printf("swap_pager_getswapspace: failed\n");
508 			swap_pager_full = 2;
509 			swap_pager_almost_full = 1;
510 		}
511 	} else {
512 		vm_swap_size -= npages;
513 		/* per-swap area stats */
514 		swdevt[BLK2DEVIDX(blk)].sw_used += npages;
515 		swp_sizecheck();
516 	}
517 	return(blk);
518 }
519 
520 /*
521  * SWP_PAGER_FREESWAPSPACE() -	free raw swap space
522  *
523  *	This routine returns the specified swap blocks back to the bitmap.
524  *
525  *	Note:  This routine may not block (it could in the old swap code),
526  *	and through the use of the new blist routines it does not block.
527  *
528  *	We must be called at splvm() to avoid races with bitmap frees from
529  *	vm_page_remove() aka swap_pager_page_removed().
530  *
531  *	This routine may not block
532  *	This routine must be called at splvm().
533  */
534 
535 static __inline void
536 swp_pager_freeswapspace(blk, npages)
537 	daddr_t blk;
538 	int npages;
539 {
540 	GIANT_REQUIRED;
541 
542 	blist_free(swapblist, blk, npages);
543 	vm_swap_size += npages;
544 	/* per-swap area stats */
545 	swdevt[BLK2DEVIDX(blk)].sw_used -= npages;
546 	swp_sizecheck();
547 }
548 
549 /*
550  * SWAP_PAGER_FREESPACE() -	frees swap blocks associated with a page
551  *				range within an object.
552  *
553  *	This is a globally accessible routine.
554  *
555  *	This routine removes swapblk assignments from swap metadata.
556  *
557  *	The external callers of this routine typically have already destroyed
558  *	or renamed vm_page_t's associated with this range in the object so
559  *	we should be ok.
560  *
561  *	This routine may be called at any spl.  We up our spl to splvm temporarily
562  *	in order to perform the metadata removal.
563  */
564 
565 void
566 swap_pager_freespace(object, start, size)
567 	vm_object_t object;
568 	vm_pindex_t start;
569 	vm_size_t size;
570 {
571 	int s = splvm();
572 
573 	GIANT_REQUIRED;
574 	swp_pager_meta_free(object, start, size);
575 	splx(s);
576 }
577 
578 /*
579  * SWAP_PAGER_RESERVE() - reserve swap blocks in object
580  *
581  *	Assigns swap blocks to the specified range within the object.  The
582  *	swap blocks are not zeroed.  Any previous swap assignment is destroyed.
583  *
584  *	Returns 0 on success, -1 on failure.
585  */
586 
587 int
588 swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size)
589 {
590 	int s;
591 	int n = 0;
592 	daddr_t blk = SWAPBLK_NONE;
593 	vm_pindex_t beg = start;	/* save start index */
594 
595 	s = splvm();
596 	while (size) {
597 		if (n == 0) {
598 			n = BLIST_MAX_ALLOC;
599 			while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) {
600 				n >>= 1;
601 				if (n == 0) {
602 					swp_pager_meta_free(object, beg, start - beg);
603 					splx(s);
604 					return(-1);
605 				}
606 			}
607 		}
608 		swp_pager_meta_build(object, start, blk);
609 		--size;
610 		++start;
611 		++blk;
612 		--n;
613 	}
614 	swp_pager_meta_free(object, start, n);
615 	splx(s);
616 	return(0);
617 }
618 
619 /*
620  * SWAP_PAGER_COPY() -  copy blocks from source pager to destination pager
621  *			and destroy the source.
622  *
623  *	Copy any valid swapblks from the source to the destination.  In
624  *	cases where both the source and destination have a valid swapblk,
625  *	we keep the destination's.
626  *
627  *	This routine is allowed to block.  It may block allocating metadata
628  *	indirectly through swp_pager_meta_build() or if paging is still in
629  *	progress on the source.
630  *
631  *	This routine can be called at any spl
632  *
633  *	XXX vm_page_collapse() kinda expects us not to block because we
634  *	supposedly do not need to allocate memory, but for the moment we
635  *	*may* have to get a little memory from the zone allocator, but
636  *	it is taken from the interrupt memory.  We should be ok.
637  *
638  *	The source object contains no vm_page_t's (which is just as well)
639  *
640  *	The source object is of type OBJT_SWAP.
641  *
642  *	The source and destination objects must be locked or
643  *	inaccessible (XXX are they ?)
644  */
645 
646 void
647 swap_pager_copy(srcobject, dstobject, offset, destroysource)
648 	vm_object_t srcobject;
649 	vm_object_t dstobject;
650 	vm_pindex_t offset;
651 	int destroysource;
652 {
653 	vm_pindex_t i;
654 	int s;
655 
656 	GIANT_REQUIRED;
657 
658 	s = splvm();
659 	/*
660 	 * If destroysource is set, we remove the source object from the
661 	 * swap_pager internal queue now.
662 	 */
663 
664 	if (destroysource) {
665 		mtx_lock(&sw_alloc_mtx);
666 		if (srcobject->handle == NULL) {
667 			TAILQ_REMOVE(
668 			    &swap_pager_un_object_list,
669 			    srcobject,
670 			    pager_object_list
671 			);
672 		} else {
673 			TAILQ_REMOVE(
674 			    NOBJLIST(srcobject->handle),
675 			    srcobject,
676 			    pager_object_list
677 			);
678 		}
679 		mtx_unlock(&sw_alloc_mtx);
680 	}
681 
682 	/*
683 	 * transfer source to destination.
684 	 */
685 
686 	for (i = 0; i < dstobject->size; ++i) {
687 		daddr_t dstaddr;
688 
689 		/*
690 		 * Locate (without changing) the swapblk on the destination,
691 		 * unless it is invalid in which case free it silently, or
692 		 * if the destination is a resident page, in which case the
693 		 * source is thrown away.
694 		 */
695 
696 		dstaddr = swp_pager_meta_ctl(dstobject, i, 0);
697 
698 		if (dstaddr == SWAPBLK_NONE) {
699 			/*
700 			 * Destination has no swapblk and is not resident,
701 			 * copy source.
702 			 */
703 			daddr_t srcaddr;
704 
705 			srcaddr = swp_pager_meta_ctl(
706 			    srcobject,
707 			    i + offset,
708 			    SWM_POP
709 			);
710 
711 			if (srcaddr != SWAPBLK_NONE)
712 				swp_pager_meta_build(dstobject, i, srcaddr);
713 		} else {
714 			/*
715 			 * Destination has valid swapblk or it is represented
716 			 * by a resident page.  We destroy the sourceblock.
717 			 */
718 
719 			swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE);
720 		}
721 	}
722 
723 	/*
724 	 * Free left over swap blocks in source.
725 	 *
726 	 * We have to revert the type to OBJT_DEFAULT so we do not accidentally
727 	 * double-remove the object from the swap queues.
728 	 */
729 
730 	if (destroysource) {
731 		swp_pager_meta_free_all(srcobject);
732 		/*
733 		 * Reverting the type is not necessary, the caller is going
734 		 * to destroy srcobject directly, but I'm doing it here
735 		 * for consistency since we've removed the object from its
736 		 * queues.
737 		 */
738 		srcobject->type = OBJT_DEFAULT;
739 	}
740 	splx(s);
741 }
742 
743 /*
744  * SWAP_PAGER_HASPAGE() -	determine if we have good backing store for
745  *				the requested page.
746  *
747  *	We determine whether good backing store exists for the requested
748  *	page and return TRUE if it does, FALSE if it doesn't.
749  *
750  *	If TRUE, we also try to determine how much valid, contiguous backing
751  *	store exists before and after the requested page within a reasonable
752  *	distance.  We do not try to restrict it to the swap device stripe
753  *	(that is handled in getpages/putpages).  It probably isn't worth
754  *	doing here.
755  */
756 
757 boolean_t
758 swap_pager_haspage(object, pindex, before, after)
759 	vm_object_t object;
760 	vm_pindex_t pindex;
761 	int *before;
762 	int *after;
763 {
764 	daddr_t blk0;
765 	int s;
766 
767 	/*
768 	 * do we have good backing store at the requested index ?
769 	 */
770 
771 	s = splvm();
772 	blk0 = swp_pager_meta_ctl(object, pindex, 0);
773 
774 	if (blk0 == SWAPBLK_NONE) {
775 		splx(s);
776 		if (before)
777 			*before = 0;
778 		if (after)
779 			*after = 0;
780 		return (FALSE);
781 	}
782 
783 	/*
784 	 * find backwards-looking contiguous good backing store
785 	 */
786 
787 	if (before != NULL) {
788 		int i;
789 
790 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
791 			daddr_t blk;
792 
793 			if (i > pindex)
794 				break;
795 			blk = swp_pager_meta_ctl(object, pindex - i, 0);
796 			if (blk != blk0 - i)
797 				break;
798 		}
799 		*before = (i - 1);
800 	}
801 
802 	/*
803 	 * find forward-looking contiguous good backing store
804 	 */
805 
806 	if (after != NULL) {
807 		int i;
808 
809 		for (i = 1; i < (SWB_NPAGES/2); ++i) {
810 			daddr_t blk;
811 
812 			blk = swp_pager_meta_ctl(object, pindex + i, 0);
813 			if (blk != blk0 + i)
814 				break;
815 		}
816 		*after = (i - 1);
817 	}
818 	splx(s);
819 	return (TRUE);
820 }
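
/*
 * For illustration: with SWB_NPAGES of 16 the loops above scan at most
 * 7 pages in each direction.  If blk0 == 1000 backs pindex 50, and
 * blocks 999 and 998 back pindexes 49 and 48 while pindex 47 is backed
 * elsewhere, the backward scan stops at i == 3 and *before is reported
 * as 2.
 */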
821 
822 /*
823  * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page
824  *
825  *	This removes any associated swap backing store, whether valid or
826  *	not, from the page.
827  *
828  *	This routine is typically called when a page is made dirty, at
829  *	which point any associated swap can be freed.  MADV_FREE also
830  *	calls us in a special-case situation
831  *
832  *	NOTE!!!  If the page is clean and the swap was valid, the caller
833  *	should make the page dirty before calling this routine.  This routine
834  *	does NOT change the m->dirty status of the page.  Also: MADV_FREE
835  *	depends on it.
836  *
837  *	This routine may not block
838  *	This routine must be called at splvm()
839  */
840 
841 static void
842 swap_pager_unswapped(m)
843 	vm_page_t m;
844 {
845 	swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE);
846 }
847 
848 /*
849  * SWAP_PAGER_STRATEGY() - read, write, free blocks
850  *
851  *	This implements the vm_pager_strategy() interface to swap and allows
852  *	other parts of the system to directly access swap as backing store
853  *	through vm_objects of type OBJT_SWAP.  This is intended to be a
854  *	cacheless interface ( i.e. caching occurs at higher levels ).
855  *	Therefore we do not maintain any resident pages.  All I/O goes
856  *	directly to and from the swap device.
857  *
858  *	Note that b_blkno is scaled for PAGE_SIZE
859  *
860  *	We currently attempt to run I/O synchronously or asynchronously as
861  *	the caller requests.  This isn't perfect because we lose error
862  *	sequencing when we run multiple ops in parallel to satisfy a request.
863  *	But this is swap, so we let it all hang out.
864  */
865 
866 static void
867 swap_pager_strategy(vm_object_t object, struct bio *bp)
868 {
869 	vm_pindex_t start;
870 	int count;
871 	int s;
872 	char *data;
873 	struct buf *nbp = NULL;
874 
875 	GIANT_REQUIRED;
876 
877 	/* XXX: KASSERT instead ? */
878 	if (bp->bio_bcount & PAGE_MASK) {
879 		biofinish(bp, NULL, EINVAL);
880 		printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount);
881 		return;
882 	}
883 
884 	/*
885 	 * Clear error indication, initialize page index, count, data pointer.
886 	 */
887 
888 	bp->bio_error = 0;
889 	bp->bio_flags &= ~BIO_ERROR;
890 	bp->bio_resid = bp->bio_bcount;
891 	*(u_int *) &bp->bio_driver1 = 0;
892 
893 	start = bp->bio_pblkno;
894 	count = howmany(bp->bio_bcount, PAGE_SIZE);
895 	data = bp->bio_data;
896 
897 	s = splvm();
898 
899 	/*
900 	 * Deal with BIO_DELETE
901 	 */
902 
903 	if (bp->bio_cmd == BIO_DELETE) {
904 		/*
905 		 * FREE PAGE(s) - destroy underlying swap that is no longer
906 		 *		  needed.
907 		 */
908 		swp_pager_meta_free(object, start, count);
909 		splx(s);
910 		bp->bio_resid = 0;
911 		biodone(bp);
912 		return;
913 	}
914 
915 	/*
916 	 * Execute read or write
917 	 */
918 	while (count > 0) {
919 		daddr_t blk;
920 
921 		/*
922 		 * Obtain block.  If block not found and writing, allocate a
923 		 * new block and build it into the object.
924 		 */
925 
926 		blk = swp_pager_meta_ctl(object, start, 0);
927 		if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) {
928 			blk = swp_pager_getswapspace(1);
929 			if (blk == SWAPBLK_NONE) {
930 				bp->bio_error = ENOMEM;
931 				bp->bio_flags |= BIO_ERROR;
932 				break;
933 			}
934 			swp_pager_meta_build(object, start, blk);
935 		}
936 
937 		/*
938 		 * Do we have to flush our current collection?  Yes if:
939 		 *
940 		 *	- no swap block at this index
941 		 *	- swap block is not contiguous
942 		 *	- we cross a physical disk boundary in the
943 		 *	  stripe.
944 		 */
945 
946 		if (
947 		    nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk ||
948 		     ((nbp->b_blkno ^ blk) & dmmax_mask)
949 		    )
950 		) {
951 			splx(s);
952 			if (bp->bio_cmd == BIO_READ) {
953 				++cnt.v_swapin;
954 				cnt.v_swappgsin += btoc(nbp->b_bcount);
955 			} else {
956 				++cnt.v_swapout;
957 				cnt.v_swappgsout += btoc(nbp->b_bcount);
958 				nbp->b_dirtyend = nbp->b_bcount;
959 			}
960 			flushchainbuf(nbp);
961 			s = splvm();
962 			nbp = NULL;
963 		}
964 
965 		/*
966 		 * Add new swapblk to nbp, instantiating nbp if necessary.
967 		 * Zero-fill reads are able to take a shortcut.
968 		 */
969 
970 		if (blk == SWAPBLK_NONE) {
971 			/*
972 			 * We can only get here if we are reading.  Since
973 			 * we are at splvm() we can safely modify bio_resid,
974 			 * even if chain ops are in progress.
975 			 */
976 			bzero(data, PAGE_SIZE);
977 			bp->bio_resid -= PAGE_SIZE;
978 		} else {
979 			if (nbp == NULL) {
980 				nbp = getchainbuf(bp, swapdev_vp, B_ASYNC);
981 				nbp->b_blkno = blk;
982 				nbp->b_bcount = 0;
983 				nbp->b_data = data;
984 			}
985 			nbp->b_bcount += PAGE_SIZE;
986 		}
987 		--count;
988 		++start;
989 		data += PAGE_SIZE;
990 	}
991 
992 	/*
993 	 *  Flush out last buffer
994 	 */
995 
996 	splx(s);
997 
998 	if (nbp) {
999 		if (nbp->b_iocmd == BIO_READ) {
1000 			++cnt.v_swapin;
1001 			cnt.v_swappgsin += btoc(nbp->b_bcount);
1002 		} else {
1003 			++cnt.v_swapout;
1004 			cnt.v_swappgsout += btoc(nbp->b_bcount);
1005 			nbp->b_dirtyend = nbp->b_bcount;
1006 		}
1007 		flushchainbuf(nbp);
1008 		/* nbp = NULL; */
1009 	}
1010 	/*
1011 	 * Wait for completion.
1012 	 */
1013 
1014 	waitchainbuf(bp, 0, 1);
1015 }
1016 
1017 /*
1018  * SWAP_PAGER_GETPAGES() - bring pages in from swap
1019  *
1020  *	Attempt to retrieve (m, count) pages from backing store, but make
1021  *	sure we retrieve at least m[reqpage].  We try to load in as large
1022  *	a chunk surrounding m[reqpage] as is contiguous in swap and which
1023  *	belongs to the same object.
1024  *
1025  *	The code is designed for asynchronous operation and
1026  *	immediate-notification of 'reqpage' but tends not to be
1027  *	used that way.  Please do not optimize-out this algorithmic
1028  *	feature, I intend to improve on it in the future.
1029  *
1030  *	The parent has a single vm_object_pip_add() reference prior to
1031  *	calling us and we should return with the same.
1032  *
1033  *	The parent has BUSY'd the pages.  We should return with 'm'
1034  *	left busy, but the others adjusted.
1035  */
1036 
1037 static int
1038 swap_pager_getpages(object, m, count, reqpage)
1039 	vm_object_t object;
1040 	vm_page_t *m;
1041 	int count, reqpage;
1042 {
1043 	struct buf *bp;
1044 	vm_page_t mreq;
1045 	int s;
1046 	int i;
1047 	int j;
1048 	daddr_t blk;
1049 	vm_offset_t kva;
1050 	vm_pindex_t lastpindex;
1051 
1052 	GIANT_REQUIRED;
1053 
1054 	mreq = m[reqpage];
1055 
1056 	if (mreq->object != object) {
1057 		panic("swap_pager_getpages: object mismatch %p/%p",
1058 		    object,
1059 		    mreq->object
1060 		);
1061 	}
1062 	/*
1063 	 * Calculate range to retrieve.  The pages have already been assigned
1064 	 * their swapblks.  We require a *contiguous* range that falls entirely
1065 	 * within a single device stripe.   If we do not supply it, bad things
1066 	 * happen.  Note that blk, iblk & jblk can be SWAPBLK_NONE, but the
1067 	 * loops are set up such that the case(s) are handled implicitly.
1068 	 *
1069 	 * The swp_*() calls must be made at splvm().  vm_page_free() does
1070 	 * not need to be, but it will go a little faster if it is.
1071 	 */
1072 
1073 	s = splvm();
1074 	blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0);
1075 
1076 	for (i = reqpage - 1; i >= 0; --i) {
1077 		daddr_t iblk;
1078 
1079 		iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0);
1080 		if (blk != iblk + (reqpage - i))
1081 			break;
1082 		if ((blk ^ iblk) & dmmax_mask)
1083 			break;
1084 	}
1085 	++i;
1086 
1087 	for (j = reqpage + 1; j < count; ++j) {
1088 		daddr_t jblk;
1089 
1090 		jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0);
1091 		if (blk != jblk - (j - reqpage))
1092 			break;
1093 		if ((blk ^ jblk) & dmmax_mask)
1094 			break;
1095 	}
1096 
1097 	/*
1098 	 * free pages outside our collection range.   Note: we never free
1099 	 * mreq, it must remain busy throughout.
1100 	 */
1101 
1102 	{
1103 		int k;
1104 
1105 		for (k = 0; k < i; ++k)
1106 			vm_page_free(m[k]);
1107 		for (k = j; k < count; ++k)
1108 			vm_page_free(m[k]);
1109 	}
1110 	splx(s);
1111 
1113 	/*
1114 	 * Return VM_PAGER_FAIL if we have nothing to do.  Return mreq
1115 	 * still busy, but the others unbusied.
1116 	 */
1117 
1118 	if (blk == SWAPBLK_NONE)
1119 		return(VM_PAGER_FAIL);
1120 
1121 	/*
1122 	 * Get a swap buffer header to perform the IO
1123 	 */
1124 
1125 	bp = getpbuf(&nsw_rcount);
1126 	kva = (vm_offset_t) bp->b_data;
1127 
1128 	/*
1129 	 * map our page(s) into kva for input
1130 	 *
1131 	 * NOTE: B_PAGING is set by pbgetvp()
1132 	 */
1133 
1134 	pmap_qenter(kva, m + i, j - i);
1135 
1136 	bp->b_iocmd = BIO_READ;
1137 	bp->b_iodone = swp_pager_async_iodone;
1138 	bp->b_rcred = crhold(proc0.p_ucred);
1139 	bp->b_wcred = crhold(proc0.p_ucred);
1140 	bp->b_data = (caddr_t) kva;
1141 	bp->b_blkno = blk - (reqpage - i);
1142 	bp->b_bcount = PAGE_SIZE * (j - i);
1143 	bp->b_bufsize = PAGE_SIZE * (j - i);
1144 	bp->b_pager.pg_reqpage = reqpage - i;
1145 
1146 	{
1147 		int k;
1148 
1149 		for (k = i; k < j; ++k) {
1150 			bp->b_pages[k - i] = m[k];
1151 			vm_page_flag_set(m[k], PG_SWAPINPROG);
1152 		}
1153 	}
1154 	bp->b_npages = j - i;
1155 
1156 	pbgetvp(swapdev_vp, bp);
1157 
1158 	cnt.v_swapin++;
1159 	cnt.v_swappgsin += bp->b_npages;
1160 
1161 	/*
1162 	 * We still hold the lock on mreq, and our automatic completion routine
1163 	 * does not remove it.
1164 	 */
1165 
1166 	vm_object_pip_add(mreq->object, bp->b_npages);
1167 	lastpindex = m[j-1]->pindex;
1168 
1169 	/*
1170 	 * perform the I/O.  NOTE!!!  bp cannot be considered valid after
1171 	 * this point because we automatically release it on completion.
1172 	 * Instead, we look at the one page we are interested in which we
1173 	 * still hold a lock on even through the I/O completion.
1174 	 *
1175 	 * The other pages in our m[] array are also released on completion,
1176 	 * so we cannot assume they are valid anymore either.
1177 	 *
1178 	 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1179 	 */
1180 	BUF_KERNPROC(bp);
1181 	BUF_STRATEGY(bp);
1182 
1183 	/*
1184 	 * wait for the page we want to complete.  PG_SWAPINPROG is always
1185 	 * cleared on completion.  If an I/O error occurs, SWAPBLK_NONE
1186 	 * is set in the meta-data.
1187 	 */
1188 
1189 	s = splvm();
1190 
1191 	while ((mreq->flags & PG_SWAPINPROG) != 0) {
1192 		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
1193 		cnt.v_intrans++;
1194 		if (tsleep(mreq, PSWP, "swread", hz*20)) {
1195 			printf(
1196 			    "swap_pager: indefinite wait buffer: device:"
1197 				" %s, blkno: %ld, size: %ld\n",
1198 			    devtoname(bp->b_dev), (long)bp->b_blkno,
1199 			    bp->b_bcount
1200 			);
1201 		}
1202 	}
1203 
1204 	splx(s);
1205 
1206 	/*
1207 	 * mreq is left busied after completion, but all the other pages
1208 	 * are freed.  If we had an unrecoverable read error the page will
1209 	 * not be valid.
1210 	 */
1211 
1212 	if (mreq->valid != VM_PAGE_BITS_ALL) {
1213 		return(VM_PAGER_ERROR);
1214 	} else {
1215 		return(VM_PAGER_OK);
1216 	}
1217 
1218 	/*
1219 	 * A final note: in a low swap situation, we cannot deallocate swap
1220 	 * and mark a page dirty here because the caller is likely to mark
1221 	 * the page clean when we return, causing the page to possibly revert
1222 	 * to all-zero's later.
1223 	 */
1224 }
1225 
1226 /*
1227  *	swap_pager_putpages:
1228  *
1229  *	Assign swap (if necessary) and initiate I/O on the specified pages.
1230  *
1231  *	We support both OBJT_DEFAULT and OBJT_SWAP objects.  DEFAULT objects
1232  *	are automatically converted to SWAP objects.
1233  *
1234  *	In a low memory situation we may block in VOP_STRATEGY(), but the new
1235  *	vm_page reservation system coupled with properly written VFS devices
1236  *	should ensure that no low-memory deadlock occurs.  This is an area
1237  *	which needs work.
1238  *
1239  *	The parent has N vm_object_pip_add() references prior to
1240  *	calling us and will remove references for rtvals[] that are
1241  *	not set to VM_PAGER_PEND.  We need to remove the rest on I/O
1242  *	completion.
1243  *
1244  *	The parent has soft-busy'd the pages it passes us and will unbusy
1245 	 *	those whose rtvals[] entry is not set to VM_PAGER_PEND on return.
1246  *	We need to unbusy the rest on I/O completion.
1247  */
1248 
1249 void
1250 swap_pager_putpages(object, m, count, sync, rtvals)
1251 	vm_object_t object;
1252 	vm_page_t *m;
1253 	int count;
1254 	boolean_t sync;
1255 	int *rtvals;
1256 {
1257 	int i;
1258 	int n = 0;
1259 
1260 	GIANT_REQUIRED;
1261 	if (count && m[0]->object != object) {
1262 		panic("swap_pager_putpages: object mismatch %p/%p",
1263 		    object,
1264 		    m[0]->object
1265 		);
1266 	}
1267 	/*
1268 	 * Step 1
1269 	 *
1270 	 * Turn object into OBJT_SWAP
1271 	 * check for bogus sysops
1272 	 * force sync if not pageout process
1273 	 */
1274 
1275 	if (object->type != OBJT_SWAP)
1276 		swp_pager_meta_build(object, 0, SWAPBLK_NONE);
1277 
1278 	if (curproc != pageproc)
1279 		sync = TRUE;
1280 
1281 	/*
1282 	 * Step 2
1283 	 *
1284 	 * Update nsw parameters from swap_async_max sysctl values.
1285 	 * Do not let the sysop crash the machine with bogus numbers.
1286 	 */
1287 
1288 	mtx_lock(&pbuf_mtx);
1289 	if (swap_async_max != nsw_wcount_async_max) {
1290 		int n;
1291 		int s;
1292 
1293 		/*
1294 		 * limit range
1295 		 */
1296 		if ((n = swap_async_max) > nswbuf / 2)
1297 			n = nswbuf / 2;
1298 		if (n < 1)
1299 			n = 1;
1300 		swap_async_max = n;
1301 
1302 		/*
1303 		 * Adjust difference ( if possible ).  If the current async
1304 		 * count is too low, we may not be able to make the adjustment
1305 		 * at this time.
1306 		 */
1307 		s = splvm();
1308 		n -= nsw_wcount_async_max;
1309 		if (nsw_wcount_async + n >= 0) {
1310 			nsw_wcount_async += n;
1311 			nsw_wcount_async_max += n;
1312 			wakeup(&nsw_wcount_async);
1313 		}
1314 		splx(s);
1315 	}
1316 	mtx_unlock(&pbuf_mtx);
1317 
1318 	/*
1319 	 * Step 3
1320 	 *
1321 	 * Assign swap blocks and issue I/O.  We reallocate swap on the fly.
1322 	 * The page is left dirty until the pageout operation completes
1323 	 * successfully.
1324 	 */
1325 
1326 	for (i = 0; i < count; i += n) {
1327 		int s;
1328 		int j;
1329 		struct buf *bp;
1330 		daddr_t blk;
1331 
1332 		/*
1333 		 * Maximum I/O size is limited by a number of factors.
1334 		 */
1335 
1336 		n = min(BLIST_MAX_ALLOC, count - i);
1337 		n = min(n, nsw_cluster_max);
1338 
1339 		s = splvm();
1340 
1341 		/*
1342 		 * Get biggest block of swap we can.  If we fail, fall
1343 		 * back and try to allocate a smaller block.  Don't go
1344 		 * overboard trying to allocate space if it would overly
1345 		 * fragment swap.
1346 		 */
1347 		while (
1348 		    (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE &&
1349 		    n > 4
1350 		) {
1351 			n >>= 1;
1352 		}
1353 		if (blk == SWAPBLK_NONE) {
1354 			for (j = 0; j < n; ++j)
1355 				rtvals[i+j] = VM_PAGER_FAIL;
1356 			splx(s);
1357 			continue;
1358 		}
1359 
1360 		/*
1361 		 * The I/O we are constructing cannot cross a physical
1362 		 * disk boundary in the swap stripe.  Note: we are still
1363 		 * at splvm().
1364 		 */
1365 		if ((blk ^ (blk + n)) & dmmax_mask) {
1366 			j = ((blk + dmmax) & dmmax_mask) - blk;
1367 			swp_pager_freeswapspace(blk + j, n - j);
1368 			n = j;
1369 		}
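
		/*
		 * Worked example, assuming dmmax == 32: blk == 60 with
		 * n == 8 would span blocks 60-67 and cross the stripe
		 * boundary at 64.  Here j == ((60 + 32) & ~31) - 60 == 4,
		 * blocks 64-67 go back to the bitmap, and the I/O is
		 * trimmed to the four blocks 60-63.
		 */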
1370 
1371 		/*
1372 		 * All I/O parameters have been satisfied, build the I/O
1373 		 * request and assign the swap space.
1374 		 *
1375 		 * NOTE: B_PAGING is set by pbgetvp()
1376 		 */
1377 
1378 		if (sync == TRUE) {
1379 			bp = getpbuf(&nsw_wcount_sync);
1380 		} else {
1381 			bp = getpbuf(&nsw_wcount_async);
1382 			bp->b_flags = B_ASYNC;
1383 		}
1384 		bp->b_iocmd = BIO_WRITE;
1385 		bp->b_spc = NULL;	/* not used, but NULL-out anyway */
1386 
1387 		pmap_qenter((vm_offset_t)bp->b_data, &m[i], n);
1388 
1389 		bp->b_rcred = crhold(proc0.p_ucred);
1390 		bp->b_wcred = crhold(proc0.p_ucred);
1391 		bp->b_bcount = PAGE_SIZE * n;
1392 		bp->b_bufsize = PAGE_SIZE * n;
1393 		bp->b_blkno = blk;
1394 
1395 		pbgetvp(swapdev_vp, bp);
1396 
1397 		for (j = 0; j < n; ++j) {
1398 			vm_page_t mreq = m[i+j];
1399 
1400 			swp_pager_meta_build(
1401 			    mreq->object,
1402 			    mreq->pindex,
1403 			    blk + j
1404 			);
1405 			vm_page_dirty(mreq);
1406 			rtvals[i+j] = VM_PAGER_OK;
1407 
1408 			vm_page_flag_set(mreq, PG_SWAPINPROG);
1409 			bp->b_pages[j] = mreq;
1410 		}
1411 		bp->b_npages = n;
1412 		/*
1413 		 * Must set dirty range for NFS to work.
1414 		 */
1415 		bp->b_dirtyoff = 0;
1416 		bp->b_dirtyend = bp->b_bcount;
1417 
1418 		cnt.v_swapout++;
1419 		cnt.v_swappgsout += bp->b_npages;
1420 		swapdev_vp->v_numoutput++;
1421 
1422 		splx(s);
1423 
1424 		/*
1425 		 * asynchronous
1426 		 *
1427 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1428 		 */
1429 
1430 		if (sync == FALSE) {
1431 			bp->b_iodone = swp_pager_async_iodone;
1432 			BUF_KERNPROC(bp);
1433 			BUF_STRATEGY(bp);
1434 
1435 			for (j = 0; j < n; ++j)
1436 				rtvals[i+j] = VM_PAGER_PEND;
1437 			/* restart outer loop */
1438 			continue;
1439 		}
1440 
1441 		/*
1442 		 * synchronous
1443 		 *
1444 		 * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY
1445 		 */
1446 
1447 		bp->b_iodone = swp_pager_sync_iodone;
1448 		BUF_STRATEGY(bp);
1449 
1450 		/*
1451 		 * Wait for the sync I/O to complete, then update rtvals.
1452 		 * We just set the rtvals[] to VM_PAGER_PEND so we can call
1453 		 * our async completion routine at the end, thus avoiding a
1454 		 * double-free.
1455 		 */
1456 		s = splbio();
1457 
1458 		while ((bp->b_flags & B_DONE) == 0) {
1459 			tsleep(bp, PVM, "swwrt", 0);
1460 		}
1461 
1462 		for (j = 0; j < n; ++j)
1463 			rtvals[i+j] = VM_PAGER_PEND;
1464 
1465 		/*
1466 		 * Now that we are through with the bp, we can call the
1467 		 * normal async completion, which frees everything up.
1468 		 */
1469 
1470 		swp_pager_async_iodone(bp);
1471 		splx(s);
1472 	}
1473 }
1474 
1475 /*
1476  *	swp_pager_sync_iodone:
1477  *
1478  *	Completion routine for synchronous reads and writes from/to swap.
1479  *	We just mark the bp as complete and wake up anyone waiting on it.
1480  *
1481  *	This routine may not block.  This routine is called at splbio() or better.
1482  */
1483 
1484 static void
1485 swp_pager_sync_iodone(bp)
1486 	struct buf *bp;
1487 {
1488 	bp->b_flags |= B_DONE;
1489 	bp->b_flags &= ~B_ASYNC;
1490 	wakeup(bp);
1491 }
1492 
1493 /*
1494  *	swp_pager_async_iodone:
1495  *
1496  *	Completion routine for asynchronous reads and writes from/to swap.
1497  *	Also called manually by synchronous code to finish up a bp.
1498  *
1499  *	For READ operations, the pages are PG_BUSY'd.  For WRITE operations,
1500  *	the pages are vm_page_t->busy'd.  For READ operations, we unbusy
1501  *	( clear PG_BUSY on ) all pages except the 'main' request page.  For
1502  *	WRITE operations, we drop the busy count on all pages ( we can do
1503  *	this because we marked them all VM_PAGER_PEND on return from putpages ).
1504  *
1505  *	This routine may not block.
1506  *	This routine is called at splbio() or better
1507  *
1508  *	We up ourselves to splvm() as required for various vm_page related
1509  *	calls.
1510  */
1511 
1512 static void
1513 swp_pager_async_iodone(bp)
1514 	struct buf *bp;
1515 {
1516 	int s;
1517 	int i;
1518 	vm_object_t object = NULL;
1519 
1520 	GIANT_REQUIRED;
1521 
1522 	bp->b_flags |= B_DONE;
1523 
1524 	/*
1525 	 * report error
1526 	 */
1527 
1528 	if (bp->b_ioflags & BIO_ERROR) {
1529 		printf(
1530 		    "swap_pager: I/O error - %s failed; blkno %ld,"
1531 			" size %ld, error %d\n",
1532 		    ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"),
1533 		    (long)bp->b_blkno,
1534 		    (long)bp->b_bcount,
1535 		    bp->b_error
1536 		);
1537 	}
1538 
1539 	/*
1540 	 * set object, raise to splvm().
1541 	 */
1542 
1543 	if (bp->b_npages)
1544 		object = bp->b_pages[0]->object;
1545 	s = splvm();
1546 
1547 	/*
1548 	 * remove the mapping for kernel virtual
1549 	 */
1550 	pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1551 
1552 	/*
1553 	 * cleanup pages.  If an error occurs writing to swap, we are in
1554 	 * very serious trouble.  If it happens to be a disk error, though,
1555 	 * we may be able to recover by reassigning the swap later on.  So
1556 	 * in this case we remove the m->swapblk assignment for the page
1557 	 * but do not free it in the rlist.  The erroneous block(s) are thus
1558 	 * never reallocated as swap.  Redirty the page and continue.
1559 	 */
1560 
1561 	for (i = 0; i < bp->b_npages; ++i) {
1562 		vm_page_t m = bp->b_pages[i];
1563 
1564 		vm_page_flag_clear(m, PG_SWAPINPROG);
1565 
1566 		if (bp->b_ioflags & BIO_ERROR) {
1567 			/*
1568 			 * If an error occurs I'd love to throw the swapblk
1569 			 * away without freeing it back to swapspace, so it
1570 			 * can never be used again.  But I can't from an
1571 			 * interrupt.
1572 			 */
1573 
1574 			if (bp->b_iocmd == BIO_READ) {
1575 				/*
1576 				 * When reading, reqpage needs to stay
1577 				 * locked for the parent, but all other
1578 				 * pages can be freed.  We still want to
1579 				 * wakeup the parent waiting on the page,
1580 				 * though.  ( also: pg_reqpage can be -1 and
1581 				 * not match anything ).
1582 				 *
1583 				 * We have to wake specifically requested pages
1584 				 * up too because we cleared PG_SWAPINPROG and
1585 				 * someone may be waiting for that.
1586 				 *
1587 				 * NOTE: for reads, m->dirty will probably
1588 				 * be overridden by the original caller of
1589 				 * getpages so don't play cute tricks here.
1590 				 *
1591 				 * XXX IT IS NOT LEGAL TO FREE THE PAGE HERE
1592 				 * AS THIS MESSES WITH object->memq, and it is
1593 				 * not legal to mess with object->memq from an
1594 				 * interrupt.
1595 				 */
1596 
1597 				m->valid = 0;
1598 				vm_page_flag_clear(m, PG_ZERO);
1599 
1600 				if (i != bp->b_pager.pg_reqpage)
1601 					vm_page_free(m);
1602 				else
1603 					vm_page_flash(m);
1604 				/*
1605 				 * If i == bp->b_pager.pg_reqpage, do not wake
1606 				 * the page up.  The caller needs to.
1607 				 */
1608 			} else {
1609 				/*
1610 				 * If a write error occurs, reactivate page
1611 				 * so it doesn't clog the inactive list,
1612 				 * then finish the I/O.
1613 				 */
1614 				vm_page_dirty(m);
1615 				vm_page_activate(m);
1616 				vm_page_io_finish(m);
1617 			}
1618 		} else if (bp->b_iocmd == BIO_READ) {
1619 			/*
1620 			 * For read success, clear dirty bits.  Nobody should
1621 			 * have this page mapped but don't take any chances,
1622 			 * make sure the pmap modify bits are also cleared.
1623 			 *
1624 			 * NOTE: for reads, m->dirty will probably be
1625 			 * overridden by the original caller of getpages so
1626 			 * we cannot set them in order to free the underlying
1627 			 * swap in a low-swap situation.  I don't think we'd
1628 			 * want to do that anyway, but it was an optimization
1629 			 * that existed in the old swapper for a time before
1630 			 * it got ripped out due to precisely this problem.
1631 			 *
1632 			 * clear PG_ZERO in page.
1633 			 *
1634 			 * If not the requested page then deactivate it.
1635 			 *
1636 			 * Note that the requested page, reqpage, is left
1637 			 * busied, but we still have to wake it up.  The
1638 			 * other pages are released (unbusied) by
1639 			 * vm_page_wakeup().  We do not set reqpage's
1640 			 * valid bits here, it is up to the caller.
1641 			 */
1642 
1643 			pmap_clear_modify(m);
1644 			m->valid = VM_PAGE_BITS_ALL;
1645 			vm_page_undirty(m);
1646 			vm_page_flag_clear(m, PG_ZERO);
1647 
1648 			/*
1649 			 * We have to wake specifically requested pages
1650 			 * up too because we cleared PG_SWAPINPROG and
1651 			 * could be waiting for it in getpages.  However,
1652 			 * be sure not to unbusy the page that getpages
1653 			 * specifically requested - getpages expects it
1654 			 * to be left busy.
1655 			 */
1656 			if (i != bp->b_pager.pg_reqpage) {
1657 				vm_page_deactivate(m);
1658 				vm_page_wakeup(m);
1659 			} else {
1660 				vm_page_flash(m);
1661 			}
1662 		} else {
1663 			/*
1664 			 * For write success, clear the modify and dirty
1665 			 * status, then finish the I/O ( which decrements the
1666 			 * busy count and possibly wakes waiters up ).
1667 			 */
1668 			pmap_clear_modify(m);
1669 			vm_page_undirty(m);
1670 			vm_page_io_finish(m);
1671 			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
1672 				vm_page_protect(m, VM_PROT_READ);
1673 		}
1674 	}
1675 
1676 	/*
1677 	 * adjust pip.  NOTE: the original parent may still have its own
1678 	 * pip refs on the object.
1679 	 */
1680 
1681 	if (object)
1682 		vm_object_pip_wakeupn(object, bp->b_npages);
1683 
1684 	/*
1685 	 * release the physical I/O buffer
1686 	 */
1687 
1688 	relpbuf(
1689 	    bp,
1690 	    ((bp->b_iocmd == BIO_READ) ? &nsw_rcount :
1691 		((bp->b_flags & B_ASYNC) ?
1692 		    &nsw_wcount_async :
1693 		    &nsw_wcount_sync
1694 		)
1695 	    )
1696 	);
1697 	splx(s);
1698 }
1699 
1700 /************************************************************************
1701  *				SWAP META DATA 				*
1702  ************************************************************************
1703  *
1704  *	These routines manipulate the swap metadata stored in the
1705  *	OBJT_SWAP object.  All swp_*() routines must be called at
1706  *	splvm() because swap can be freed up by the low level vm_page
1707  *	code which might be called from interrupts beyond what splbio() covers.
1708  *
1709  *	Swap metadata is implemented with a global hash and not directly
1710  *	linked into the object.  Instead the object simply contains
1711  *	appropriate tracking counters.
1712  */
1713 
1714 /*
1715  * SWP_PAGER_HASH() -	hash swap meta data
1716  *
1717  *	This is an inline helper function which hashes the swapblk given
1718  *	the object and page index.  It returns a pointer to a pointer
1719  *	to the swblock structure, or a pointer to a NULL pointer if it
1720  *	could not find a swapblk.
1721  *
1722  *	This routine must be called at splvm().
1723  */
1724 
1725 static __inline struct swblock **
1726 swp_pager_hash(vm_object_t object, vm_pindex_t index)
1727 {
1728 	struct swblock **pswap;
1729 	struct swblock *swap;
1730 
1731 	index &= ~SWAP_META_MASK;
1732 	pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask];
1733 
1734 	while ((swap = *pswap) != NULL) {
1735 		if (swap->swb_object == object &&
1736 		    swap->swb_index == index
1737 		) {
1738 			break;
1739 		}
1740 		pswap = &swap->swb_hnext;
1741 	}
1742 	return(pswap);
1743 }
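
/*
 * For illustration, assuming SWAP_META_PAGES of 16 (SWAP_META_MASK 15):
 * a lookup of (object, pindex 37) masks the index down to 32, hashes
 * (32 ^ object) into swhash, and the caller then indexes
 * swb_pages[37 & 15], i.e. slot 5 of the swblock covering pindexes 32-47.
 */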
1744 
1745 /*
1746  * SWP_PAGER_META_BUILD() -	add swap block to swap meta data for object
1747  *
1748  *	We first convert the object to a swap object if it is a default
1749  *	object.
1750  *
1751  *	The specified swapblk is added to the object's swap metadata.  If
1752  *	the swapblk is not valid, it is freed instead.  Any previously
1753  *	assigned swapblk is freed.
1754  *
1755  *	This routine must be called at splvm(), except when used to convert
1756  *	an OBJT_DEFAULT object into an OBJT_SWAP object.
1757  */
1758 
1759 static void
1760 swp_pager_meta_build(
1761 	vm_object_t object,
1762 	vm_pindex_t index,
1763 	daddr_t swapblk
1764 ) {
1765 	struct swblock *swap;
1766 	struct swblock **pswap;
1767 
1768 	GIANT_REQUIRED;
1769 	/*
1770 	 * Convert default object to swap object if necessary
1771 	 */
1772 
1773 	if (object->type != OBJT_SWAP) {
1774 		object->type = OBJT_SWAP;
1775 		object->un_pager.swp.swp_bcount = 0;
1776 
1777 		mtx_lock(&sw_alloc_mtx);
1778 		if (object->handle != NULL) {
1779 			TAILQ_INSERT_TAIL(
1780 			    NOBJLIST(object->handle),
1781 			    object,
1782 			    pager_object_list
1783 			);
1784 		} else {
1785 			TAILQ_INSERT_TAIL(
1786 			    &swap_pager_un_object_list,
1787 			    object,
1788 			    pager_object_list
1789 			);
1790 		}
1791 		mtx_unlock(&sw_alloc_mtx);
1792 	}
1793 
1794 	/*
1795 	 * Locate the hash entry.  If none is found and we aren't adding
1796 	 * anything, just return; otherwise create one.  If the allocation
1797 	 * fails we wait and, since the hash table may have changed, retry.
1798 	 */
1799 
1800 retry:
1801 	pswap = swp_pager_hash(object, index);
1802 
1803 	if ((swap = *pswap) == NULL) {
1804 		int i;
1805 
1806 		if (swapblk == SWAPBLK_NONE)
1807 			return;
1808 
1809 		swap = *pswap = zalloc(swap_zone);
1810 		if (swap == NULL) {
1811 			VM_WAIT;
1812 			goto retry;
1813 		}
1814 		swap->swb_hnext = NULL;
1815 		swap->swb_object = object;
1816 		swap->swb_index = index & ~SWAP_META_MASK;
1817 		swap->swb_count = 0;
1818 
1819 		++object->un_pager.swp.swp_bcount;
1820 
1821 		for (i = 0; i < SWAP_META_PAGES; ++i)
1822 			swap->swb_pages[i] = SWAPBLK_NONE;
1823 	}
1824 
1825 	/*
1826 	 * Delete prior contents of metadata
1827 	 */
1828 
1829 	index &= SWAP_META_MASK;
1830 
1831 	if (swap->swb_pages[index] != SWAPBLK_NONE) {
1832 		swp_pager_freeswapspace(swap->swb_pages[index], 1);
1833 		--swap->swb_count;
1834 	}
1835 
1836 	/*
1837 	 * Enter block into metadata
1838 	 */
1839 
1840 	swap->swb_pages[index] = swapblk;
1841 	if (swapblk != SWAPBLK_NONE)
1842 		++swap->swb_count;
1843 }
1844 
1845 /*
1846  * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata
1847  *
1848  *	The requested range of blocks is freed, with any associated swap
1849  *	returned to the swap bitmap.
1850  *
1851  *	This routine will free swap metadata structures as they are cleaned
1852  *	out.  This routine does *NOT* operate on swap metadata associated
1853  *	with resident pages.
1854  *
1855  *	This routine must be called at splvm()
1856  */
1857 
1858 static void
1859 swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count)
1860 {
1861 	GIANT_REQUIRED;
1862 
1863 	if (object->type != OBJT_SWAP)
1864 		return;
1865 
1866 	while (count > 0) {
1867 		struct swblock **pswap;
1868 		struct swblock *swap;
1869 
1870 		pswap = swp_pager_hash(object, index);
1871 
1872 		if ((swap = *pswap) != NULL) {
1873 			daddr_t v = swap->swb_pages[index & SWAP_META_MASK];
1874 
1875 			if (v != SWAPBLK_NONE) {
1876 				swp_pager_freeswapspace(v, 1);
1877 				swap->swb_pages[index & SWAP_META_MASK] =
1878 					SWAPBLK_NONE;
1879 				if (--swap->swb_count == 0) {
1880 					*pswap = swap->swb_hnext;
1881 					zfree(swap_zone, swap);
1882 					--object->un_pager.swp.swp_bcount;
1883 				}
1884 			}
1885 			--count;
1886 			++index;
1887 		} else {
1888 			int n = SWAP_META_PAGES - (index & SWAP_META_MASK);
1889 			count -= n;
1890 			index += n;
1891 		}
1892 	}
1893 }
1894 
1895 /*
1896  * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object
1897  *
1898  *	This routine locates and destroys all swap metadata associated with
1899  *	an object.
1900  *
1901  *	This routine must be called at splvm()
1902  */
1903 
1904 static void
1905 swp_pager_meta_free_all(vm_object_t object)
1906 {
1907 	daddr_t index = 0;
1908 
1909 	GIANT_REQUIRED;
1910 
1911 	if (object->type != OBJT_SWAP)
1912 		return;
1913 
1914 	while (object->un_pager.swp.swp_bcount) {
1915 		struct swblock **pswap;
1916 		struct swblock *swap;
1917 
1918 		pswap = swp_pager_hash(object, index);
1919 		if ((swap = *pswap) != NULL) {
1920 			int i;
1921 
1922 			for (i = 0; i < SWAP_META_PAGES; ++i) {
1923 				daddr_t v = swap->swb_pages[i];
1924 				if (v != SWAPBLK_NONE) {
1925 					--swap->swb_count;
1926 					swp_pager_freeswapspace(v, 1);
1927 				}
1928 			}
1929 			if (swap->swb_count != 0)
1930 				panic("swp_pager_meta_free_all: swb_count != 0");
1931 			*pswap = swap->swb_hnext;
1932 			zfree(swap_zone, swap);
1933 			--object->un_pager.swp.swp_bcount;
1934 		}
1935 		index += SWAP_META_PAGES;
1936 		if (index > 0x20000000)
1937 			panic("swp_pager_meta_free_all: failed to locate all swap meta blocks");
1938 	}
1939 }
1940 
1941 /*
1942  * SWP_PAGER_METACTL() -  misc control of swap and vm_page_t meta data.
1943  *
1944  *	This routine is capable of looking up, popping, or freeing
1945  *	swapblk assignments in the swap meta data or in the vm_page_t.
1946  *	The routine typically returns the swapblk being looked up or
1947  *	popped, or SWAPBLK_NONE if the block was freed or was invalid.
1948  *	This routine will automatically free any invalid meta-data
1949  *	swapblks.
1950  *
1951  *	It is not possible to store invalid swapblks in the swap meta data
1952  *	(other than a literal 'SWAPBLK_NONE'), so we don't bother checking.
1953  *
1954  *	When acting on a busy resident page and paging is in progress, we
1955  *	have to wait until paging is complete but otherwise can act on the
1956  *	busy page.
1957  *
1958  *	This routine must be called at splvm().
1959  *
1960  *	SWM_FREE	remove and free swap block from metadata
1961  *	SWM_POP		remove from meta data but do not free.. pop it out
1962  */
1963 
1964 static daddr_t
1965 swp_pager_meta_ctl(
1966 	vm_object_t object,
1967 	vm_pindex_t index,
1968 	int flags
1969 ) {
1970 	struct swblock **pswap;
1971 	struct swblock *swap;
1972 	daddr_t r1;
1973 
1974 	GIANT_REQUIRED;
1975 	/*
1976 	 * The meta data only exists if the object is OBJT_SWAP
1977 	 * and even then might not be allocated yet.
1978 	 */
1979 
1980 	if (object->type != OBJT_SWAP)
1981 		return(SWAPBLK_NONE);
1982 
1983 	r1 = SWAPBLK_NONE;
1984 	pswap = swp_pager_hash(object, index);
1985 
1986 	if ((swap = *pswap) != NULL) {
1987 		index &= SWAP_META_MASK;
1988 		r1 = swap->swb_pages[index];
1989 
1990 		if (r1 != SWAPBLK_NONE) {
1991 			if (flags & SWM_FREE) {
1992 				swp_pager_freeswapspace(r1, 1);
1993 				r1 = SWAPBLK_NONE;
1994 			}
1995 			if (flags & (SWM_FREE|SWM_POP)) {
1996 				swap->swb_pages[index] = SWAPBLK_NONE;
1997 				if (--swap->swb_count == 0) {
1998 					*pswap = swap->swb_hnext;
1999 					zfree(swap_zone, swap);
2000 					--object->un_pager.swp.swp_bcount;
2001 				}
2002 			}
2003 		}
2004 	}
2005 	return(r1);
2006 }
2007 
2008 /********************************************************
2009  *		CHAINING FUNCTIONS			*
2010  ********************************************************
2011  *
2012  *	These functions support recursion of I/O operations
2013  *	on bp's, typically by chaining one or more 'child' bp's
2014  *	to the parent.  Synchronous, asynchronous, and semi-synchronous
2015  *	chaining is possible.
2016  */
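
/*
 * Sketch of the flow implemented below: getchainbuf() allocates a child
 * pbuf, points its b_caller1 at the parent bio and bumps the in-flight
 * count kept in the parent's bio_driver1 (throttling at 4 children).
 * vm_pager_chain_iodone() runs as each child completes, folds any error
 * and residual back into the parent and drops the count.  waitchainbuf()
 * sleeps until the count drains to the caller's limit and, if requested,
 * biodone()s the parent.
 */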
2017 
2018 /*
2019  *	vm_pager_chain_iodone:
2020  *
2021  *	io completion routine for child bp.  Currently we fudge a bit
2022  *	on dealing with b_resid.   Since users of these routines may issue
2023  *	multiple children simultaneously, sequencing of the error can be lost.
2024  */
2025 
2026 static void
2027 vm_pager_chain_iodone(struct buf *nbp)
2028 {
2029 	struct bio *bp;
2030 	u_int *count;
2031 
2032 	bp = nbp->b_caller1;
2033 	count = (u_int *)&(bp->bio_driver1);
2034 	if (bp != NULL) {
2035 		if (nbp->b_ioflags & BIO_ERROR) {
2036 			bp->bio_flags |= BIO_ERROR;
2037 			bp->bio_error = nbp->b_error;
2038 		} else if (nbp->b_resid != 0) {
2039 			bp->bio_flags |= BIO_ERROR;
2040 			bp->bio_error = EINVAL;
2041 		} else {
2042 			bp->bio_resid -= nbp->b_bcount;
2043 		}
2044 		nbp->b_caller1 = NULL;
2045 		--(*count);
2046 		if (bp->bio_flags & BIO_FLAG1) {
2047 			bp->bio_flags &= ~BIO_FLAG1;
2048 			wakeup(bp);
2049 		}
2050 	}
2051 	nbp->b_flags |= B_DONE;
2052 	nbp->b_flags &= ~B_ASYNC;
2053 	relpbuf(nbp, NULL);
2054 }
2055 
2056 /*
2057  *	getchainbuf:
2058  *
2059  *	Obtain a physical buffer and chain it to its parent buffer.  When
2060  *	I/O completes, the parent buffer will be B_SIGNAL'd.  Errors are
2061  *	automatically propagated to the parent
2062  */
2063 
2064 static struct buf *
2065 getchainbuf(struct bio *bp, struct vnode *vp, int flags)
2066 {
2067 	struct buf *nbp;
2068 	u_int *count;
2069 
2070 	GIANT_REQUIRED;
2071 	nbp = getpbuf(NULL);
2072 	count = (u_int *)&(bp->bio_driver1);
2073 
2074 	nbp->b_caller1 = bp;
2075 	++(*count);
2076 
2077 	if (*count > 4)
2078 		waitchainbuf(bp, 4, 0);
2079 
2080 	nbp->b_iocmd = bp->bio_cmd;
2081 	nbp->b_ioflags = bp->bio_flags & BIO_ORDERED;
2082 	nbp->b_flags = flags;
2083 	nbp->b_rcred = crhold(proc0.p_ucred);
2084 	nbp->b_wcred = crhold(proc0.p_ucred);
2085 	nbp->b_iodone = vm_pager_chain_iodone;
2086 
2087 	if (vp)
2088 		pbgetvp(vp, nbp);
2089 	return(nbp);
2090 }
2091 
2092 static void
2093 flushchainbuf(struct buf *nbp)
2094 {
2095 	GIANT_REQUIRED;
2096 	if (nbp->b_bcount) {
2097 		nbp->b_bufsize = nbp->b_bcount;
2098 		if (nbp->b_iocmd == BIO_WRITE)
2099 			nbp->b_dirtyend = nbp->b_bcount;
2100 		BUF_KERNPROC(nbp);
2101 		BUF_STRATEGY(nbp);
2102 	} else {
2103 		bufdone(nbp);
2104 	}
2105 }
2106 
2107 static void
2108 waitchainbuf(struct bio *bp, int limit, int done)
2109 {
2110 	int s;
2111 	u_int *count;
2112 
2113 	GIANT_REQUIRED;
2114 	count = (u_int *)&(bp->bio_driver1);
2115 	s = splbio();
2116 	while (*count > limit) {
2117 		bp->bio_flags |= BIO_FLAG1;
2118 		tsleep(bp, PRIBIO + 4, "bpchain", 0);
2119 	}
2120 	if (done) {
2121 		if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) {
2122 			bp->bio_flags |= BIO_ERROR;
2123 			bp->bio_error = EINVAL;
2124 		}
2125 		biodone(bp);
2126 	}
2127 	splx(s);
2128 }
2129 
2130