1df8bae1dSRodney W. Grimes /* 21c7c3c6aSMatthew Dillon * Copyright (c) 1998 Matthew Dillon, 326f9a767SRodney W. Grimes * Copyright (c) 1994 John S. Dyson 4df8bae1dSRodney W. Grimes * Copyright (c) 1990 University of Utah. 5df8bae1dSRodney W. Grimes * Copyright (c) 1991, 1993 6df8bae1dSRodney W. Grimes * The Regents of the University of California. All rights reserved. 7df8bae1dSRodney W. Grimes * 8df8bae1dSRodney W. Grimes * This code is derived from software contributed to Berkeley by 9df8bae1dSRodney W. Grimes * the Systems Programming Group of the University of Utah Computer 10df8bae1dSRodney W. Grimes * Science Department. 11df8bae1dSRodney W. Grimes * 12df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 13df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 14df8bae1dSRodney W. Grimes * are met: 15df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 16df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 17df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 18df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 19df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 20df8bae1dSRodney W. Grimes * 3. All advertising materials mentioning features or use of this software 215929bcfaSPhilippe Charnier * must display the following acknowledgement: 22df8bae1dSRodney W. Grimes * This product includes software developed by the University of 23df8bae1dSRodney W. Grimes * California, Berkeley and its contributors. 24df8bae1dSRodney W. Grimes * 4. Neither the name of the University nor the names of its contributors 25df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 26df8bae1dSRodney W. Grimes * without specific prior written permission. 27df8bae1dSRodney W. Grimes * 28df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 29df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 32df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38df8bae1dSRodney W. Grimes * SUCH DAMAGE. 39df8bae1dSRodney W. Grimes * 401c7c3c6aSMatthew Dillon * New Swap System 411c7c3c6aSMatthew Dillon * Matthew Dillon 421c7c3c6aSMatthew Dillon * 431c7c3c6aSMatthew Dillon * Radix Bitmap 'blists'. 441c7c3c6aSMatthew Dillon * 451c7c3c6aSMatthew Dillon * - The new swapper uses the new radix bitmap code. This should scale 461c7c3c6aSMatthew Dillon * to arbitrarily small or arbitrarily large swap spaces and an almost 471c7c3c6aSMatthew Dillon * arbitrary degree of fragmentation. 481c7c3c6aSMatthew Dillon * 491c7c3c6aSMatthew Dillon * Features: 501c7c3c6aSMatthew Dillon * 511c7c3c6aSMatthew Dillon * - on the fly reallocation of swap during putpages. The new system 521c7c3c6aSMatthew Dillon * does not try to keep previously allocated swap blocks for dirty 531c7c3c6aSMatthew Dillon * pages. 541c7c3c6aSMatthew Dillon * 551c7c3c6aSMatthew Dillon * - on the fly deallocation of swap 561c7c3c6aSMatthew Dillon * 571c7c3c6aSMatthew Dillon * - No more garbage collection required. Unnecessarily allocated swap 581c7c3c6aSMatthew Dillon * blocks only exist for dirty vm_page_t's now and these are already 591c7c3c6aSMatthew Dillon * cycled (in a high-load system) by the pager. We also do on-the-fly 601c7c3c6aSMatthew Dillon * removal of invalidated swap blocks when a page is destroyed 611c7c3c6aSMatthew Dillon * or renamed. 621c7c3c6aSMatthew Dillon * 63df8bae1dSRodney W. Grimes * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$ 64df8bae1dSRodney W. Grimes * 65df8bae1dSRodney W. Grimes * @(#)swap_pager.c 8.9 (Berkeley) 3/21/94 661c7c3c6aSMatthew Dillon * 67c3aac50fSPeter Wemm * $FreeBSD$ 68df8bae1dSRodney W. Grimes */ 69df8bae1dSRodney W. Grimes 70df8bae1dSRodney W. Grimes #include <sys/param.h> 71df8bae1dSRodney W. Grimes #include <sys/systm.h> 72af647ddeSBruce Evans #include <sys/conf.h> 7364abb5a5SDavid Greenman #include <sys/kernel.h> 74df8bae1dSRodney W. Grimes #include <sys/proc.h> 759626b608SPoul-Henning Kamp #include <sys/bio.h> 76df8bae1dSRodney W. Grimes #include <sys/buf.h> 77df8bae1dSRodney W. Grimes #include <sys/vnode.h> 78df8bae1dSRodney W. Grimes #include <sys/malloc.h> 79efeaf95aSDavid Greenman #include <sys/vmmeter.h> 80327f4e83SMatthew Dillon #include <sys/sysctl.h> 811c7c3c6aSMatthew Dillon #include <sys/blist.h> 821c7c3c6aSMatthew Dillon #include <sys/lock.h> 83df8bae1dSRodney W. Grimes 84e47ed70bSJohn Dyson #ifndef MAX_PAGEOUT_CLUSTER 85ffc82b0aSJohn Dyson #define MAX_PAGEOUT_CLUSTER 16 86e47ed70bSJohn Dyson #endif 87e47ed70bSJohn Dyson 88e47ed70bSJohn Dyson #define SWB_NPAGES MAX_PAGEOUT_CLUSTER 89e47ed70bSJohn Dyson 901c7c3c6aSMatthew Dillon #include "opt_swap.h" 91df8bae1dSRodney W. Grimes #include <vm/vm.h> 92efeaf95aSDavid Greenman #include <vm/vm_object.h> 93df8bae1dSRodney W. Grimes #include <vm/vm_page.h> 94efeaf95aSDavid Greenman #include <vm/vm_pager.h> 95df8bae1dSRodney W. Grimes #include <vm/vm_pageout.h> 96df8bae1dSRodney W. Grimes #include <vm/swap_pager.h> 97efeaf95aSDavid Greenman #include <vm/vm_extern.h> 981c7c3c6aSMatthew Dillon #include <vm/vm_zone.h> 99df8bae1dSRodney W. Grimes 1001c7c3c6aSMatthew Dillon #define SWM_FREE 0x02 /* free, period */ 1011c7c3c6aSMatthew Dillon #define SWM_POP 0x04 /* pop out */ 10226f9a767SRodney W. Grimes 10324a1cce3SDavid Greenman /* 1041c7c3c6aSMatthew Dillon * vm_swap_size is in page-sized chunks now. It was DEV_BSIZE'd chunks 1051c7c3c6aSMatthew Dillon * in the old system. 10624a1cce3SDavid Greenman */ 1071c7c3c6aSMatthew Dillon 1081c7c3c6aSMatthew Dillon extern int vm_swap_size; /* number of free swap blocks, in pages */ 1091c7c3c6aSMatthew Dillon 11020d3034fSMatthew Dillon int swap_pager_full; /* swap space exhaustion (task killing) */ 11120d3034fSMatthew Dillon static int swap_pager_almost_full; /* swap space exhaustion (w/ hysteresis)*/ 1121c7c3c6aSMatthew Dillon static int nsw_rcount; /* free read buffers */ 113327f4e83SMatthew Dillon static int nsw_wcount_sync; /* limit write buffers / synchronous */ 114327f4e83SMatthew Dillon static int nsw_wcount_async; /* limit write buffers / asynchronous */ 115327f4e83SMatthew Dillon static int nsw_wcount_async_max;/* assigned maximum */ 116327f4e83SMatthew Dillon static int nsw_cluster_max; /* maximum VOP I/O allowed */ 1171c7c3c6aSMatthew Dillon static int sw_alloc_interlock; /* swap pager allocation interlock */ 1181c7c3c6aSMatthew Dillon 1191c7c3c6aSMatthew Dillon struct blist *swapblist; 1201c7c3c6aSMatthew Dillon static struct swblock **swhash; 1211c7c3c6aSMatthew Dillon static int swhash_mask; 122327f4e83SMatthew Dillon static int swap_async_max = 4; /* maximum in-progress async I/O's */ 123327f4e83SMatthew Dillon 124ea3aecf5SPeter Wemm extern struct vnode *swapdev_vp; /* from vm_swap.c */ 12524e7ab7cSPoul-Henning Kamp 126327f4e83SMatthew Dillon SYSCTL_INT(_vm, OID_AUTO, swap_async_max, 127327f4e83SMatthew Dillon CTLFLAG_RW, &swap_async_max, 0, "Maximum running async swap ops"); 1281c7c3c6aSMatthew Dillon 1291c7c3c6aSMatthew Dillon /* 1301c7c3c6aSMatthew Dillon * "named" and "unnamed" anon region objects. Try to reduce the overhead 1311c7c3c6aSMatthew Dillon * of searching a named list by hashing it just a little. 1321c7c3c6aSMatthew Dillon */ 1331c7c3c6aSMatthew Dillon 1341c7c3c6aSMatthew Dillon #define NOBJLISTS 8 1351c7c3c6aSMatthew Dillon 1361c7c3c6aSMatthew Dillon #define NOBJLIST(handle) \ 137af647ddeSBruce Evans (&swap_pager_object_list[((int)(intptr_t)handle >> 4) & (NOBJLISTS-1)]) 1381c7c3c6aSMatthew Dillon 1391c7c3c6aSMatthew Dillon static struct pagerlst swap_pager_object_list[NOBJLISTS]; 1401c7c3c6aSMatthew Dillon struct pagerlst swap_pager_un_object_list; 1411c7c3c6aSMatthew Dillon vm_zone_t swap_zone; 1421c7c3c6aSMatthew Dillon 1431c7c3c6aSMatthew Dillon /* 1441c7c3c6aSMatthew Dillon * pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure 1451c7c3c6aSMatthew Dillon * calls hooked from other parts of the VM system and do not appear here. 1461c7c3c6aSMatthew Dillon * (see vm/swap_pager.h). 1471c7c3c6aSMatthew Dillon */ 1481c7c3c6aSMatthew Dillon 149ff98689dSBruce Evans static vm_object_t 1506cde7a16SDavid Greenman swap_pager_alloc __P((void *handle, vm_ooffset_t size, 151a316d390SJohn Dyson vm_prot_t prot, vm_ooffset_t offset)); 152ff98689dSBruce Evans static void swap_pager_dealloc __P((vm_object_t object)); 153f708ef1bSPoul-Henning Kamp static int swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int)); 154ff98689dSBruce Evans static void swap_pager_init __P((void)); 1551c7c3c6aSMatthew Dillon static void swap_pager_unswapped __P((vm_page_t)); 1560b441832SPoul-Henning Kamp static void swap_pager_strategy __P((vm_object_t, struct bio *)); 157f708ef1bSPoul-Henning Kamp 158df8bae1dSRodney W. Grimes struct pagerops swappagerops = { 1591c7c3c6aSMatthew Dillon swap_pager_init, /* early system initialization of pager */ 1601c7c3c6aSMatthew Dillon swap_pager_alloc, /* allocate an OBJT_SWAP object */ 1611c7c3c6aSMatthew Dillon swap_pager_dealloc, /* deallocate an OBJT_SWAP object */ 1621c7c3c6aSMatthew Dillon swap_pager_getpages, /* pagein */ 1631c7c3c6aSMatthew Dillon swap_pager_putpages, /* pageout */ 1641c7c3c6aSMatthew Dillon swap_pager_haspage, /* get backing store status for page */ 165a5296b05SJulian Elischer swap_pager_unswapped, /* remove swap related to page */ 166a5296b05SJulian Elischer swap_pager_strategy /* pager strategy call */ 167df8bae1dSRodney W. Grimes }; 168df8bae1dSRodney W. Grimes 1690b441832SPoul-Henning Kamp static struct buf *getchainbuf(struct bio *bp, struct vnode *vp, int flags); 170e4057dbdSPoul-Henning Kamp static void flushchainbuf(struct buf *nbp); 1710b441832SPoul-Henning Kamp static void waitchainbuf(struct bio *bp, int count, int done); 172e4057dbdSPoul-Henning Kamp 1731c7c3c6aSMatthew Dillon /* 1741c7c3c6aSMatthew Dillon * dmmax is in page-sized chunks with the new swap system. It was 1751c7c3c6aSMatthew Dillon * dev-bsized chunks in the old. 1761c7c3c6aSMatthew Dillon * 1771c7c3c6aSMatthew Dillon * swap_*() routines are externally accessible. swp_*() routines are 1781c7c3c6aSMatthew Dillon * internal. 1791c7c3c6aSMatthew Dillon */ 1801c7c3c6aSMatthew Dillon 181f708ef1bSPoul-Henning Kamp int dmmax; 1821c7c3c6aSMatthew Dillon static int dmmax_mask; 18320d3034fSMatthew Dillon int nswap_lowat = 128; /* in pages, swap_pager_almost_full warn */ 18420d3034fSMatthew Dillon int nswap_hiwat = 512; /* in pages, swap_pager_almost_full warn */ 18526f9a767SRodney W. Grimes 1861c7c3c6aSMatthew Dillon static __inline void swp_sizecheck __P((void)); 1871c7c3c6aSMatthew Dillon static void swp_pager_sync_iodone __P((struct buf *bp)); 1881c7c3c6aSMatthew Dillon static void swp_pager_async_iodone __P((struct buf *bp)); 18924a1cce3SDavid Greenman 1901c7c3c6aSMatthew Dillon /* 1911c7c3c6aSMatthew Dillon * Swap bitmap functions 1921c7c3c6aSMatthew Dillon */ 1931c7c3c6aSMatthew Dillon 1941c7c3c6aSMatthew Dillon static __inline void swp_pager_freeswapspace __P((daddr_t blk, int npages)); 1951c7c3c6aSMatthew Dillon static __inline daddr_t swp_pager_getswapspace __P((int npages)); 1961c7c3c6aSMatthew Dillon 1971c7c3c6aSMatthew Dillon /* 1981c7c3c6aSMatthew Dillon * Metadata functions 1991c7c3c6aSMatthew Dillon */ 2001c7c3c6aSMatthew Dillon 2014dcc5c2dSMatthew Dillon static void swp_pager_meta_build __P((vm_object_t, vm_pindex_t, daddr_t)); 2024dcc5c2dSMatthew Dillon static void swp_pager_meta_free __P((vm_object_t, vm_pindex_t, daddr_t)); 2031c7c3c6aSMatthew Dillon static void swp_pager_meta_free_all __P((vm_object_t)); 2041c7c3c6aSMatthew Dillon static daddr_t swp_pager_meta_ctl __P((vm_object_t, vm_pindex_t, int)); 2051c7c3c6aSMatthew Dillon 2061c7c3c6aSMatthew Dillon /* 2071c7c3c6aSMatthew Dillon * SWP_SIZECHECK() - update swap_pager_full indication 2081c7c3c6aSMatthew Dillon * 20920d3034fSMatthew Dillon * update the swap_pager_almost_full indication and warn when we are 21020d3034fSMatthew Dillon * about to run out of swap space, using lowat/hiwat hysteresis. 21120d3034fSMatthew Dillon * 21220d3034fSMatthew Dillon * Clear swap_pager_full ( task killing ) indication when lowat is met. 2131c7c3c6aSMatthew Dillon * 2141c7c3c6aSMatthew Dillon * No restrictions on call 2151c7c3c6aSMatthew Dillon * This routine may not block. 2161c7c3c6aSMatthew Dillon * This routine must be called at splvm() 2171c7c3c6aSMatthew Dillon */ 218de5f6a77SJohn Dyson 219c1087c13SBruce Evans static __inline void 2201c7c3c6aSMatthew Dillon swp_sizecheck() 2210d94caffSDavid Greenman { 2221c7c3c6aSMatthew Dillon if (vm_swap_size < nswap_lowat) { 22320d3034fSMatthew Dillon if (swap_pager_almost_full == 0) { 2241af87c92SDavid Greenman printf("swap_pager: out of swap space\n"); 22520d3034fSMatthew Dillon swap_pager_almost_full = 1; 2262b0d37a4SMatthew Dillon } 22720d3034fSMatthew Dillon } else { 22826f9a767SRodney W. Grimes swap_pager_full = 0; 22920d3034fSMatthew Dillon if (vm_swap_size > nswap_hiwat) 23020d3034fSMatthew Dillon swap_pager_almost_full = 0; 23126f9a767SRodney W. Grimes } 2321c7c3c6aSMatthew Dillon } 2331c7c3c6aSMatthew Dillon 2341c7c3c6aSMatthew Dillon /* 2351c7c3c6aSMatthew Dillon * SWAP_PAGER_INIT() - initialize the swap pager! 2361c7c3c6aSMatthew Dillon * 2371c7c3c6aSMatthew Dillon * Expected to be started from system init. NOTE: This code is run 2381c7c3c6aSMatthew Dillon * before much else so be careful what you depend on. Most of the VM 2391c7c3c6aSMatthew Dillon * system has yet to be initialized at this point. 2401c7c3c6aSMatthew Dillon */ 24126f9a767SRodney W. Grimes 242f5a12711SPoul-Henning Kamp static void 243df8bae1dSRodney W. Grimes swap_pager_init() 244df8bae1dSRodney W. Grimes { 2451c7c3c6aSMatthew Dillon /* 2461c7c3c6aSMatthew Dillon * Initialize object lists 2471c7c3c6aSMatthew Dillon */ 2481c7c3c6aSMatthew Dillon int i; 2491c7c3c6aSMatthew Dillon 2501c7c3c6aSMatthew Dillon for (i = 0; i < NOBJLISTS; ++i) 2511c7c3c6aSMatthew Dillon TAILQ_INIT(&swap_pager_object_list[i]); 25224a1cce3SDavid Greenman TAILQ_INIT(&swap_pager_un_object_list); 253df8bae1dSRodney W. Grimes 254df8bae1dSRodney W. Grimes /* 2551c7c3c6aSMatthew Dillon * Device Stripe, in PAGE_SIZE'd blocks 256df8bae1dSRodney W. Grimes */ 2571c7c3c6aSMatthew Dillon 2581c7c3c6aSMatthew Dillon dmmax = SWB_NPAGES * 2; 2591c7c3c6aSMatthew Dillon dmmax_mask = ~(dmmax - 1); 2601c7c3c6aSMatthew Dillon } 26126f9a767SRodney W. Grimes 262df8bae1dSRodney W. Grimes /* 2631c7c3c6aSMatthew Dillon * SWAP_PAGER_SWAP_INIT() - swap pager initialization from pageout process 2641c7c3c6aSMatthew Dillon * 2651c7c3c6aSMatthew Dillon * Expected to be started from pageout process once, prior to entering 2661c7c3c6aSMatthew Dillon * its main loop. 267df8bae1dSRodney W. Grimes */ 268df8bae1dSRodney W. Grimes 26924a1cce3SDavid Greenman void 27024a1cce3SDavid Greenman swap_pager_swap_init() 271df8bae1dSRodney W. Grimes { 2721c7c3c6aSMatthew Dillon int n; 2730d94caffSDavid Greenman 27426f9a767SRodney W. Grimes /* 2751c7c3c6aSMatthew Dillon * Number of in-transit swap bp operations. Don't 2761c7c3c6aSMatthew Dillon * exhaust the pbufs completely. Make sure we 2771c7c3c6aSMatthew Dillon * initialize workable values (0 will work for hysteresis 2781c7c3c6aSMatthew Dillon * but it isn't very efficient). 2791c7c3c6aSMatthew Dillon * 280327f4e83SMatthew Dillon * The nsw_cluster_max is constrained by the bp->b_pages[] 2811c7c3c6aSMatthew Dillon * array (MAXPHYS/PAGE_SIZE) and our locally defined 2821c7c3c6aSMatthew Dillon * MAX_PAGEOUT_CLUSTER. Also be aware that swap ops are 2831c7c3c6aSMatthew Dillon * constrained by the swap device interleave stripe size. 284327f4e83SMatthew Dillon * 285327f4e83SMatthew Dillon * Currently we hardwire nsw_wcount_async to 4. This limit is 286327f4e83SMatthew Dillon * designed to prevent other I/O from having high latencies due to 287327f4e83SMatthew Dillon * our pageout I/O. The value 4 works well for one or two active swap 288327f4e83SMatthew Dillon * devices but is probably a little low if you have more. Even so, 289327f4e83SMatthew Dillon * a higher value would probably generate only a limited improvement 290327f4e83SMatthew Dillon * with three or four active swap devices since the system does not 291327f4e83SMatthew Dillon * typically have to pageout at extreme bandwidths. We will want 292327f4e83SMatthew Dillon * at least 2 per swap devices, and 4 is a pretty good value if you 293327f4e83SMatthew Dillon * have one NFS swap device due to the command/ack latency over NFS. 294327f4e83SMatthew Dillon * So it all works out pretty well. 29526f9a767SRodney W. Grimes */ 29624a1cce3SDavid Greenman 297ad3cce20SMatthew Dillon nsw_cluster_max = min((MAXPHYS/PAGE_SIZE), MAX_PAGEOUT_CLUSTER); 298327f4e83SMatthew Dillon 2991c7c3c6aSMatthew Dillon nsw_rcount = (nswbuf + 1) / 2; 300327f4e83SMatthew Dillon nsw_wcount_sync = (nswbuf + 3) / 4; 301327f4e83SMatthew Dillon nsw_wcount_async = 4; 302327f4e83SMatthew Dillon nsw_wcount_async_max = nsw_wcount_async; 30324a1cce3SDavid Greenman 3041c7c3c6aSMatthew Dillon /* 3051c7c3c6aSMatthew Dillon * Initialize our zone. Right now I'm just guessing on the number 3061c7c3c6aSMatthew Dillon * we need based on the number of pages in the system. Each swblock 3071c7c3c6aSMatthew Dillon * can hold 16 pages, so this is probably overkill. 3081c7c3c6aSMatthew Dillon */ 30924a1cce3SDavid Greenman 3101c7c3c6aSMatthew Dillon n = cnt.v_page_count * 2; 31126f9a767SRodney W. Grimes 3121c7c3c6aSMatthew Dillon swap_zone = zinit( 3131c7c3c6aSMatthew Dillon "SWAPMETA", 3141c7c3c6aSMatthew Dillon sizeof(struct swblock), 3151c7c3c6aSMatthew Dillon n, 3161c7c3c6aSMatthew Dillon ZONE_INTERRUPT, 3171c7c3c6aSMatthew Dillon 1 3181c7c3c6aSMatthew Dillon ); 31924a1cce3SDavid Greenman 3201c7c3c6aSMatthew Dillon /* 3211c7c3c6aSMatthew Dillon * Initialize our meta-data hash table. The swapper does not need to 3221c7c3c6aSMatthew Dillon * be quite as efficient as the VM system, so we do not use an 3231c7c3c6aSMatthew Dillon * oversized hash table. 3241c7c3c6aSMatthew Dillon * 3251c7c3c6aSMatthew Dillon * n: size of hash table, must be power of 2 3261c7c3c6aSMatthew Dillon * swhash_mask: hash table index mask 3271c7c3c6aSMatthew Dillon */ 328df8bae1dSRodney W. Grimes 3291c7c3c6aSMatthew Dillon for (n = 1; n < cnt.v_page_count / 4; n <<= 1) 3301c7c3c6aSMatthew Dillon ; 3311c7c3c6aSMatthew Dillon 3321c7c3c6aSMatthew Dillon swhash = malloc(sizeof(struct swblock *) * n, M_VMPGDATA, M_WAITOK); 3331c7c3c6aSMatthew Dillon bzero(swhash, sizeof(struct swblock *) * n); 3341c7c3c6aSMatthew Dillon 3351c7c3c6aSMatthew Dillon swhash_mask = n - 1; 33624a1cce3SDavid Greenman } 33724a1cce3SDavid Greenman 33824a1cce3SDavid Greenman /* 3391c7c3c6aSMatthew Dillon * SWAP_PAGER_ALLOC() - allocate a new OBJT_SWAP VM object and instantiate 3401c7c3c6aSMatthew Dillon * its metadata structures. 3411c7c3c6aSMatthew Dillon * 3421c7c3c6aSMatthew Dillon * This routine is called from the mmap and fork code to create a new 3431c7c3c6aSMatthew Dillon * OBJT_SWAP object. We do this by creating an OBJT_DEFAULT object 3441c7c3c6aSMatthew Dillon * and then converting it with swp_pager_meta_build(). 3451c7c3c6aSMatthew Dillon * 3461c7c3c6aSMatthew Dillon * This routine may block in vm_object_allocate() and create a named 3471c7c3c6aSMatthew Dillon * object lookup race, so we must interlock. We must also run at 3481c7c3c6aSMatthew Dillon * splvm() for the object lookup to handle races with interrupts, but 3491c7c3c6aSMatthew Dillon * we do not have to maintain splvm() in between the lookup and the 3501c7c3c6aSMatthew Dillon * add because (I believe) it is not possible to attempt to create 3511c7c3c6aSMatthew Dillon * a new swap object w/handle when a default object with that handle 3521c7c3c6aSMatthew Dillon * already exists. 35324a1cce3SDavid Greenman */ 3541c7c3c6aSMatthew Dillon 355f5a12711SPoul-Henning Kamp static vm_object_t 3566cde7a16SDavid Greenman swap_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, 357b9dcd593SBruce Evans vm_ooffset_t offset) 35824a1cce3SDavid Greenman { 35924a1cce3SDavid Greenman vm_object_t object; 36024a1cce3SDavid Greenman 36124a1cce3SDavid Greenman if (handle) { 3621c7c3c6aSMatthew Dillon /* 3631c7c3c6aSMatthew Dillon * Reference existing named region or allocate new one. There 3641c7c3c6aSMatthew Dillon * should not be a race here against swp_pager_meta_build() 3651c7c3c6aSMatthew Dillon * as called from vm_page_remove() in regards to the lookup 3661c7c3c6aSMatthew Dillon * of the handle. 3671c7c3c6aSMatthew Dillon */ 3681c7c3c6aSMatthew Dillon 3691c7c3c6aSMatthew Dillon while (sw_alloc_interlock) { 3701c7c3c6aSMatthew Dillon sw_alloc_interlock = -1; 3711c7c3c6aSMatthew Dillon tsleep(&sw_alloc_interlock, PVM, "swpalc", 0); 3721c7c3c6aSMatthew Dillon } 3731c7c3c6aSMatthew Dillon sw_alloc_interlock = 1; 3741c7c3c6aSMatthew Dillon 3751c7c3c6aSMatthew Dillon object = vm_pager_object_lookup(NOBJLIST(handle), handle); 3761c7c3c6aSMatthew Dillon 37724a1cce3SDavid Greenman if (object != NULL) { 37824a1cce3SDavid Greenman vm_object_reference(object); 37924a1cce3SDavid Greenman } else { 3801c7c3c6aSMatthew Dillon object = vm_object_allocate(OBJT_DEFAULT, 3816cde7a16SDavid Greenman OFF_TO_IDX(offset + PAGE_MASK + size)); 38224a1cce3SDavid Greenman object->handle = handle; 3831c7c3c6aSMatthew Dillon 3844dcc5c2dSMatthew Dillon swp_pager_meta_build(object, 0, SWAPBLK_NONE); 38524a1cce3SDavid Greenman } 3861c7c3c6aSMatthew Dillon 3871c7c3c6aSMatthew Dillon if (sw_alloc_interlock < 0) 3881c7c3c6aSMatthew Dillon wakeup(&sw_alloc_interlock); 3891c7c3c6aSMatthew Dillon 3901c7c3c6aSMatthew Dillon sw_alloc_interlock = 0; 39124a1cce3SDavid Greenman } else { 3921c7c3c6aSMatthew Dillon object = vm_object_allocate(OBJT_DEFAULT, 3936cde7a16SDavid Greenman OFF_TO_IDX(offset + PAGE_MASK + size)); 3941c7c3c6aSMatthew Dillon 3954dcc5c2dSMatthew Dillon swp_pager_meta_build(object, 0, SWAPBLK_NONE); 39624a1cce3SDavid Greenman } 39724a1cce3SDavid Greenman 39824a1cce3SDavid Greenman return (object); 399df8bae1dSRodney W. Grimes } 400df8bae1dSRodney W. Grimes 40126f9a767SRodney W. Grimes /* 4021c7c3c6aSMatthew Dillon * SWAP_PAGER_DEALLOC() - remove swap metadata from object 4031c7c3c6aSMatthew Dillon * 4041c7c3c6aSMatthew Dillon * The swap backing for the object is destroyed. The code is 4051c7c3c6aSMatthew Dillon * designed such that we can reinstantiate it later, but this 4061c7c3c6aSMatthew Dillon * routine is typically called only when the entire object is 4071c7c3c6aSMatthew Dillon * about to be destroyed. 4081c7c3c6aSMatthew Dillon * 4091c7c3c6aSMatthew Dillon * This routine may block, but no longer does. 4101c7c3c6aSMatthew Dillon * 4111c7c3c6aSMatthew Dillon * The object must be locked or unreferenceable. 41226f9a767SRodney W. Grimes */ 41326f9a767SRodney W. Grimes 414df8bae1dSRodney W. Grimes static void 4151c7c3c6aSMatthew Dillon swap_pager_dealloc(object) 4162a4895f4SDavid Greenman vm_object_t object; 41726f9a767SRodney W. Grimes { 4184dcc5c2dSMatthew Dillon int s; 4194dcc5c2dSMatthew Dillon 42026f9a767SRodney W. Grimes /* 4211c7c3c6aSMatthew Dillon * Remove from list right away so lookups will fail if we block for 4221c7c3c6aSMatthew Dillon * pageout completion. 42326f9a767SRodney W. Grimes */ 424b44e4b7aSJohn Dyson 4251c7c3c6aSMatthew Dillon if (object->handle == NULL) { 4261c7c3c6aSMatthew Dillon TAILQ_REMOVE(&swap_pager_un_object_list, object, pager_object_list); 42724ea4a96SDavid Greenman } else { 4281c7c3c6aSMatthew Dillon TAILQ_REMOVE(NOBJLIST(object->handle), object, pager_object_list); 42926f9a767SRodney W. Grimes } 4301c7c3c6aSMatthew Dillon 4311c7c3c6aSMatthew Dillon vm_object_pip_wait(object, "swpdea"); 4321c7c3c6aSMatthew Dillon 4331c7c3c6aSMatthew Dillon /* 4341c7c3c6aSMatthew Dillon * Free all remaining metadata. We only bother to free it from 4351c7c3c6aSMatthew Dillon * the swap meta data. We do not attempt to free swapblk's still 4361c7c3c6aSMatthew Dillon * associated with vm_page_t's for this object. We do not care 4371c7c3c6aSMatthew Dillon * if paging is still in progress on some objects. 4381c7c3c6aSMatthew Dillon */ 4394dcc5c2dSMatthew Dillon s = splvm(); 4401c7c3c6aSMatthew Dillon swp_pager_meta_free_all(object); 4414dcc5c2dSMatthew Dillon splx(s); 4421c7c3c6aSMatthew Dillon } 4431c7c3c6aSMatthew Dillon 4441c7c3c6aSMatthew Dillon /************************************************************************ 4451c7c3c6aSMatthew Dillon * SWAP PAGER BITMAP ROUTINES * 4461c7c3c6aSMatthew Dillon ************************************************************************/ 4471c7c3c6aSMatthew Dillon 4481c7c3c6aSMatthew Dillon /* 4491c7c3c6aSMatthew Dillon * SWP_PAGER_GETSWAPSPACE() - allocate raw swap space 4501c7c3c6aSMatthew Dillon * 4511c7c3c6aSMatthew Dillon * Allocate swap for the requested number of pages. The starting 4521c7c3c6aSMatthew Dillon * swap block number (a page index) is returned or SWAPBLK_NONE 4531c7c3c6aSMatthew Dillon * if the allocation failed. 4541c7c3c6aSMatthew Dillon * 4551c7c3c6aSMatthew Dillon * Also has the side effect of advising that somebody made a mistake 4561c7c3c6aSMatthew Dillon * when they configured swap and didn't configure enough. 4571c7c3c6aSMatthew Dillon * 4581c7c3c6aSMatthew Dillon * Must be called at splvm() to avoid races with bitmap frees from 4591c7c3c6aSMatthew Dillon * vm_page_remove() aka swap_pager_page_removed(). 4601c7c3c6aSMatthew Dillon * 4611c7c3c6aSMatthew Dillon * This routine may not block 4621c7c3c6aSMatthew Dillon * This routine must be called at splvm(). 4631c7c3c6aSMatthew Dillon */ 4641c7c3c6aSMatthew Dillon 4651c7c3c6aSMatthew Dillon static __inline daddr_t 4661c7c3c6aSMatthew Dillon swp_pager_getswapspace(npages) 4671c7c3c6aSMatthew Dillon int npages; 4681c7c3c6aSMatthew Dillon { 4691c7c3c6aSMatthew Dillon daddr_t blk; 4701c7c3c6aSMatthew Dillon 4711c7c3c6aSMatthew Dillon if ((blk = blist_alloc(swapblist, npages)) == SWAPBLK_NONE) { 4722b0d37a4SMatthew Dillon if (swap_pager_full != 2) { 4731c7c3c6aSMatthew Dillon printf("swap_pager_getswapspace: failed\n"); 4742b0d37a4SMatthew Dillon swap_pager_full = 2; 47520d3034fSMatthew Dillon swap_pager_almost_full = 1; 4762b0d37a4SMatthew Dillon } 4771c7c3c6aSMatthew Dillon } else { 4781c7c3c6aSMatthew Dillon vm_swap_size -= npages; 4791c7c3c6aSMatthew Dillon swp_sizecheck(); 4801c7c3c6aSMatthew Dillon } 4811c7c3c6aSMatthew Dillon return(blk); 48226f9a767SRodney W. Grimes } 48326f9a767SRodney W. Grimes 48426f9a767SRodney W. Grimes /* 4851c7c3c6aSMatthew Dillon * SWP_PAGER_FREESWAPSPACE() - free raw swap space 4861c7c3c6aSMatthew Dillon * 4871c7c3c6aSMatthew Dillon * This routine returns the specified swap blocks back to the bitmap. 4881c7c3c6aSMatthew Dillon * 4891c7c3c6aSMatthew Dillon * Note: This routine may not block (it could in the old swap code), 4901c7c3c6aSMatthew Dillon * and through the use of the new blist routines it does not block. 4911c7c3c6aSMatthew Dillon * 4921c7c3c6aSMatthew Dillon * We must be called at splvm() to avoid races with bitmap frees from 4931c7c3c6aSMatthew Dillon * vm_page_remove() aka swap_pager_page_removed(). 4941c7c3c6aSMatthew Dillon * 4951c7c3c6aSMatthew Dillon * This routine may not block 4961c7c3c6aSMatthew Dillon * This routine must be called at splvm(). 49726f9a767SRodney W. Grimes */ 4981c7c3c6aSMatthew Dillon 4991c7c3c6aSMatthew Dillon static __inline void 5001c7c3c6aSMatthew Dillon swp_pager_freeswapspace(blk, npages) 5011c7c3c6aSMatthew Dillon daddr_t blk; 5021c7c3c6aSMatthew Dillon int npages; 5030d94caffSDavid Greenman { 5041c7c3c6aSMatthew Dillon blist_free(swapblist, blk, npages); 5051c7c3c6aSMatthew Dillon vm_swap_size += npages; 5061c7c3c6aSMatthew Dillon swp_sizecheck(); 50726f9a767SRodney W. Grimes } 5081c7c3c6aSMatthew Dillon 50926f9a767SRodney W. Grimes /* 5101c7c3c6aSMatthew Dillon * SWAP_PAGER_FREESPACE() - frees swap blocks associated with a page 5111c7c3c6aSMatthew Dillon * range within an object. 5121c7c3c6aSMatthew Dillon * 5131c7c3c6aSMatthew Dillon * This is a globally accessible routine. 5141c7c3c6aSMatthew Dillon * 5151c7c3c6aSMatthew Dillon * This routine removes swapblk assignments from swap metadata. 5161c7c3c6aSMatthew Dillon * 5171c7c3c6aSMatthew Dillon * The external callers of this routine typically have already destroyed 5181c7c3c6aSMatthew Dillon * or renamed vm_page_t's associated with this range in the object so 5191c7c3c6aSMatthew Dillon * we should be ok. 5204dcc5c2dSMatthew Dillon * 5214dcc5c2dSMatthew Dillon * This routine may be called at any spl. We up our spl to splvm temporarily 5224dcc5c2dSMatthew Dillon * in order to perform the metadata removal. 52326f9a767SRodney W. Grimes */ 5241c7c3c6aSMatthew Dillon 52526f9a767SRodney W. Grimes void 52624a1cce3SDavid Greenman swap_pager_freespace(object, start, size) 52724a1cce3SDavid Greenman vm_object_t object; 528a316d390SJohn Dyson vm_pindex_t start; 529a316d390SJohn Dyson vm_size_t size; 53026f9a767SRodney W. Grimes { 5314dcc5c2dSMatthew Dillon int s = splvm(); 5321c7c3c6aSMatthew Dillon swp_pager_meta_free(object, start, size); 5334dcc5c2dSMatthew Dillon splx(s); 5344dcc5c2dSMatthew Dillon } 5354dcc5c2dSMatthew Dillon 5364dcc5c2dSMatthew Dillon /* 5374dcc5c2dSMatthew Dillon * SWAP_PAGER_RESERVE() - reserve swap blocks in object 5384dcc5c2dSMatthew Dillon * 5394dcc5c2dSMatthew Dillon * Assigns swap blocks to the specified range within the object. The 5404dcc5c2dSMatthew Dillon * swap blocks are not zerod. Any previous swap assignment is destroyed. 5414dcc5c2dSMatthew Dillon * 5424dcc5c2dSMatthew Dillon * Returns 0 on success, -1 on failure. 5434dcc5c2dSMatthew Dillon */ 5444dcc5c2dSMatthew Dillon 5454dcc5c2dSMatthew Dillon int 5464dcc5c2dSMatthew Dillon swap_pager_reserve(vm_object_t object, vm_pindex_t start, vm_size_t size) 5474dcc5c2dSMatthew Dillon { 5484dcc5c2dSMatthew Dillon int s; 5494dcc5c2dSMatthew Dillon int n = 0; 5504dcc5c2dSMatthew Dillon daddr_t blk = SWAPBLK_NONE; 5514dcc5c2dSMatthew Dillon vm_pindex_t beg = start; /* save start index */ 5524dcc5c2dSMatthew Dillon 5534dcc5c2dSMatthew Dillon s = splvm(); 5544dcc5c2dSMatthew Dillon while (size) { 5554dcc5c2dSMatthew Dillon if (n == 0) { 5564dcc5c2dSMatthew Dillon n = BLIST_MAX_ALLOC; 5574dcc5c2dSMatthew Dillon while ((blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE) { 5584dcc5c2dSMatthew Dillon n >>= 1; 5594dcc5c2dSMatthew Dillon if (n == 0) { 5604dcc5c2dSMatthew Dillon swp_pager_meta_free(object, beg, start - beg); 5614dcc5c2dSMatthew Dillon splx(s); 5624dcc5c2dSMatthew Dillon return(-1); 5634dcc5c2dSMatthew Dillon } 5644dcc5c2dSMatthew Dillon } 5654dcc5c2dSMatthew Dillon } 5664dcc5c2dSMatthew Dillon swp_pager_meta_build(object, start, blk); 5674dcc5c2dSMatthew Dillon --size; 5684dcc5c2dSMatthew Dillon ++start; 5694dcc5c2dSMatthew Dillon ++blk; 5704dcc5c2dSMatthew Dillon --n; 5714dcc5c2dSMatthew Dillon } 5724dcc5c2dSMatthew Dillon swp_pager_meta_free(object, start, n); 5734dcc5c2dSMatthew Dillon splx(s); 5744dcc5c2dSMatthew Dillon return(0); 57526f9a767SRodney W. Grimes } 57626f9a767SRodney W. Grimes 5770a47b48bSJohn Dyson /* 5781c7c3c6aSMatthew Dillon * SWAP_PAGER_COPY() - copy blocks from source pager to destination pager 5791c7c3c6aSMatthew Dillon * and destroy the source. 5801c7c3c6aSMatthew Dillon * 5811c7c3c6aSMatthew Dillon * Copy any valid swapblks from the source to the destination. In 5821c7c3c6aSMatthew Dillon * cases where both the source and destination have a valid swapblk, 5831c7c3c6aSMatthew Dillon * we keep the destination's. 5841c7c3c6aSMatthew Dillon * 5851c7c3c6aSMatthew Dillon * This routine is allowed to block. It may block allocating metadata 5861c7c3c6aSMatthew Dillon * indirectly through swp_pager_meta_build() or if paging is still in 5871c7c3c6aSMatthew Dillon * progress on the source. 5881c7c3c6aSMatthew Dillon * 5894dcc5c2dSMatthew Dillon * This routine can be called at any spl 5904dcc5c2dSMatthew Dillon * 5911c7c3c6aSMatthew Dillon * XXX vm_page_collapse() kinda expects us not to block because we 5921c7c3c6aSMatthew Dillon * supposedly do not need to allocate memory, but for the moment we 5931c7c3c6aSMatthew Dillon * *may* have to get a little memory from the zone allocator, but 5941c7c3c6aSMatthew Dillon * it is taken from the interrupt memory. We should be ok. 5951c7c3c6aSMatthew Dillon * 5961c7c3c6aSMatthew Dillon * The source object contains no vm_page_t's (which is just as well) 5971c7c3c6aSMatthew Dillon * 5981c7c3c6aSMatthew Dillon * The source object is of type OBJT_SWAP. 5991c7c3c6aSMatthew Dillon * 6004dcc5c2dSMatthew Dillon * The source and destination objects must be locked or 6014dcc5c2dSMatthew Dillon * inaccessible (XXX are they ?) 60226f9a767SRodney W. Grimes */ 60326f9a767SRodney W. Grimes 60426f9a767SRodney W. Grimes void 6051c7c3c6aSMatthew Dillon swap_pager_copy(srcobject, dstobject, offset, destroysource) 60624a1cce3SDavid Greenman vm_object_t srcobject; 60724a1cce3SDavid Greenman vm_object_t dstobject; 608a316d390SJohn Dyson vm_pindex_t offset; 609c0877f10SJohn Dyson int destroysource; 61026f9a767SRodney W. Grimes { 611a316d390SJohn Dyson vm_pindex_t i; 6124dcc5c2dSMatthew Dillon int s; 6134dcc5c2dSMatthew Dillon 6144dcc5c2dSMatthew Dillon s = splvm(); 61526f9a767SRodney W. Grimes 61626f9a767SRodney W. Grimes /* 6171c7c3c6aSMatthew Dillon * If destroysource is set, we remove the source object from the 6181c7c3c6aSMatthew Dillon * swap_pager internal queue now. 61926f9a767SRodney W. Grimes */ 6201c7c3c6aSMatthew Dillon 621cbd8ec09SJohn Dyson if (destroysource) { 62224a1cce3SDavid Greenman if (srcobject->handle == NULL) { 6231c7c3c6aSMatthew Dillon TAILQ_REMOVE( 6241c7c3c6aSMatthew Dillon &swap_pager_un_object_list, 6251c7c3c6aSMatthew Dillon srcobject, 6261c7c3c6aSMatthew Dillon pager_object_list 6271c7c3c6aSMatthew Dillon ); 62826f9a767SRodney W. Grimes } else { 6291c7c3c6aSMatthew Dillon TAILQ_REMOVE( 6301c7c3c6aSMatthew Dillon NOBJLIST(srcobject->handle), 6311c7c3c6aSMatthew Dillon srcobject, 6321c7c3c6aSMatthew Dillon pager_object_list 6331c7c3c6aSMatthew Dillon ); 63426f9a767SRodney W. Grimes } 635cbd8ec09SJohn Dyson } 63626f9a767SRodney W. Grimes 6371c7c3c6aSMatthew Dillon /* 6381c7c3c6aSMatthew Dillon * transfer source to destination. 6391c7c3c6aSMatthew Dillon */ 6401c7c3c6aSMatthew Dillon 6411c7c3c6aSMatthew Dillon for (i = 0; i < dstobject->size; ++i) { 6421c7c3c6aSMatthew Dillon daddr_t dstaddr; 6431c7c3c6aSMatthew Dillon 6441c7c3c6aSMatthew Dillon /* 6451c7c3c6aSMatthew Dillon * Locate (without changing) the swapblk on the destination, 6461c7c3c6aSMatthew Dillon * unless it is invalid in which case free it silently, or 6471c7c3c6aSMatthew Dillon * if the destination is a resident page, in which case the 6481c7c3c6aSMatthew Dillon * source is thrown away. 6491c7c3c6aSMatthew Dillon */ 6501c7c3c6aSMatthew Dillon 6511c7c3c6aSMatthew Dillon dstaddr = swp_pager_meta_ctl(dstobject, i, 0); 6521c7c3c6aSMatthew Dillon 6531c7c3c6aSMatthew Dillon if (dstaddr == SWAPBLK_NONE) { 6541c7c3c6aSMatthew Dillon /* 6551c7c3c6aSMatthew Dillon * Destination has no swapblk and is not resident, 6561c7c3c6aSMatthew Dillon * copy source. 6571c7c3c6aSMatthew Dillon */ 6581c7c3c6aSMatthew Dillon daddr_t srcaddr; 6591c7c3c6aSMatthew Dillon 6601c7c3c6aSMatthew Dillon srcaddr = swp_pager_meta_ctl( 6611c7c3c6aSMatthew Dillon srcobject, 6621c7c3c6aSMatthew Dillon i + offset, 6631c7c3c6aSMatthew Dillon SWM_POP 6641c7c3c6aSMatthew Dillon ); 6651c7c3c6aSMatthew Dillon 6661c7c3c6aSMatthew Dillon if (srcaddr != SWAPBLK_NONE) 6674dcc5c2dSMatthew Dillon swp_pager_meta_build(dstobject, i, srcaddr); 6681c7c3c6aSMatthew Dillon } else { 6691c7c3c6aSMatthew Dillon /* 6701c7c3c6aSMatthew Dillon * Destination has valid swapblk or it is represented 6711c7c3c6aSMatthew Dillon * by a resident page. We destroy the sourceblock. 6721c7c3c6aSMatthew Dillon */ 6731c7c3c6aSMatthew Dillon 6741c7c3c6aSMatthew Dillon swp_pager_meta_ctl(srcobject, i + offset, SWM_FREE); 6751c7c3c6aSMatthew Dillon } 67626f9a767SRodney W. Grimes } 67726f9a767SRodney W. Grimes 67826f9a767SRodney W. Grimes /* 6791c7c3c6aSMatthew Dillon * Free left over swap blocks in source. 6801c7c3c6aSMatthew Dillon * 6811c7c3c6aSMatthew Dillon * We have to revert the type to OBJT_DEFAULT so we do not accidently 6821c7c3c6aSMatthew Dillon * double-remove the object from the swap queues. 68326f9a767SRodney W. Grimes */ 68426f9a767SRodney W. Grimes 685c0877f10SJohn Dyson if (destroysource) { 6861c7c3c6aSMatthew Dillon swp_pager_meta_free_all(srcobject); 6871c7c3c6aSMatthew Dillon /* 6881c7c3c6aSMatthew Dillon * Reverting the type is not necessary, the caller is going 6891c7c3c6aSMatthew Dillon * to destroy srcobject directly, but I'm doing it here 690956f3135SPhilippe Charnier * for consistency since we've removed the object from its 6911c7c3c6aSMatthew Dillon * queues. 6921c7c3c6aSMatthew Dillon */ 6931c7c3c6aSMatthew Dillon srcobject->type = OBJT_DEFAULT; 694c0877f10SJohn Dyson } 6954dcc5c2dSMatthew Dillon splx(s); 69626f9a767SRodney W. Grimes } 69726f9a767SRodney W. Grimes 698df8bae1dSRodney W. Grimes /* 6991c7c3c6aSMatthew Dillon * SWAP_PAGER_HASPAGE() - determine if we have good backing store for 7001c7c3c6aSMatthew Dillon * the requested page. 7011c7c3c6aSMatthew Dillon * 7021c7c3c6aSMatthew Dillon * We determine whether good backing store exists for the requested 7031c7c3c6aSMatthew Dillon * page and return TRUE if it does, FALSE if it doesn't. 7041c7c3c6aSMatthew Dillon * 7051c7c3c6aSMatthew Dillon * If TRUE, we also try to determine how much valid, contiguous backing 7061c7c3c6aSMatthew Dillon * store exists before and after the requested page within a reasonable 7071c7c3c6aSMatthew Dillon * distance. We do not try to restrict it to the swap device stripe 7081c7c3c6aSMatthew Dillon * (that is handled in getpages/putpages). It probably isn't worth 7091c7c3c6aSMatthew Dillon * doing here. 710df8bae1dSRodney W. Grimes */ 71126f9a767SRodney W. Grimes 7121c7c3c6aSMatthew Dillon boolean_t 713a316d390SJohn Dyson swap_pager_haspage(object, pindex, before, after) 71424a1cce3SDavid Greenman vm_object_t object; 715a316d390SJohn Dyson vm_pindex_t pindex; 71624a1cce3SDavid Greenman int *before; 71724a1cce3SDavid Greenman int *after; 71826f9a767SRodney W. Grimes { 7191c7c3c6aSMatthew Dillon daddr_t blk0; 72025db2c54SMatthew Dillon int s; 72126f9a767SRodney W. Grimes 7221c7c3c6aSMatthew Dillon /* 7231c7c3c6aSMatthew Dillon * do we have good backing store at the requested index ? 7241c7c3c6aSMatthew Dillon */ 7251c7c3c6aSMatthew Dillon 72625db2c54SMatthew Dillon s = splvm(); 7271c7c3c6aSMatthew Dillon blk0 = swp_pager_meta_ctl(object, pindex, 0); 7281c7c3c6aSMatthew Dillon 7294dcc5c2dSMatthew Dillon if (blk0 == SWAPBLK_NONE) { 73025db2c54SMatthew Dillon splx(s); 7311c7c3c6aSMatthew Dillon if (before) 73224a1cce3SDavid Greenman *before = 0; 7331c7c3c6aSMatthew Dillon if (after) 73424a1cce3SDavid Greenman *after = 0; 73526f9a767SRodney W. Grimes return (FALSE); 73626f9a767SRodney W. Grimes } 73726f9a767SRodney W. Grimes 73826f9a767SRodney W. Grimes /* 7391c7c3c6aSMatthew Dillon * find backwards-looking contiguous good backing store 740e47ed70bSJohn Dyson */ 741e47ed70bSJohn Dyson 7421c7c3c6aSMatthew Dillon if (before != NULL) { 74326f9a767SRodney W. Grimes int i; 7440d94caffSDavid Greenman 7451c7c3c6aSMatthew Dillon for (i = 1; i < (SWB_NPAGES/2); ++i) { 7461c7c3c6aSMatthew Dillon daddr_t blk; 7471c7c3c6aSMatthew Dillon 7481c7c3c6aSMatthew Dillon if (i > pindex) 7491c7c3c6aSMatthew Dillon break; 7501c7c3c6aSMatthew Dillon blk = swp_pager_meta_ctl(object, pindex - i, 0); 7511c7c3c6aSMatthew Dillon if (blk != blk0 - i) 7521c7c3c6aSMatthew Dillon break; 753ffc82b0aSJohn Dyson } 7541c7c3c6aSMatthew Dillon *before = (i - 1); 75526f9a767SRodney W. Grimes } 75626f9a767SRodney W. Grimes 75726f9a767SRodney W. Grimes /* 7581c7c3c6aSMatthew Dillon * find forward-looking contiguous good backing store 75926f9a767SRodney W. Grimes */ 7601c7c3c6aSMatthew Dillon 7611c7c3c6aSMatthew Dillon if (after != NULL) { 7621c7c3c6aSMatthew Dillon int i; 7631c7c3c6aSMatthew Dillon 7641c7c3c6aSMatthew Dillon for (i = 1; i < (SWB_NPAGES/2); ++i) { 7651c7c3c6aSMatthew Dillon daddr_t blk; 7661c7c3c6aSMatthew Dillon 7671c7c3c6aSMatthew Dillon blk = swp_pager_meta_ctl(object, pindex + i, 0); 7681c7c3c6aSMatthew Dillon if (blk != blk0 + i) 7691c7c3c6aSMatthew Dillon break; 77026f9a767SRodney W. Grimes } 7711c7c3c6aSMatthew Dillon *after = (i - 1); 7721c7c3c6aSMatthew Dillon } 77325db2c54SMatthew Dillon splx(s); 7741c7c3c6aSMatthew Dillon return (TRUE); 7751c7c3c6aSMatthew Dillon } 7761c7c3c6aSMatthew Dillon 7771c7c3c6aSMatthew Dillon /* 7781c7c3c6aSMatthew Dillon * SWAP_PAGER_PAGE_UNSWAPPED() - remove swap backing store related to page 7791c7c3c6aSMatthew Dillon * 7801c7c3c6aSMatthew Dillon * This removes any associated swap backing store, whether valid or 7811c7c3c6aSMatthew Dillon * not, from the page. 7821c7c3c6aSMatthew Dillon * 7831c7c3c6aSMatthew Dillon * This routine is typically called when a page is made dirty, at 7841c7c3c6aSMatthew Dillon * which point any associated swap can be freed. MADV_FREE also 7851c7c3c6aSMatthew Dillon * calls us in a special-case situation 7861c7c3c6aSMatthew Dillon * 7871c7c3c6aSMatthew Dillon * NOTE!!! If the page is clean and the swap was valid, the caller 7881c7c3c6aSMatthew Dillon * should make the page dirty before calling this routine. This routine 7891c7c3c6aSMatthew Dillon * does NOT change the m->dirty status of the page. Also: MADV_FREE 7901c7c3c6aSMatthew Dillon * depends on it. 7911c7c3c6aSMatthew Dillon * 7921c7c3c6aSMatthew Dillon * This routine may not block 7934dcc5c2dSMatthew Dillon * This routine must be called at splvm() 7941c7c3c6aSMatthew Dillon */ 7951c7c3c6aSMatthew Dillon 7961c7c3c6aSMatthew Dillon static void 7971c7c3c6aSMatthew Dillon swap_pager_unswapped(m) 7981c7c3c6aSMatthew Dillon vm_page_t m; 7991c7c3c6aSMatthew Dillon { 8001c7c3c6aSMatthew Dillon swp_pager_meta_ctl(m->object, m->pindex, SWM_FREE); 8011c7c3c6aSMatthew Dillon } 8021c7c3c6aSMatthew Dillon 8031c7c3c6aSMatthew Dillon /* 804a5296b05SJulian Elischer * SWAP_PAGER_STRATEGY() - read, write, free blocks 805a5296b05SJulian Elischer * 806a5296b05SJulian Elischer * This implements the vm_pager_strategy() interface to swap and allows 807a5296b05SJulian Elischer * other parts of the system to directly access swap as backing store 808a5296b05SJulian Elischer * through vm_objects of type OBJT_SWAP. This is intended to be a 809a5296b05SJulian Elischer * cacheless interface ( i.e. caching occurs at higher levels ). 810a5296b05SJulian Elischer * Therefore we do not maintain any resident pages. All I/O goes 8114dcc5c2dSMatthew Dillon * directly to and from the swap device. 812a5296b05SJulian Elischer * 813a5296b05SJulian Elischer * Note that b_blkno is scaled for PAGE_SIZE 814a5296b05SJulian Elischer * 815a5296b05SJulian Elischer * We currently attempt to run I/O synchronously or asynchronously as 816a5296b05SJulian Elischer * the caller requests. This isn't perfect because we loose error 817a5296b05SJulian Elischer * sequencing when we run multiple ops in parallel to satisfy a request. 818a5296b05SJulian Elischer * But this is swap, so we let it all hang out. 819a5296b05SJulian Elischer */ 820a5296b05SJulian Elischer 821a5296b05SJulian Elischer static void 8220b441832SPoul-Henning Kamp swap_pager_strategy(vm_object_t object, struct bio *bp) 823a5296b05SJulian Elischer { 824a5296b05SJulian Elischer vm_pindex_t start; 825a5296b05SJulian Elischer int count; 8264dcc5c2dSMatthew Dillon int s; 827a5296b05SJulian Elischer char *data; 828a5296b05SJulian Elischer struct buf *nbp = NULL; 829a5296b05SJulian Elischer 8300b441832SPoul-Henning Kamp /* XXX: KASSERT instead ? */ 8310b441832SPoul-Henning Kamp if (bp->bio_bcount & PAGE_MASK) { 8320b441832SPoul-Henning Kamp bp->bio_error = EINVAL; 8330b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 8340b441832SPoul-Henning Kamp biodone(bp); 8350b441832SPoul-Henning Kamp printf("swap_pager_strategy: bp %p blk %d size %d, not page bounded\n", bp, (int)bp->bio_pblkno, (int)bp->bio_bcount); 836a5296b05SJulian Elischer return; 837a5296b05SJulian Elischer } 838a5296b05SJulian Elischer 839a5296b05SJulian Elischer /* 840a5296b05SJulian Elischer * Clear error indication, initialize page index, count, data pointer. 841a5296b05SJulian Elischer */ 842a5296b05SJulian Elischer 8430b441832SPoul-Henning Kamp bp->bio_error = 0; 8440b441832SPoul-Henning Kamp bp->bio_flags &= ~BIO_ERROR; 8450b441832SPoul-Henning Kamp bp->bio_resid = bp->bio_bcount; 846a5296b05SJulian Elischer 8470b441832SPoul-Henning Kamp start = bp->bio_pblkno; 8480b441832SPoul-Henning Kamp count = howmany(bp->bio_bcount, PAGE_SIZE); 8490b441832SPoul-Henning Kamp data = bp->bio_data; 850a5296b05SJulian Elischer 8514dcc5c2dSMatthew Dillon s = splvm(); 8524dcc5c2dSMatthew Dillon 853a5296b05SJulian Elischer /* 85421144e3bSPoul-Henning Kamp * Deal with BIO_DELETE 855a5296b05SJulian Elischer */ 856a5296b05SJulian Elischer 8570b441832SPoul-Henning Kamp if (bp->bio_cmd == BIO_DELETE) { 858a5296b05SJulian Elischer /* 859a5296b05SJulian Elischer * FREE PAGE(s) - destroy underlying swap that is no longer 860a5296b05SJulian Elischer * needed. 861a5296b05SJulian Elischer */ 862a5296b05SJulian Elischer swp_pager_meta_free(object, start, count); 863a5296b05SJulian Elischer splx(s); 8640b441832SPoul-Henning Kamp bp->bio_resid = 0; 8650b441832SPoul-Henning Kamp biodone(bp); 8664dcc5c2dSMatthew Dillon return; 8674dcc5c2dSMatthew Dillon } 8684dcc5c2dSMatthew Dillon 869a5296b05SJulian Elischer /* 8704dcc5c2dSMatthew Dillon * Execute read or write 871a5296b05SJulian Elischer */ 872a5296b05SJulian Elischer 873a5296b05SJulian Elischer while (count > 0) { 874a5296b05SJulian Elischer daddr_t blk; 875a5296b05SJulian Elischer 876a5296b05SJulian Elischer /* 8774dcc5c2dSMatthew Dillon * Obtain block. If block not found and writing, allocate a 8784dcc5c2dSMatthew Dillon * new block and build it into the object. 8794dcc5c2dSMatthew Dillon */ 8804dcc5c2dSMatthew Dillon 8814dcc5c2dSMatthew Dillon blk = swp_pager_meta_ctl(object, start, 0); 8820b441832SPoul-Henning Kamp if ((blk == SWAPBLK_NONE) && (bp->bio_cmd == BIO_WRITE)) { 8834dcc5c2dSMatthew Dillon blk = swp_pager_getswapspace(1); 8844dcc5c2dSMatthew Dillon if (blk == SWAPBLK_NONE) { 8850b441832SPoul-Henning Kamp bp->bio_error = ENOMEM; 8860b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 8874dcc5c2dSMatthew Dillon break; 8884dcc5c2dSMatthew Dillon } 8894dcc5c2dSMatthew Dillon swp_pager_meta_build(object, start, blk); 8904dcc5c2dSMatthew Dillon } 8914dcc5c2dSMatthew Dillon 8924dcc5c2dSMatthew Dillon /* 8934dcc5c2dSMatthew Dillon * Do we have to flush our current collection? Yes if: 8944dcc5c2dSMatthew Dillon * 8954dcc5c2dSMatthew Dillon * - no swap block at this index 8964dcc5c2dSMatthew Dillon * - swap block is not contiguous 8974dcc5c2dSMatthew Dillon * - we cross a physical disk boundry in the 8984dcc5c2dSMatthew Dillon * stripe. 899a5296b05SJulian Elischer */ 900a5296b05SJulian Elischer 901a5296b05SJulian Elischer if ( 9024dcc5c2dSMatthew Dillon nbp && (nbp->b_blkno + btoc(nbp->b_bcount) != blk || 9034dcc5c2dSMatthew Dillon ((nbp->b_blkno ^ blk) & dmmax_mask) 904a5296b05SJulian Elischer ) 905a5296b05SJulian Elischer ) { 9064dcc5c2dSMatthew Dillon splx(s); 9070b441832SPoul-Henning Kamp if (bp->bio_cmd == BIO_READ) { 908a5296b05SJulian Elischer ++cnt.v_swapin; 909a5296b05SJulian Elischer cnt.v_swappgsin += btoc(nbp->b_bcount); 9104dcc5c2dSMatthew Dillon } else { 9114dcc5c2dSMatthew Dillon ++cnt.v_swapout; 9124dcc5c2dSMatthew Dillon cnt.v_swappgsout += btoc(nbp->b_bcount); 9134dcc5c2dSMatthew Dillon nbp->b_dirtyend = nbp->b_bcount; 9144dcc5c2dSMatthew Dillon } 915a5296b05SJulian Elischer flushchainbuf(nbp); 9164dcc5c2dSMatthew Dillon s = splvm(); 917a5296b05SJulian Elischer nbp = NULL; 918a5296b05SJulian Elischer } 919a5296b05SJulian Elischer 920a5296b05SJulian Elischer /* 9214dcc5c2dSMatthew Dillon * Add new swapblk to nbp, instantiating nbp if necessary. 9224dcc5c2dSMatthew Dillon * Zero-fill reads are able to take a shortcut. 923a5296b05SJulian Elischer */ 9244dcc5c2dSMatthew Dillon 9254dcc5c2dSMatthew Dillon if (blk == SWAPBLK_NONE) { 9264dcc5c2dSMatthew Dillon /* 9274dcc5c2dSMatthew Dillon * We can only get here if we are reading. Since 9284dcc5c2dSMatthew Dillon * we are at splvm() we can safely modify b_resid, 9294dcc5c2dSMatthew Dillon * even if chain ops are in progress. 9304dcc5c2dSMatthew Dillon */ 931a5296b05SJulian Elischer bzero(data, PAGE_SIZE); 9320b441832SPoul-Henning Kamp bp->bio_resid -= PAGE_SIZE; 933a5296b05SJulian Elischer } else { 934a5296b05SJulian Elischer if (nbp == NULL) { 9350b441832SPoul-Henning Kamp nbp = getchainbuf(bp, swapdev_vp, B_ASYNC); 936a5296b05SJulian Elischer nbp->b_blkno = blk; 9374dcc5c2dSMatthew Dillon nbp->b_bcount = 0; 938a5296b05SJulian Elischer nbp->b_data = data; 939a5296b05SJulian Elischer } 940a5296b05SJulian Elischer nbp->b_bcount += PAGE_SIZE; 941a5296b05SJulian Elischer } 942a5296b05SJulian Elischer --count; 943a5296b05SJulian Elischer ++start; 944a5296b05SJulian Elischer data += PAGE_SIZE; 945a5296b05SJulian Elischer } 946a5296b05SJulian Elischer 947a5296b05SJulian Elischer /* 9484dcc5c2dSMatthew Dillon * Flush out last buffer 949a5296b05SJulian Elischer */ 950a5296b05SJulian Elischer 951a5296b05SJulian Elischer splx(s); 952a5296b05SJulian Elischer 953a5296b05SJulian Elischer if (nbp) { 95421144e3bSPoul-Henning Kamp if (nbp->b_iocmd == BIO_READ) { 955a5296b05SJulian Elischer ++cnt.v_swapin; 956a5296b05SJulian Elischer cnt.v_swappgsin += btoc(nbp->b_bcount); 957a5296b05SJulian Elischer } else { 958a5296b05SJulian Elischer ++cnt.v_swapout; 959a5296b05SJulian Elischer cnt.v_swappgsout += btoc(nbp->b_bcount); 9604dcc5c2dSMatthew Dillon nbp->b_dirtyend = nbp->b_bcount; 961a5296b05SJulian Elischer } 962a5296b05SJulian Elischer flushchainbuf(nbp); 9634dcc5c2dSMatthew Dillon /* nbp = NULL; */ 964a5296b05SJulian Elischer } 9654dcc5c2dSMatthew Dillon 9664dcc5c2dSMatthew Dillon /* 9674dcc5c2dSMatthew Dillon * Wait for completion. 9684dcc5c2dSMatthew Dillon */ 9694dcc5c2dSMatthew Dillon 970a5296b05SJulian Elischer waitchainbuf(bp, 0, 1); 971a5296b05SJulian Elischer } 972a5296b05SJulian Elischer 973a5296b05SJulian Elischer /* 9741c7c3c6aSMatthew Dillon * SWAP_PAGER_GETPAGES() - bring pages in from swap 9751c7c3c6aSMatthew Dillon * 9761c7c3c6aSMatthew Dillon * Attempt to retrieve (m, count) pages from backing store, but make 9771c7c3c6aSMatthew Dillon * sure we retrieve at least m[reqpage]. We try to load in as large 9781c7c3c6aSMatthew Dillon * a chunk surrounding m[reqpage] as is contiguous in swap and which 9791c7c3c6aSMatthew Dillon * belongs to the same object. 9801c7c3c6aSMatthew Dillon * 9811c7c3c6aSMatthew Dillon * The code is designed for asynchronous operation and 9821c7c3c6aSMatthew Dillon * immediate-notification of 'reqpage' but tends not to be 9831c7c3c6aSMatthew Dillon * used that way. Please do not optimize-out this algorithmic 9841c7c3c6aSMatthew Dillon * feature, I intend to improve on it in the future. 9851c7c3c6aSMatthew Dillon * 9861c7c3c6aSMatthew Dillon * The parent has a single vm_object_pip_add() reference prior to 9871c7c3c6aSMatthew Dillon * calling us and we should return with the same. 9881c7c3c6aSMatthew Dillon * 9891c7c3c6aSMatthew Dillon * The parent has BUSY'd the pages. We should return with 'm' 9901c7c3c6aSMatthew Dillon * left busy, but the others adjusted. 9911c7c3c6aSMatthew Dillon */ 99226f9a767SRodney W. Grimes 993f708ef1bSPoul-Henning Kamp static int 99424a1cce3SDavid Greenman swap_pager_getpages(object, m, count, reqpage) 99524a1cce3SDavid Greenman vm_object_t object; 99626f9a767SRodney W. Grimes vm_page_t *m; 99726f9a767SRodney W. Grimes int count, reqpage; 998df8bae1dSRodney W. Grimes { 9991c7c3c6aSMatthew Dillon struct buf *bp; 10001c7c3c6aSMatthew Dillon vm_page_t mreq; 10011c7c3c6aSMatthew Dillon int s; 100226f9a767SRodney W. Grimes int i; 100326f9a767SRodney W. Grimes int j; 10041c7c3c6aSMatthew Dillon daddr_t blk; 10051c7c3c6aSMatthew Dillon vm_offset_t kva; 10061c7c3c6aSMatthew Dillon vm_pindex_t lastpindex; 10070d94caffSDavid Greenman 10081c7c3c6aSMatthew Dillon mreq = m[reqpage]; 10091c7c3c6aSMatthew Dillon 10101c7c3c6aSMatthew Dillon if (mreq->object != object) { 10111c7c3c6aSMatthew Dillon panic("swap_pager_getpages: object mismatch %p/%p", 10121c7c3c6aSMatthew Dillon object, 10131c7c3c6aSMatthew Dillon mreq->object 10141c7c3c6aSMatthew Dillon ); 101526f9a767SRodney W. Grimes } 10161c7c3c6aSMatthew Dillon /* 10171c7c3c6aSMatthew Dillon * Calculate range to retrieve. The pages have already been assigned 10181c7c3c6aSMatthew Dillon * their swapblks. We require a *contiguous* range that falls entirely 10191c7c3c6aSMatthew Dillon * within a single device stripe. If we do not supply it, bad things 10204dcc5c2dSMatthew Dillon * happen. Note that blk, iblk & jblk can be SWAPBLK_NONE, but the 10214dcc5c2dSMatthew Dillon * loops are set up such that the case(s) are handled implicitly. 10224dcc5c2dSMatthew Dillon * 10234dcc5c2dSMatthew Dillon * The swp_*() calls must be made at splvm(). vm_page_free() does 10244dcc5c2dSMatthew Dillon * not need to be, but it will go a little faster if it is. 10251c7c3c6aSMatthew Dillon */ 10261c7c3c6aSMatthew Dillon 10274dcc5c2dSMatthew Dillon s = splvm(); 10281c7c3c6aSMatthew Dillon blk = swp_pager_meta_ctl(mreq->object, mreq->pindex, 0); 10291c7c3c6aSMatthew Dillon 10301c7c3c6aSMatthew Dillon for (i = reqpage - 1; i >= 0; --i) { 10311c7c3c6aSMatthew Dillon daddr_t iblk; 10321c7c3c6aSMatthew Dillon 10331c7c3c6aSMatthew Dillon iblk = swp_pager_meta_ctl(m[i]->object, m[i]->pindex, 0); 10341c7c3c6aSMatthew Dillon if (blk != iblk + (reqpage - i)) 103526f9a767SRodney W. Grimes break; 10364dcc5c2dSMatthew Dillon if ((blk ^ iblk) & dmmax_mask) 10374dcc5c2dSMatthew Dillon break; 103826f9a767SRodney W. Grimes } 10391c7c3c6aSMatthew Dillon ++i; 10401c7c3c6aSMatthew Dillon 10411c7c3c6aSMatthew Dillon for (j = reqpage + 1; j < count; ++j) { 10421c7c3c6aSMatthew Dillon daddr_t jblk; 10431c7c3c6aSMatthew Dillon 10441c7c3c6aSMatthew Dillon jblk = swp_pager_meta_ctl(m[j]->object, m[j]->pindex, 0); 10451c7c3c6aSMatthew Dillon if (blk != jblk - (j - reqpage)) 10461c7c3c6aSMatthew Dillon break; 10474dcc5c2dSMatthew Dillon if ((blk ^ jblk) & dmmax_mask) 10484dcc5c2dSMatthew Dillon break; 10491c7c3c6aSMatthew Dillon } 10501c7c3c6aSMatthew Dillon 10511c7c3c6aSMatthew Dillon /* 10521c7c3c6aSMatthew Dillon * free pages outside our collection range. Note: we never free 10531c7c3c6aSMatthew Dillon * mreq, it must remain busy throughout. 10541c7c3c6aSMatthew Dillon */ 10551c7c3c6aSMatthew Dillon 10561c7c3c6aSMatthew Dillon { 10571c7c3c6aSMatthew Dillon int k; 10581c7c3c6aSMatthew Dillon 10594dcc5c2dSMatthew Dillon for (k = 0; k < i; ++k) 10604dcc5c2dSMatthew Dillon vm_page_free(m[k]); 10614dcc5c2dSMatthew Dillon for (k = j; k < count; ++k) 10621c7c3c6aSMatthew Dillon vm_page_free(m[k]); 10631c7c3c6aSMatthew Dillon } 10644dcc5c2dSMatthew Dillon splx(s); 10654dcc5c2dSMatthew Dillon 10661c7c3c6aSMatthew Dillon 10671c7c3c6aSMatthew Dillon /* 10684dcc5c2dSMatthew Dillon * Return VM_PAGER_FAIL if we have nothing to do. Return mreq 10694dcc5c2dSMatthew Dillon * still busy, but the others unbusied. 10701c7c3c6aSMatthew Dillon */ 10711c7c3c6aSMatthew Dillon 10724dcc5c2dSMatthew Dillon if (blk == SWAPBLK_NONE) 107326f9a767SRodney W. Grimes return(VM_PAGER_FAIL); 1074df8bae1dSRodney W. Grimes 107516f62314SDavid Greenman /* 107616f62314SDavid Greenman * Get a swap buffer header to perform the IO 107716f62314SDavid Greenman */ 10781c7c3c6aSMatthew Dillon 10791c7c3c6aSMatthew Dillon bp = getpbuf(&nsw_rcount); 108016f62314SDavid Greenman kva = (vm_offset_t) bp->b_data; 108126f9a767SRodney W. Grimes 108216f62314SDavid Greenman /* 108316f62314SDavid Greenman * map our page(s) into kva for input 10841c7c3c6aSMatthew Dillon * 10851c7c3c6aSMatthew Dillon * NOTE: B_PAGING is set by pbgetvp() 108616f62314SDavid Greenman */ 108716f62314SDavid Greenman 10881c7c3c6aSMatthew Dillon pmap_qenter(kva, m + i, j - i); 10891c7c3c6aSMatthew Dillon 109021144e3bSPoul-Henning Kamp bp->b_iocmd = BIO_READ; 10911c7c3c6aSMatthew Dillon bp->b_iodone = swp_pager_async_iodone; 1092b0eeea20SPoul-Henning Kamp bp->b_rcred = bp->b_wcred = proc0.p_ucred; 1093a5296b05SJulian Elischer bp->b_data = (caddr_t) kva; 109426f9a767SRodney W. Grimes crhold(bp->b_rcred); 109526f9a767SRodney W. Grimes crhold(bp->b_wcred); 10961c7c3c6aSMatthew Dillon bp->b_blkno = blk - (reqpage - i); 10971c7c3c6aSMatthew Dillon bp->b_bcount = PAGE_SIZE * (j - i); 10981c7c3c6aSMatthew Dillon bp->b_bufsize = PAGE_SIZE * (j - i); 10991c7c3c6aSMatthew Dillon bp->b_pager.pg_reqpage = reqpage - i; 11001c7c3c6aSMatthew Dillon 11011c7c3c6aSMatthew Dillon { 11021c7c3c6aSMatthew Dillon int k; 11031c7c3c6aSMatthew Dillon 11041c7c3c6aSMatthew Dillon for (k = i; k < j; ++k) { 11051c7c3c6aSMatthew Dillon bp->b_pages[k - i] = m[k]; 11061c7c3c6aSMatthew Dillon vm_page_flag_set(m[k], PG_SWAPINPROG); 11071c7c3c6aSMatthew Dillon } 11081c7c3c6aSMatthew Dillon } 11091c7c3c6aSMatthew Dillon bp->b_npages = j - i; 111026f9a767SRodney W. Grimes 11110d94caffSDavid Greenman pbgetvp(swapdev_vp, bp); 1112df8bae1dSRodney W. Grimes 1113976e77fcSDavid Greenman cnt.v_swapin++; 11141c7c3c6aSMatthew Dillon cnt.v_swappgsin += bp->b_npages; 11151c7c3c6aSMatthew Dillon 1116df8bae1dSRodney W. Grimes /* 11171c7c3c6aSMatthew Dillon * We still hold the lock on mreq, and our automatic completion routine 11181c7c3c6aSMatthew Dillon * does not remove it. 1119df8bae1dSRodney W. Grimes */ 11201c7c3c6aSMatthew Dillon 11211c7c3c6aSMatthew Dillon vm_object_pip_add(mreq->object, bp->b_npages); 11221c7c3c6aSMatthew Dillon lastpindex = m[j-1]->pindex; 11231c7c3c6aSMatthew Dillon 11241c7c3c6aSMatthew Dillon /* 11251c7c3c6aSMatthew Dillon * perform the I/O. NOTE!!! bp cannot be considered valid after 11261c7c3c6aSMatthew Dillon * this point because we automatically release it on completion. 11271c7c3c6aSMatthew Dillon * Instead, we look at the one page we are interested in which we 11281c7c3c6aSMatthew Dillon * still hold a lock on even through the I/O completion. 11291c7c3c6aSMatthew Dillon * 11301c7c3c6aSMatthew Dillon * The other pages in our m[] array are also released on completion, 11311c7c3c6aSMatthew Dillon * so we cannot assume they are valid anymore either. 11321c7c3c6aSMatthew Dillon * 1133ea3aecf5SPeter Wemm * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 11341c7c3c6aSMatthew Dillon */ 11351c7c3c6aSMatthew Dillon 1136b890cb2cSPeter Wemm BUF_KERNPROC(bp); 1137b99c307aSPoul-Henning Kamp BUF_STRATEGY(bp); 113826f9a767SRodney W. Grimes 113926f9a767SRodney W. Grimes /* 11401c7c3c6aSMatthew Dillon * wait for the page we want to complete. PG_SWAPINPROG is always 11411c7c3c6aSMatthew Dillon * cleared on completion. If an I/O error occurs, SWAPBLK_NONE 11421c7c3c6aSMatthew Dillon * is set in the meta-data. 114326f9a767SRodney W. Grimes */ 11441b119d9dSDavid Greenman 11451c7c3c6aSMatthew Dillon s = splvm(); 11461c7c3c6aSMatthew Dillon 11471c7c3c6aSMatthew Dillon while ((mreq->flags & PG_SWAPINPROG) != 0) { 11481c7c3c6aSMatthew Dillon vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED); 11491c7c3c6aSMatthew Dillon cnt.v_intrans++; 11501c7c3c6aSMatthew Dillon if (tsleep(mreq, PSWP, "swread", hz*20)) { 1151ac1e407bSBruce Evans printf( 11521c7c3c6aSMatthew Dillon "swap_pager: indefinite wait buffer: device:" 1153af647ddeSBruce Evans " %s, blkno: %ld, size: %ld\n", 1154af647ddeSBruce Evans devtoname(bp->b_dev), (long)bp->b_blkno, 1155af647ddeSBruce Evans bp->b_bcount 11561c7c3c6aSMatthew Dillon ); 11571c7c3c6aSMatthew Dillon } 11581b119d9dSDavid Greenman } 115926f9a767SRodney W. Grimes 1160df8bae1dSRodney W. Grimes splx(s); 116126f9a767SRodney W. Grimes 116226f9a767SRodney W. Grimes /* 11631c7c3c6aSMatthew Dillon * mreq is left bussied after completion, but all the other pages 11641c7c3c6aSMatthew Dillon * are freed. If we had an unrecoverable read error the page will 11651c7c3c6aSMatthew Dillon * not be valid. 116626f9a767SRodney W. Grimes */ 116726f9a767SRodney W. Grimes 11681c7c3c6aSMatthew Dillon if (mreq->valid != VM_PAGE_BITS_ALL) { 11691c7c3c6aSMatthew Dillon return(VM_PAGER_ERROR); 117026f9a767SRodney W. Grimes } else { 11711c7c3c6aSMatthew Dillon return(VM_PAGER_OK); 117226f9a767SRodney W. Grimes } 11731c7c3c6aSMatthew Dillon 11741c7c3c6aSMatthew Dillon /* 11751c7c3c6aSMatthew Dillon * A final note: in a low swap situation, we cannot deallocate swap 11761c7c3c6aSMatthew Dillon * and mark a page dirty here because the caller is likely to mark 11771c7c3c6aSMatthew Dillon * the page clean when we return, causing the page to possibly revert 11781c7c3c6aSMatthew Dillon * to all-zero's later. 11791c7c3c6aSMatthew Dillon */ 1180df8bae1dSRodney W. Grimes } 1181df8bae1dSRodney W. Grimes 11821c7c3c6aSMatthew Dillon /* 11831c7c3c6aSMatthew Dillon * swap_pager_putpages: 11841c7c3c6aSMatthew Dillon * 11851c7c3c6aSMatthew Dillon * Assign swap (if necessary) and initiate I/O on the specified pages. 11861c7c3c6aSMatthew Dillon * 11871c7c3c6aSMatthew Dillon * We support both OBJT_DEFAULT and OBJT_SWAP objects. DEFAULT objects 11881c7c3c6aSMatthew Dillon * are automatically converted to SWAP objects. 11891c7c3c6aSMatthew Dillon * 1190ea3aecf5SPeter Wemm * In a low memory situation we may block in VOP_STRATEGY(), but the new 11911c7c3c6aSMatthew Dillon * vm_page reservation system coupled with properly written VFS devices 11921c7c3c6aSMatthew Dillon * should ensure that no low-memory deadlock occurs. This is an area 11931c7c3c6aSMatthew Dillon * which needs work. 11941c7c3c6aSMatthew Dillon * 11951c7c3c6aSMatthew Dillon * The parent has N vm_object_pip_add() references prior to 11961c7c3c6aSMatthew Dillon * calling us and will remove references for rtvals[] that are 11971c7c3c6aSMatthew Dillon * not set to VM_PAGER_PEND. We need to remove the rest on I/O 11981c7c3c6aSMatthew Dillon * completion. 11991c7c3c6aSMatthew Dillon * 12001c7c3c6aSMatthew Dillon * The parent has soft-busy'd the pages it passes us and will unbusy 12011c7c3c6aSMatthew Dillon * those whos rtvals[] entry is not set to VM_PAGER_PEND on return. 12021c7c3c6aSMatthew Dillon * We need to unbusy the rest on I/O completion. 12031c7c3c6aSMatthew Dillon */ 12041c7c3c6aSMatthew Dillon 1205e4542174SMatthew Dillon void 120624a1cce3SDavid Greenman swap_pager_putpages(object, m, count, sync, rtvals) 120724a1cce3SDavid Greenman vm_object_t object; 120826f9a767SRodney W. Grimes vm_page_t *m; 120926f9a767SRodney W. Grimes int count; 121024a1cce3SDavid Greenman boolean_t sync; 121126f9a767SRodney W. Grimes int *rtvals; 1212df8bae1dSRodney W. Grimes { 12131c7c3c6aSMatthew Dillon int i; 12141c7c3c6aSMatthew Dillon int n = 0; 1215df8bae1dSRodney W. Grimes 12161c7c3c6aSMatthew Dillon if (count && m[0]->object != object) { 12171c7c3c6aSMatthew Dillon panic("swap_pager_getpages: object mismatch %p/%p", 12181c7c3c6aSMatthew Dillon object, 12191c7c3c6aSMatthew Dillon m[0]->object 12201c7c3c6aSMatthew Dillon ); 12211c7c3c6aSMatthew Dillon } 12221c7c3c6aSMatthew Dillon /* 12231c7c3c6aSMatthew Dillon * Step 1 12241c7c3c6aSMatthew Dillon * 12251c7c3c6aSMatthew Dillon * Turn object into OBJT_SWAP 12261c7c3c6aSMatthew Dillon * check for bogus sysops 12271c7c3c6aSMatthew Dillon * force sync if not pageout process 12281c7c3c6aSMatthew Dillon */ 1229e736cd05SJohn Dyson 12304dcc5c2dSMatthew Dillon if (object->type != OBJT_SWAP) 12314dcc5c2dSMatthew Dillon swp_pager_meta_build(object, 0, SWAPBLK_NONE); 1232e47ed70bSJohn Dyson 1233e47ed70bSJohn Dyson if (curproc != pageproc) 1234e47ed70bSJohn Dyson sync = TRUE; 123526f9a767SRodney W. Grimes 12361c7c3c6aSMatthew Dillon /* 12371c7c3c6aSMatthew Dillon * Step 2 12381c7c3c6aSMatthew Dillon * 1239ad3cce20SMatthew Dillon * Update nsw parameters from swap_async_max sysctl values. 1240ad3cce20SMatthew Dillon * Do not let the sysop crash the machine with bogus numbers. 1241327f4e83SMatthew Dillon */ 1242327f4e83SMatthew Dillon 1243327f4e83SMatthew Dillon if (swap_async_max != nsw_wcount_async_max) { 1244327f4e83SMatthew Dillon int n; 1245327f4e83SMatthew Dillon int s; 1246327f4e83SMatthew Dillon 1247327f4e83SMatthew Dillon /* 1248327f4e83SMatthew Dillon * limit range 1249327f4e83SMatthew Dillon */ 1250327f4e83SMatthew Dillon if ((n = swap_async_max) > nswbuf / 2) 1251327f4e83SMatthew Dillon n = nswbuf / 2; 1252327f4e83SMatthew Dillon if (n < 1) 1253327f4e83SMatthew Dillon n = 1; 1254327f4e83SMatthew Dillon swap_async_max = n; 1255327f4e83SMatthew Dillon 1256327f4e83SMatthew Dillon /* 1257327f4e83SMatthew Dillon * Adjust difference ( if possible ). If the current async 1258327f4e83SMatthew Dillon * count is too low, we may not be able to make the adjustment 1259327f4e83SMatthew Dillon * at this time. 1260327f4e83SMatthew Dillon */ 1261327f4e83SMatthew Dillon s = splvm(); 1262327f4e83SMatthew Dillon n -= nsw_wcount_async_max; 1263327f4e83SMatthew Dillon if (nsw_wcount_async + n >= 0) { 1264327f4e83SMatthew Dillon nsw_wcount_async += n; 1265327f4e83SMatthew Dillon nsw_wcount_async_max += n; 1266327f4e83SMatthew Dillon wakeup(&nsw_wcount_async); 1267327f4e83SMatthew Dillon } 1268327f4e83SMatthew Dillon splx(s); 1269327f4e83SMatthew Dillon } 1270327f4e83SMatthew Dillon 1271327f4e83SMatthew Dillon /* 1272327f4e83SMatthew Dillon * Step 3 1273327f4e83SMatthew Dillon * 12741c7c3c6aSMatthew Dillon * Assign swap blocks and issue I/O. We reallocate swap on the fly. 12751c7c3c6aSMatthew Dillon * The page is left dirty until the pageout operation completes 12761c7c3c6aSMatthew Dillon * successfully. 12771c7c3c6aSMatthew Dillon */ 127826f9a767SRodney W. Grimes 12791c7c3c6aSMatthew Dillon for (i = 0; i < count; i += n) { 12801c7c3c6aSMatthew Dillon int s; 12811c7c3c6aSMatthew Dillon int j; 12821c7c3c6aSMatthew Dillon struct buf *bp; 1283a316d390SJohn Dyson daddr_t blk; 128426f9a767SRodney W. Grimes 1285df8bae1dSRodney W. Grimes /* 12861c7c3c6aSMatthew Dillon * Maximum I/O size is limited by a number of factors. 1287df8bae1dSRodney W. Grimes */ 128826f9a767SRodney W. Grimes 12891c7c3c6aSMatthew Dillon n = min(BLIST_MAX_ALLOC, count - i); 1290327f4e83SMatthew Dillon n = min(n, nsw_cluster_max); 12911c7c3c6aSMatthew Dillon 12924dcc5c2dSMatthew Dillon s = splvm(); 12934dcc5c2dSMatthew Dillon 129426f9a767SRodney W. Grimes /* 12951c7c3c6aSMatthew Dillon * Get biggest block of swap we can. If we fail, fall 12961c7c3c6aSMatthew Dillon * back and try to allocate a smaller block. Don't go 12971c7c3c6aSMatthew Dillon * overboard trying to allocate space if it would overly 12981c7c3c6aSMatthew Dillon * fragment swap. 129926f9a767SRodney W. Grimes */ 13001c7c3c6aSMatthew Dillon while ( 13011c7c3c6aSMatthew Dillon (blk = swp_pager_getswapspace(n)) == SWAPBLK_NONE && 13021c7c3c6aSMatthew Dillon n > 4 13031c7c3c6aSMatthew Dillon ) { 13041c7c3c6aSMatthew Dillon n >>= 1; 130526f9a767SRodney W. Grimes } 13061c7c3c6aSMatthew Dillon if (blk == SWAPBLK_NONE) { 13074dcc5c2dSMatthew Dillon for (j = 0; j < n; ++j) 13081c7c3c6aSMatthew Dillon rtvals[i+j] = VM_PAGER_FAIL; 13094dcc5c2dSMatthew Dillon splx(s); 13101c7c3c6aSMatthew Dillon continue; 131126f9a767SRodney W. Grimes } 131226f9a767SRodney W. Grimes 131326f9a767SRodney W. Grimes /* 13144dcc5c2dSMatthew Dillon * The I/O we are constructing cannot cross a physical 13154dcc5c2dSMatthew Dillon * disk boundry in the swap stripe. Note: we are still 13164dcc5c2dSMatthew Dillon * at splvm(). 131726f9a767SRodney W. Grimes */ 13181c7c3c6aSMatthew Dillon if ((blk ^ (blk + n)) & dmmax_mask) { 13191c7c3c6aSMatthew Dillon j = ((blk + dmmax) & dmmax_mask) - blk; 13201c7c3c6aSMatthew Dillon swp_pager_freeswapspace(blk + j, n - j); 13211c7c3c6aSMatthew Dillon n = j; 1322e47ed70bSJohn Dyson } 132326f9a767SRodney W. Grimes 132426f9a767SRodney W. Grimes /* 13251c7c3c6aSMatthew Dillon * All I/O parameters have been satisfied, build the I/O 13261c7c3c6aSMatthew Dillon * request and assign the swap space. 13271c7c3c6aSMatthew Dillon * 13281c7c3c6aSMatthew Dillon * NOTE: B_PAGING is set by pbgetvp() 132926f9a767SRodney W. Grimes */ 133026f9a767SRodney W. Grimes 1331327f4e83SMatthew Dillon if (sync == TRUE) { 1332327f4e83SMatthew Dillon bp = getpbuf(&nsw_wcount_sync); 1333327f4e83SMatthew Dillon } else { 1334327f4e83SMatthew Dillon bp = getpbuf(&nsw_wcount_async); 133521144e3bSPoul-Henning Kamp bp->b_flags = B_ASYNC; 1336327f4e83SMatthew Dillon } 1337912e4ae9SPoul-Henning Kamp bp->b_iocmd = BIO_WRITE; 13381c7c3c6aSMatthew Dillon bp->b_spc = NULL; /* not used, but NULL-out anyway */ 133926f9a767SRodney W. Grimes 13401c7c3c6aSMatthew Dillon pmap_qenter((vm_offset_t)bp->b_data, &m[i], n); 13411c7c3c6aSMatthew Dillon 1342b0eeea20SPoul-Henning Kamp bp->b_rcred = bp->b_wcred = proc0.p_ucred; 13431c7c3c6aSMatthew Dillon bp->b_bcount = PAGE_SIZE * n; 13441c7c3c6aSMatthew Dillon bp->b_bufsize = PAGE_SIZE * n; 13451c7c3c6aSMatthew Dillon bp->b_blkno = blk; 1346e47ed70bSJohn Dyson 1347a5296b05SJulian Elischer crhold(bp->b_rcred); 1348a5296b05SJulian Elischer crhold(bp->b_wcred); 1349a5296b05SJulian Elischer 1350a5296b05SJulian Elischer pbgetvp(swapdev_vp, bp); 1351a5296b05SJulian Elischer 13521c7c3c6aSMatthew Dillon for (j = 0; j < n; ++j) { 13531c7c3c6aSMatthew Dillon vm_page_t mreq = m[i+j]; 13541c7c3c6aSMatthew Dillon 13551c7c3c6aSMatthew Dillon swp_pager_meta_build( 13561c7c3c6aSMatthew Dillon mreq->object, 13571c7c3c6aSMatthew Dillon mreq->pindex, 13584dcc5c2dSMatthew Dillon blk + j 13591c7c3c6aSMatthew Dillon ); 13607dbf82dcSMatthew Dillon vm_page_dirty(mreq); 13611c7c3c6aSMatthew Dillon rtvals[i+j] = VM_PAGER_OK; 13621c7c3c6aSMatthew Dillon 13631c7c3c6aSMatthew Dillon vm_page_flag_set(mreq, PG_SWAPINPROG); 13641c7c3c6aSMatthew Dillon bp->b_pages[j] = mreq; 13651c7c3c6aSMatthew Dillon } 13661c7c3c6aSMatthew Dillon bp->b_npages = n; 1367a5296b05SJulian Elischer /* 1368a5296b05SJulian Elischer * Must set dirty range for NFS to work. 1369a5296b05SJulian Elischer */ 1370a5296b05SJulian Elischer bp->b_dirtyoff = 0; 1371a5296b05SJulian Elischer bp->b_dirtyend = bp->b_bcount; 13721c7c3c6aSMatthew Dillon 13731c7c3c6aSMatthew Dillon cnt.v_swapout++; 13741c7c3c6aSMatthew Dillon cnt.v_swappgsout += bp->b_npages; 137526f9a767SRodney W. Grimes swapdev_vp->v_numoutput++; 137626f9a767SRodney W. Grimes 13774dcc5c2dSMatthew Dillon splx(s); 13784dcc5c2dSMatthew Dillon 137926f9a767SRodney W. Grimes /* 13801c7c3c6aSMatthew Dillon * asynchronous 13811c7c3c6aSMatthew Dillon * 1382ea3aecf5SPeter Wemm * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 138326f9a767SRodney W. Grimes */ 1384e47ed70bSJohn Dyson 13851c7c3c6aSMatthew Dillon if (sync == FALSE) { 13861c7c3c6aSMatthew Dillon bp->b_iodone = swp_pager_async_iodone; 138767812eacSKirk McKusick BUF_KERNPROC(bp); 1388b99c307aSPoul-Henning Kamp BUF_STRATEGY(bp); 13891c7c3c6aSMatthew Dillon 13901c7c3c6aSMatthew Dillon for (j = 0; j < n; ++j) 13911c7c3c6aSMatthew Dillon rtvals[i+j] = VM_PAGER_PEND; 13921c7c3c6aSMatthew Dillon continue; 139326f9a767SRodney W. Grimes } 1394e47ed70bSJohn Dyson 139526f9a767SRodney W. Grimes /* 13961c7c3c6aSMatthew Dillon * synchronous 13971c7c3c6aSMatthew Dillon * 1398ea3aecf5SPeter Wemm * NOTE: b_blkno is destroyed by the call to VOP_STRATEGY 13991c7c3c6aSMatthew Dillon */ 14001c7c3c6aSMatthew Dillon 14011c7c3c6aSMatthew Dillon bp->b_iodone = swp_pager_sync_iodone; 1402b99c307aSPoul-Henning Kamp BUF_STRATEGY(bp); 14031c7c3c6aSMatthew Dillon 14041c7c3c6aSMatthew Dillon /* 14051c7c3c6aSMatthew Dillon * Wait for the sync I/O to complete, then update rtvals. 14061c7c3c6aSMatthew Dillon * We just set the rtvals[] to VM_PAGER_PEND so we can call 14071c7c3c6aSMatthew Dillon * our async completion routine at the end, thus avoiding a 14081c7c3c6aSMatthew Dillon * double-free. 140926f9a767SRodney W. Grimes */ 14104dcc5c2dSMatthew Dillon s = splbio(); 14114dcc5c2dSMatthew Dillon 141226f9a767SRodney W. Grimes while ((bp->b_flags & B_DONE) == 0) { 141324a1cce3SDavid Greenman tsleep(bp, PVM, "swwrt", 0); 141426f9a767SRodney W. Grimes } 1415e47ed70bSJohn Dyson 14161c7c3c6aSMatthew Dillon for (j = 0; j < n; ++j) 14171c7c3c6aSMatthew Dillon rtvals[i+j] = VM_PAGER_PEND; 141826f9a767SRodney W. Grimes 14191c7c3c6aSMatthew Dillon /* 14201c7c3c6aSMatthew Dillon * Now that we are through with the bp, we can call the 14211c7c3c6aSMatthew Dillon * normal async completion, which frees everything up. 14221c7c3c6aSMatthew Dillon */ 14231c7c3c6aSMatthew Dillon 14241c7c3c6aSMatthew Dillon swp_pager_async_iodone(bp); 142526f9a767SRodney W. Grimes 142626f9a767SRodney W. Grimes splx(s); 14271c7c3c6aSMatthew Dillon } 14281c7c3c6aSMatthew Dillon } 14291c7c3c6aSMatthew Dillon 14301c7c3c6aSMatthew Dillon /* 14311c7c3c6aSMatthew Dillon * swap_pager_sync_iodone: 14321c7c3c6aSMatthew Dillon * 14331c7c3c6aSMatthew Dillon * Completion routine for synchronous reads and writes from/to swap. 14341c7c3c6aSMatthew Dillon * We just mark the bp is complete and wake up anyone waiting on it. 14351c7c3c6aSMatthew Dillon * 14364dcc5c2dSMatthew Dillon * This routine may not block. This routine is called at splbio() or better. 14371c7c3c6aSMatthew Dillon */ 14381c7c3c6aSMatthew Dillon 14391c7c3c6aSMatthew Dillon static void 14401c7c3c6aSMatthew Dillon swp_pager_sync_iodone(bp) 14411c7c3c6aSMatthew Dillon struct buf *bp; 14421c7c3c6aSMatthew Dillon { 14431c7c3c6aSMatthew Dillon bp->b_flags |= B_DONE; 14441c7c3c6aSMatthew Dillon bp->b_flags &= ~B_ASYNC; 14451c7c3c6aSMatthew Dillon wakeup(bp); 14461c7c3c6aSMatthew Dillon } 14471c7c3c6aSMatthew Dillon 14481c7c3c6aSMatthew Dillon /* 14491c7c3c6aSMatthew Dillon * swp_pager_async_iodone: 14501c7c3c6aSMatthew Dillon * 14511c7c3c6aSMatthew Dillon * Completion routine for asynchronous reads and writes from/to swap. 14521c7c3c6aSMatthew Dillon * Also called manually by synchronous code to finish up a bp. 14531c7c3c6aSMatthew Dillon * 14541c7c3c6aSMatthew Dillon * For READ operations, the pages are PG_BUSY'd. For WRITE operations, 14551c7c3c6aSMatthew Dillon * the pages are vm_page_t->busy'd. For READ operations, we PG_BUSY 14561c7c3c6aSMatthew Dillon * unbusy all pages except the 'main' request page. For WRITE 14571c7c3c6aSMatthew Dillon * operations, we vm_page_t->busy'd unbusy all pages ( we can do this 14581c7c3c6aSMatthew Dillon * because we marked them all VM_PAGER_PEND on return from putpages ). 14591c7c3c6aSMatthew Dillon * 14601c7c3c6aSMatthew Dillon * This routine may not block. 14614dcc5c2dSMatthew Dillon * This routine is called at splbio() or better 14624dcc5c2dSMatthew Dillon * 14634dcc5c2dSMatthew Dillon * We up ourselves to splvm() as required for various vm_page related 14644dcc5c2dSMatthew Dillon * calls. 14651c7c3c6aSMatthew Dillon */ 14661c7c3c6aSMatthew Dillon 14671c7c3c6aSMatthew Dillon static void 14681c7c3c6aSMatthew Dillon swp_pager_async_iodone(bp) 14691c7c3c6aSMatthew Dillon register struct buf *bp; 14701c7c3c6aSMatthew Dillon { 14711c7c3c6aSMatthew Dillon int s; 14721c7c3c6aSMatthew Dillon int i; 14731c7c3c6aSMatthew Dillon vm_object_t object = NULL; 14741c7c3c6aSMatthew Dillon 14751c7c3c6aSMatthew Dillon bp->b_flags |= B_DONE; 14761c7c3c6aSMatthew Dillon 14771c7c3c6aSMatthew Dillon /* 14781c7c3c6aSMatthew Dillon * report error 14791c7c3c6aSMatthew Dillon */ 14801c7c3c6aSMatthew Dillon 1481c244d2deSPoul-Henning Kamp if (bp->b_ioflags & BIO_ERROR) { 14821c7c3c6aSMatthew Dillon printf( 14831c7c3c6aSMatthew Dillon "swap_pager: I/O error - %s failed; blkno %ld," 14841c7c3c6aSMatthew Dillon "size %ld, error %d\n", 148521144e3bSPoul-Henning Kamp ((bp->b_iocmd == BIO_READ) ? "pagein" : "pageout"), 14861c7c3c6aSMatthew Dillon (long)bp->b_blkno, 14871c7c3c6aSMatthew Dillon (long)bp->b_bcount, 14881c7c3c6aSMatthew Dillon bp->b_error 14891c7c3c6aSMatthew Dillon ); 14901c7c3c6aSMatthew Dillon } 14911c7c3c6aSMatthew Dillon 14921c7c3c6aSMatthew Dillon /* 14934dcc5c2dSMatthew Dillon * set object, raise to splvm(). 14941c7c3c6aSMatthew Dillon */ 14951c7c3c6aSMatthew Dillon 14961c7c3c6aSMatthew Dillon if (bp->b_npages) 14971c7c3c6aSMatthew Dillon object = bp->b_pages[0]->object; 14984dcc5c2dSMatthew Dillon s = splvm(); 149926f9a767SRodney W. Grimes 150026f9a767SRodney W. Grimes /* 150126f9a767SRodney W. Grimes * remove the mapping for kernel virtual 150226f9a767SRodney W. Grimes */ 15031c7c3c6aSMatthew Dillon 15041c7c3c6aSMatthew Dillon pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages); 150526f9a767SRodney W. Grimes 150626f9a767SRodney W. Grimes /* 15071c7c3c6aSMatthew Dillon * cleanup pages. If an error occurs writing to swap, we are in 15081c7c3c6aSMatthew Dillon * very serious trouble. If it happens to be a disk error, though, 15091c7c3c6aSMatthew Dillon * we may be able to recover by reassigning the swap later on. So 15101c7c3c6aSMatthew Dillon * in this case we remove the m->swapblk assignment for the page 15111c7c3c6aSMatthew Dillon * but do not free it in the rlist. The errornous block(s) are thus 15121c7c3c6aSMatthew Dillon * never reallocated as swap. Redirty the page and continue. 151326f9a767SRodney W. Grimes */ 151426f9a767SRodney W. Grimes 15151c7c3c6aSMatthew Dillon for (i = 0; i < bp->b_npages; ++i) { 15161c7c3c6aSMatthew Dillon vm_page_t m = bp->b_pages[i]; 1517e47ed70bSJohn Dyson 15181c7c3c6aSMatthew Dillon vm_page_flag_clear(m, PG_SWAPINPROG); 1519e47ed70bSJohn Dyson 1520c244d2deSPoul-Henning Kamp if (bp->b_ioflags & BIO_ERROR) { 1521ffc82b0aSJohn Dyson /* 15221c7c3c6aSMatthew Dillon * If an error occurs I'd love to throw the swapblk 15231c7c3c6aSMatthew Dillon * away without freeing it back to swapspace, so it 15241c7c3c6aSMatthew Dillon * can never be used again. But I can't from an 15251c7c3c6aSMatthew Dillon * interrupt. 1526ffc82b0aSJohn Dyson */ 15271c7c3c6aSMatthew Dillon 152821144e3bSPoul-Henning Kamp if (bp->b_iocmd == BIO_READ) { 15291c7c3c6aSMatthew Dillon /* 15301c7c3c6aSMatthew Dillon * When reading, reqpage needs to stay 15311c7c3c6aSMatthew Dillon * locked for the parent, but all other 15321c7c3c6aSMatthew Dillon * pages can be freed. We still want to 15331c7c3c6aSMatthew Dillon * wakeup the parent waiting on the page, 15341c7c3c6aSMatthew Dillon * though. ( also: pg_reqpage can be -1 and 15351c7c3c6aSMatthew Dillon * not match anything ). 15361c7c3c6aSMatthew Dillon * 15371c7c3c6aSMatthew Dillon * We have to wake specifically requested pages 15381c7c3c6aSMatthew Dillon * up too because we cleared PG_SWAPINPROG and 15391c7c3c6aSMatthew Dillon * someone may be waiting for that. 15401c7c3c6aSMatthew Dillon * 15411c7c3c6aSMatthew Dillon * NOTE: for reads, m->dirty will probably 1542956f3135SPhilippe Charnier * be overridden by the original caller of 15431c7c3c6aSMatthew Dillon * getpages so don't play cute tricks here. 15441c7c3c6aSMatthew Dillon * 15451c7c3c6aSMatthew Dillon * XXX it may not be legal to free the page 15461c7c3c6aSMatthew Dillon * here as this messes with the object->memq's. 15471c7c3c6aSMatthew Dillon */ 15481c7c3c6aSMatthew Dillon 15491c7c3c6aSMatthew Dillon m->valid = 0; 15501c7c3c6aSMatthew Dillon vm_page_flag_clear(m, PG_ZERO); 15511c7c3c6aSMatthew Dillon 15521c7c3c6aSMatthew Dillon if (i != bp->b_pager.pg_reqpage) 15531c7c3c6aSMatthew Dillon vm_page_free(m); 15541c7c3c6aSMatthew Dillon else 15551c7c3c6aSMatthew Dillon vm_page_flash(m); 15561c7c3c6aSMatthew Dillon /* 15571c7c3c6aSMatthew Dillon * If i == bp->b_pager.pg_reqpage, do not wake 15581c7c3c6aSMatthew Dillon * the page up. The caller needs to. 15591c7c3c6aSMatthew Dillon */ 15601c7c3c6aSMatthew Dillon } else { 15611c7c3c6aSMatthew Dillon /* 15621c7c3c6aSMatthew Dillon * If a write error occurs, reactivate page 15631c7c3c6aSMatthew Dillon * so it doesn't clog the inactive list, 15641c7c3c6aSMatthew Dillon * then finish the I/O. 15651c7c3c6aSMatthew Dillon */ 15667dbf82dcSMatthew Dillon vm_page_dirty(m); 15671c7c3c6aSMatthew Dillon vm_page_activate(m); 15681c7c3c6aSMatthew Dillon vm_page_io_finish(m); 15691c7c3c6aSMatthew Dillon } 157021144e3bSPoul-Henning Kamp } else if (bp->b_iocmd == BIO_READ) { 15711c7c3c6aSMatthew Dillon /* 15721c7c3c6aSMatthew Dillon * For read success, clear dirty bits. Nobody should 15731c7c3c6aSMatthew Dillon * have this page mapped but don't take any chances, 15741c7c3c6aSMatthew Dillon * make sure the pmap modify bits are also cleared. 15751c7c3c6aSMatthew Dillon * 15761c7c3c6aSMatthew Dillon * NOTE: for reads, m->dirty will probably be 1577956f3135SPhilippe Charnier * overridden by the original caller of getpages so 15781c7c3c6aSMatthew Dillon * we cannot set them in order to free the underlying 15791c7c3c6aSMatthew Dillon * swap in a low-swap situation. I don't think we'd 15801c7c3c6aSMatthew Dillon * want to do that anyway, but it was an optimization 15811c7c3c6aSMatthew Dillon * that existed in the old swapper for a time before 15821c7c3c6aSMatthew Dillon * it got ripped out due to precisely this problem. 15831c7c3c6aSMatthew Dillon * 15841c7c3c6aSMatthew Dillon * clear PG_ZERO in page. 15851c7c3c6aSMatthew Dillon * 15861c7c3c6aSMatthew Dillon * If not the requested page then deactivate it. 15871c7c3c6aSMatthew Dillon * 15881c7c3c6aSMatthew Dillon * Note that the requested page, reqpage, is left 15891c7c3c6aSMatthew Dillon * busied, but we still have to wake it up. The 15901c7c3c6aSMatthew Dillon * other pages are released (unbusied) by 15911c7c3c6aSMatthew Dillon * vm_page_wakeup(). We do not set reqpage's 15921c7c3c6aSMatthew Dillon * valid bits here, it is up to the caller. 15931c7c3c6aSMatthew Dillon */ 15941c7c3c6aSMatthew Dillon 15951c7c3c6aSMatthew Dillon pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 15961c7c3c6aSMatthew Dillon m->valid = VM_PAGE_BITS_ALL; 15972c28a105SAlan Cox vm_page_undirty(m); 15981c7c3c6aSMatthew Dillon vm_page_flag_clear(m, PG_ZERO); 15991c7c3c6aSMatthew Dillon 16001c7c3c6aSMatthew Dillon /* 16011c7c3c6aSMatthew Dillon * We have to wake specifically requested pages 16021c7c3c6aSMatthew Dillon * up too because we cleared PG_SWAPINPROG and 16031c7c3c6aSMatthew Dillon * could be waiting for it in getpages. However, 16041c7c3c6aSMatthew Dillon * be sure to not unbusy getpages specifically 16051c7c3c6aSMatthew Dillon * requested page - getpages expects it to be 16061c7c3c6aSMatthew Dillon * left busy. 16071c7c3c6aSMatthew Dillon */ 16081c7c3c6aSMatthew Dillon if (i != bp->b_pager.pg_reqpage) { 16091c7c3c6aSMatthew Dillon vm_page_deactivate(m); 16101c7c3c6aSMatthew Dillon vm_page_wakeup(m); 16111c7c3c6aSMatthew Dillon } else { 16121c7c3c6aSMatthew Dillon vm_page_flash(m); 16131c7c3c6aSMatthew Dillon } 16141c7c3c6aSMatthew Dillon } else { 16151c7c3c6aSMatthew Dillon /* 16161c7c3c6aSMatthew Dillon * For write success, clear the modify and dirty 16171c7c3c6aSMatthew Dillon * status, then finish the I/O ( which decrements the 16181c7c3c6aSMatthew Dillon * busy count and possibly wakes waiter's up ). 16191c7c3c6aSMatthew Dillon */ 16201c7c3c6aSMatthew Dillon vm_page_protect(m, VM_PROT_READ); 16211c7c3c6aSMatthew Dillon pmap_clear_modify(VM_PAGE_TO_PHYS(m)); 1622c52e7044SAlan Cox vm_page_undirty(m); 16231c7c3c6aSMatthew Dillon vm_page_io_finish(m); 1624ffc82b0aSJohn Dyson } 1625df8bae1dSRodney W. Grimes } 162626f9a767SRodney W. Grimes 16271c7c3c6aSMatthew Dillon /* 16281c7c3c6aSMatthew Dillon * adjust pip. NOTE: the original parent may still have its own 16291c7c3c6aSMatthew Dillon * pip refs on the object. 16301c7c3c6aSMatthew Dillon */ 16310d94caffSDavid Greenman 16321c7c3c6aSMatthew Dillon if (object) 16331c7c3c6aSMatthew Dillon vm_object_pip_wakeupn(object, bp->b_npages); 163426f9a767SRodney W. Grimes 16351c7c3c6aSMatthew Dillon /* 16361c7c3c6aSMatthew Dillon * release the physical I/O buffer 16371c7c3c6aSMatthew Dillon */ 1638e47ed70bSJohn Dyson 1639327f4e83SMatthew Dillon relpbuf( 1640327f4e83SMatthew Dillon bp, 164121144e3bSPoul-Henning Kamp ((bp->b_iocmd == BIO_READ) ? &nsw_rcount : 1642327f4e83SMatthew Dillon ((bp->b_flags & B_ASYNC) ? 1643327f4e83SMatthew Dillon &nsw_wcount_async : 1644327f4e83SMatthew Dillon &nsw_wcount_sync 1645327f4e83SMatthew Dillon ) 1646327f4e83SMatthew Dillon ) 1647327f4e83SMatthew Dillon ); 164826f9a767SRodney W. Grimes splx(s); 164926f9a767SRodney W. Grimes } 16501c7c3c6aSMatthew Dillon 16511c7c3c6aSMatthew Dillon /************************************************************************ 16521c7c3c6aSMatthew Dillon * SWAP META DATA * 16531c7c3c6aSMatthew Dillon ************************************************************************ 16541c7c3c6aSMatthew Dillon * 16551c7c3c6aSMatthew Dillon * These routines manipulate the swap metadata stored in the 16564dcc5c2dSMatthew Dillon * OBJT_SWAP object. All swp_*() routines must be called at 16574dcc5c2dSMatthew Dillon * splvm() because swap can be freed up by the low level vm_page 16584dcc5c2dSMatthew Dillon * code which might be called from interrupts beyond what splbio() covers. 16591c7c3c6aSMatthew Dillon * 16604dcc5c2dSMatthew Dillon * Swap metadata is implemented with a global hash and not directly 16614dcc5c2dSMatthew Dillon * linked into the object. Instead the object simply contains 16624dcc5c2dSMatthew Dillon * appropriate tracking counters. 16631c7c3c6aSMatthew Dillon */ 16641c7c3c6aSMatthew Dillon 16651c7c3c6aSMatthew Dillon /* 16661c7c3c6aSMatthew Dillon * SWP_PAGER_HASH() - hash swap meta data 16671c7c3c6aSMatthew Dillon * 16684dcc5c2dSMatthew Dillon * This is an inline helper function which hashes the swapblk given 16691c7c3c6aSMatthew Dillon * the object and page index. It returns a pointer to a pointer 16701c7c3c6aSMatthew Dillon * to the object, or a pointer to a NULL pointer if it could not 16711c7c3c6aSMatthew Dillon * find a swapblk. 16724dcc5c2dSMatthew Dillon * 16734dcc5c2dSMatthew Dillon * This routine must be called at splvm(). 16741c7c3c6aSMatthew Dillon */ 16751c7c3c6aSMatthew Dillon 16761c7c3c6aSMatthew Dillon static __inline struct swblock ** 16774dcc5c2dSMatthew Dillon swp_pager_hash(vm_object_t object, vm_pindex_t index) 16781c7c3c6aSMatthew Dillon { 16791c7c3c6aSMatthew Dillon struct swblock **pswap; 16801c7c3c6aSMatthew Dillon struct swblock *swap; 16811c7c3c6aSMatthew Dillon 16821c7c3c6aSMatthew Dillon index &= ~SWAP_META_MASK; 1683af647ddeSBruce Evans pswap = &swhash[(index ^ (int)(intptr_t)object) & swhash_mask]; 16841c7c3c6aSMatthew Dillon 16851c7c3c6aSMatthew Dillon while ((swap = *pswap) != NULL) { 16861c7c3c6aSMatthew Dillon if (swap->swb_object == object && 16871c7c3c6aSMatthew Dillon swap->swb_index == index 16881c7c3c6aSMatthew Dillon ) { 16891c7c3c6aSMatthew Dillon break; 16901c7c3c6aSMatthew Dillon } 16911c7c3c6aSMatthew Dillon pswap = &swap->swb_hnext; 16921c7c3c6aSMatthew Dillon } 16931c7c3c6aSMatthew Dillon return(pswap); 16941c7c3c6aSMatthew Dillon } 16951c7c3c6aSMatthew Dillon 16961c7c3c6aSMatthew Dillon /* 16971c7c3c6aSMatthew Dillon * SWP_PAGER_META_BUILD() - add swap block to swap meta data for object 16981c7c3c6aSMatthew Dillon * 16991c7c3c6aSMatthew Dillon * We first convert the object to a swap object if it is a default 17001c7c3c6aSMatthew Dillon * object. 17011c7c3c6aSMatthew Dillon * 17021c7c3c6aSMatthew Dillon * The specified swapblk is added to the object's swap metadata. If 17031c7c3c6aSMatthew Dillon * the swapblk is not valid, it is freed instead. Any previously 17041c7c3c6aSMatthew Dillon * assigned swapblk is freed. 17054dcc5c2dSMatthew Dillon * 17064dcc5c2dSMatthew Dillon * This routine must be called at splvm(), except when used to convert 17074dcc5c2dSMatthew Dillon * an OBJT_DEFAULT object into an OBJT_SWAP object. 17084dcc5c2dSMatthew Dillon 17091c7c3c6aSMatthew Dillon */ 17101c7c3c6aSMatthew Dillon 17111c7c3c6aSMatthew Dillon static void 17121c7c3c6aSMatthew Dillon swp_pager_meta_build( 17131c7c3c6aSMatthew Dillon vm_object_t object, 17144dcc5c2dSMatthew Dillon vm_pindex_t index, 17154dcc5c2dSMatthew Dillon daddr_t swapblk 17161c7c3c6aSMatthew Dillon ) { 17171c7c3c6aSMatthew Dillon struct swblock *swap; 17181c7c3c6aSMatthew Dillon struct swblock **pswap; 17191c7c3c6aSMatthew Dillon 17201c7c3c6aSMatthew Dillon /* 17211c7c3c6aSMatthew Dillon * Convert default object to swap object if necessary 17221c7c3c6aSMatthew Dillon */ 17231c7c3c6aSMatthew Dillon 17241c7c3c6aSMatthew Dillon if (object->type != OBJT_SWAP) { 17251c7c3c6aSMatthew Dillon object->type = OBJT_SWAP; 17261c7c3c6aSMatthew Dillon object->un_pager.swp.swp_bcount = 0; 17271c7c3c6aSMatthew Dillon 17281c7c3c6aSMatthew Dillon if (object->handle != NULL) { 17291c7c3c6aSMatthew Dillon TAILQ_INSERT_TAIL( 17301c7c3c6aSMatthew Dillon NOBJLIST(object->handle), 17311c7c3c6aSMatthew Dillon object, 17321c7c3c6aSMatthew Dillon pager_object_list 17331c7c3c6aSMatthew Dillon ); 17341c7c3c6aSMatthew Dillon } else { 17351c7c3c6aSMatthew Dillon TAILQ_INSERT_TAIL( 17361c7c3c6aSMatthew Dillon &swap_pager_un_object_list, 17371c7c3c6aSMatthew Dillon object, 17381c7c3c6aSMatthew Dillon pager_object_list 17391c7c3c6aSMatthew Dillon ); 17401c7c3c6aSMatthew Dillon } 17411c7c3c6aSMatthew Dillon } 17421c7c3c6aSMatthew Dillon 17431c7c3c6aSMatthew Dillon /* 17441c7c3c6aSMatthew Dillon * Locate hash entry. If not found create, but if we aren't adding 17454dcc5c2dSMatthew Dillon * anything just return. If we run out of space in the map we wait 17464dcc5c2dSMatthew Dillon * and, since the hash table may have changed, retry. 17471c7c3c6aSMatthew Dillon */ 17481c7c3c6aSMatthew Dillon 17494dcc5c2dSMatthew Dillon retry: 17501c7c3c6aSMatthew Dillon pswap = swp_pager_hash(object, index); 17511c7c3c6aSMatthew Dillon 17521c7c3c6aSMatthew Dillon if ((swap = *pswap) == NULL) { 17531c7c3c6aSMatthew Dillon int i; 17541c7c3c6aSMatthew Dillon 17551c7c3c6aSMatthew Dillon if (swapblk == SWAPBLK_NONE) 17561c7c3c6aSMatthew Dillon return; 17571c7c3c6aSMatthew Dillon 17581c7c3c6aSMatthew Dillon swap = *pswap = zalloc(swap_zone); 17594dcc5c2dSMatthew Dillon if (swap == NULL) { 17604dcc5c2dSMatthew Dillon VM_WAIT; 17614dcc5c2dSMatthew Dillon goto retry; 17624dcc5c2dSMatthew Dillon } 17631c7c3c6aSMatthew Dillon swap->swb_hnext = NULL; 17641c7c3c6aSMatthew Dillon swap->swb_object = object; 17651c7c3c6aSMatthew Dillon swap->swb_index = index & ~SWAP_META_MASK; 17661c7c3c6aSMatthew Dillon swap->swb_count = 0; 17671c7c3c6aSMatthew Dillon 17681c7c3c6aSMatthew Dillon ++object->un_pager.swp.swp_bcount; 17691c7c3c6aSMatthew Dillon 17701c7c3c6aSMatthew Dillon for (i = 0; i < SWAP_META_PAGES; ++i) 17711c7c3c6aSMatthew Dillon swap->swb_pages[i] = SWAPBLK_NONE; 17721c7c3c6aSMatthew Dillon } 17731c7c3c6aSMatthew Dillon 17741c7c3c6aSMatthew Dillon /* 17751c7c3c6aSMatthew Dillon * Delete prior contents of metadata 17761c7c3c6aSMatthew Dillon */ 17771c7c3c6aSMatthew Dillon 17781c7c3c6aSMatthew Dillon index &= SWAP_META_MASK; 17791c7c3c6aSMatthew Dillon 17801c7c3c6aSMatthew Dillon if (swap->swb_pages[index] != SWAPBLK_NONE) { 17814dcc5c2dSMatthew Dillon swp_pager_freeswapspace(swap->swb_pages[index], 1); 17821c7c3c6aSMatthew Dillon --swap->swb_count; 17831c7c3c6aSMatthew Dillon } 17841c7c3c6aSMatthew Dillon 17851c7c3c6aSMatthew Dillon /* 17861c7c3c6aSMatthew Dillon * Enter block into metadata 17871c7c3c6aSMatthew Dillon */ 17881c7c3c6aSMatthew Dillon 17891c7c3c6aSMatthew Dillon swap->swb_pages[index] = swapblk; 17904dcc5c2dSMatthew Dillon if (swapblk != SWAPBLK_NONE) 17911c7c3c6aSMatthew Dillon ++swap->swb_count; 17921c7c3c6aSMatthew Dillon } 17931c7c3c6aSMatthew Dillon 17941c7c3c6aSMatthew Dillon /* 17951c7c3c6aSMatthew Dillon * SWP_PAGER_META_FREE() - free a range of blocks in the object's swap metadata 17961c7c3c6aSMatthew Dillon * 17971c7c3c6aSMatthew Dillon * The requested range of blocks is freed, with any associated swap 17981c7c3c6aSMatthew Dillon * returned to the swap bitmap. 17991c7c3c6aSMatthew Dillon * 18001c7c3c6aSMatthew Dillon * This routine will free swap metadata structures as they are cleaned 18011c7c3c6aSMatthew Dillon * out. This routine does *NOT* operate on swap metadata associated 18021c7c3c6aSMatthew Dillon * with resident pages. 18031c7c3c6aSMatthew Dillon * 18041c7c3c6aSMatthew Dillon * This routine must be called at splvm() 18051c7c3c6aSMatthew Dillon */ 18061c7c3c6aSMatthew Dillon 18071c7c3c6aSMatthew Dillon static void 18084dcc5c2dSMatthew Dillon swp_pager_meta_free(vm_object_t object, vm_pindex_t index, daddr_t count) 18091c7c3c6aSMatthew Dillon { 18101c7c3c6aSMatthew Dillon if (object->type != OBJT_SWAP) 18111c7c3c6aSMatthew Dillon return; 18121c7c3c6aSMatthew Dillon 18131c7c3c6aSMatthew Dillon while (count > 0) { 18141c7c3c6aSMatthew Dillon struct swblock **pswap; 18151c7c3c6aSMatthew Dillon struct swblock *swap; 18161c7c3c6aSMatthew Dillon 18171c7c3c6aSMatthew Dillon pswap = swp_pager_hash(object, index); 18181c7c3c6aSMatthew Dillon 18191c7c3c6aSMatthew Dillon if ((swap = *pswap) != NULL) { 18201c7c3c6aSMatthew Dillon daddr_t v = swap->swb_pages[index & SWAP_META_MASK]; 18211c7c3c6aSMatthew Dillon 18221c7c3c6aSMatthew Dillon if (v != SWAPBLK_NONE) { 18231c7c3c6aSMatthew Dillon swp_pager_freeswapspace(v, 1); 18241c7c3c6aSMatthew Dillon swap->swb_pages[index & SWAP_META_MASK] = 18251c7c3c6aSMatthew Dillon SWAPBLK_NONE; 18261c7c3c6aSMatthew Dillon if (--swap->swb_count == 0) { 18271c7c3c6aSMatthew Dillon *pswap = swap->swb_hnext; 18281c7c3c6aSMatthew Dillon zfree(swap_zone, swap); 18291c7c3c6aSMatthew Dillon --object->un_pager.swp.swp_bcount; 18301c7c3c6aSMatthew Dillon } 18311c7c3c6aSMatthew Dillon } 18321c7c3c6aSMatthew Dillon --count; 18331c7c3c6aSMatthew Dillon ++index; 18341c7c3c6aSMatthew Dillon } else { 18354dcc5c2dSMatthew Dillon int n = SWAP_META_PAGES - (index & SWAP_META_MASK); 18361c7c3c6aSMatthew Dillon count -= n; 18371c7c3c6aSMatthew Dillon index += n; 18381c7c3c6aSMatthew Dillon } 18391c7c3c6aSMatthew Dillon } 18401c7c3c6aSMatthew Dillon } 18411c7c3c6aSMatthew Dillon 18421c7c3c6aSMatthew Dillon /* 18431c7c3c6aSMatthew Dillon * SWP_PAGER_META_FREE_ALL() - destroy all swap metadata associated with object 18441c7c3c6aSMatthew Dillon * 18451c7c3c6aSMatthew Dillon * This routine locates and destroys all swap metadata associated with 18461c7c3c6aSMatthew Dillon * an object. 18474dcc5c2dSMatthew Dillon * 18484dcc5c2dSMatthew Dillon * This routine must be called at splvm() 18491c7c3c6aSMatthew Dillon */ 18501c7c3c6aSMatthew Dillon 18511c7c3c6aSMatthew Dillon static void 18521c7c3c6aSMatthew Dillon swp_pager_meta_free_all(vm_object_t object) 18531c7c3c6aSMatthew Dillon { 18541c7c3c6aSMatthew Dillon daddr_t index = 0; 18551c7c3c6aSMatthew Dillon 18561c7c3c6aSMatthew Dillon if (object->type != OBJT_SWAP) 18571c7c3c6aSMatthew Dillon return; 18581c7c3c6aSMatthew Dillon 18591c7c3c6aSMatthew Dillon while (object->un_pager.swp.swp_bcount) { 18601c7c3c6aSMatthew Dillon struct swblock **pswap; 18611c7c3c6aSMatthew Dillon struct swblock *swap; 18621c7c3c6aSMatthew Dillon 18631c7c3c6aSMatthew Dillon pswap = swp_pager_hash(object, index); 18641c7c3c6aSMatthew Dillon if ((swap = *pswap) != NULL) { 18651c7c3c6aSMatthew Dillon int i; 18661c7c3c6aSMatthew Dillon 18671c7c3c6aSMatthew Dillon for (i = 0; i < SWAP_META_PAGES; ++i) { 18681c7c3c6aSMatthew Dillon daddr_t v = swap->swb_pages[i]; 18691c7c3c6aSMatthew Dillon if (v != SWAPBLK_NONE) { 18701c7c3c6aSMatthew Dillon --swap->swb_count; 18714dcc5c2dSMatthew Dillon swp_pager_freeswapspace(v, 1); 18721c7c3c6aSMatthew Dillon } 18731c7c3c6aSMatthew Dillon } 18741c7c3c6aSMatthew Dillon if (swap->swb_count != 0) 18751c7c3c6aSMatthew Dillon panic("swap_pager_meta_free_all: swb_count != 0"); 18761c7c3c6aSMatthew Dillon *pswap = swap->swb_hnext; 18771c7c3c6aSMatthew Dillon zfree(swap_zone, swap); 18781c7c3c6aSMatthew Dillon --object->un_pager.swp.swp_bcount; 18791c7c3c6aSMatthew Dillon } 18801c7c3c6aSMatthew Dillon index += SWAP_META_PAGES; 18811c7c3c6aSMatthew Dillon if (index > 0x20000000) 18821c7c3c6aSMatthew Dillon panic("swp_pager_meta_free_all: failed to locate all swap meta blocks"); 18831c7c3c6aSMatthew Dillon } 18841c7c3c6aSMatthew Dillon } 18851c7c3c6aSMatthew Dillon 18861c7c3c6aSMatthew Dillon /* 18871c7c3c6aSMatthew Dillon * SWP_PAGER_METACTL() - misc control of swap and vm_page_t meta data. 18881c7c3c6aSMatthew Dillon * 18891c7c3c6aSMatthew Dillon * This routine is capable of looking up, popping, or freeing 18901c7c3c6aSMatthew Dillon * swapblk assignments in the swap meta data or in the vm_page_t. 18911c7c3c6aSMatthew Dillon * The routine typically returns the swapblk being looked-up, or popped, 18921c7c3c6aSMatthew Dillon * or SWAPBLK_NONE if the block was freed, or SWAPBLK_NONE if the block 18931c7c3c6aSMatthew Dillon * was invalid. This routine will automatically free any invalid 18941c7c3c6aSMatthew Dillon * meta-data swapblks. 18951c7c3c6aSMatthew Dillon * 18961c7c3c6aSMatthew Dillon * It is not possible to store invalid swapblks in the swap meta data 18971c7c3c6aSMatthew Dillon * (other then a literal 'SWAPBLK_NONE'), so we don't bother checking. 18981c7c3c6aSMatthew Dillon * 18991c7c3c6aSMatthew Dillon * When acting on a busy resident page and paging is in progress, we 19001c7c3c6aSMatthew Dillon * have to wait until paging is complete but otherwise can act on the 19011c7c3c6aSMatthew Dillon * busy page. 19021c7c3c6aSMatthew Dillon * 19034dcc5c2dSMatthew Dillon * This routine must be called at splvm(). 19041c7c3c6aSMatthew Dillon * 19054dcc5c2dSMatthew Dillon * SWM_FREE remove and free swap block from metadata 19061c7c3c6aSMatthew Dillon * SWM_POP remove from meta data but do not free.. pop it out 19071c7c3c6aSMatthew Dillon */ 19081c7c3c6aSMatthew Dillon 19091c7c3c6aSMatthew Dillon static daddr_t 19101c7c3c6aSMatthew Dillon swp_pager_meta_ctl( 19111c7c3c6aSMatthew Dillon vm_object_t object, 19121c7c3c6aSMatthew Dillon vm_pindex_t index, 19131c7c3c6aSMatthew Dillon int flags 19141c7c3c6aSMatthew Dillon ) { 19154dcc5c2dSMatthew Dillon struct swblock **pswap; 19164dcc5c2dSMatthew Dillon struct swblock *swap; 19174dcc5c2dSMatthew Dillon daddr_t r1; 19184dcc5c2dSMatthew Dillon 19191c7c3c6aSMatthew Dillon /* 19201c7c3c6aSMatthew Dillon * The meta data only exists of the object is OBJT_SWAP 19211c7c3c6aSMatthew Dillon * and even then might not be allocated yet. 19221c7c3c6aSMatthew Dillon */ 19231c7c3c6aSMatthew Dillon 19244dcc5c2dSMatthew Dillon if (object->type != OBJT_SWAP) 19251c7c3c6aSMatthew Dillon return(SWAPBLK_NONE); 19261c7c3c6aSMatthew Dillon 19274dcc5c2dSMatthew Dillon r1 = SWAPBLK_NONE; 19281c7c3c6aSMatthew Dillon pswap = swp_pager_hash(object, index); 19291c7c3c6aSMatthew Dillon 19301c7c3c6aSMatthew Dillon if ((swap = *pswap) != NULL) { 19314dcc5c2dSMatthew Dillon index &= SWAP_META_MASK; 19321c7c3c6aSMatthew Dillon r1 = swap->swb_pages[index]; 19331c7c3c6aSMatthew Dillon 19341c7c3c6aSMatthew Dillon if (r1 != SWAPBLK_NONE) { 19351c7c3c6aSMatthew Dillon if (flags & SWM_FREE) { 19364dcc5c2dSMatthew Dillon swp_pager_freeswapspace(r1, 1); 19371c7c3c6aSMatthew Dillon r1 = SWAPBLK_NONE; 19381c7c3c6aSMatthew Dillon } 19391c7c3c6aSMatthew Dillon if (flags & (SWM_FREE|SWM_POP)) { 19401c7c3c6aSMatthew Dillon swap->swb_pages[index] = SWAPBLK_NONE; 19411c7c3c6aSMatthew Dillon if (--swap->swb_count == 0) { 19421c7c3c6aSMatthew Dillon *pswap = swap->swb_hnext; 19431c7c3c6aSMatthew Dillon zfree(swap_zone, swap); 19441c7c3c6aSMatthew Dillon --object->un_pager.swp.swp_bcount; 19451c7c3c6aSMatthew Dillon } 19461c7c3c6aSMatthew Dillon } 19471c7c3c6aSMatthew Dillon } 19481c7c3c6aSMatthew Dillon } 19491c7c3c6aSMatthew Dillon return(r1); 19501c7c3c6aSMatthew Dillon } 19511c7c3c6aSMatthew Dillon 1952e4057dbdSPoul-Henning Kamp /******************************************************** 1953e4057dbdSPoul-Henning Kamp * CHAINING FUNCTIONS * 1954e4057dbdSPoul-Henning Kamp ******************************************************** 1955e4057dbdSPoul-Henning Kamp * 1956e4057dbdSPoul-Henning Kamp * These functions support recursion of I/O operations 1957e4057dbdSPoul-Henning Kamp * on bp's, typically by chaining one or more 'child' bp's 1958e4057dbdSPoul-Henning Kamp * to the parent. Synchronous, asynchronous, and semi-synchronous 1959e4057dbdSPoul-Henning Kamp * chaining is possible. 1960e4057dbdSPoul-Henning Kamp */ 1961e4057dbdSPoul-Henning Kamp 1962e4057dbdSPoul-Henning Kamp /* 1963e4057dbdSPoul-Henning Kamp * vm_pager_chain_iodone: 1964e4057dbdSPoul-Henning Kamp * 1965e4057dbdSPoul-Henning Kamp * io completion routine for child bp. Currently we fudge a bit 1966e4057dbdSPoul-Henning Kamp * on dealing with b_resid. Since users of these routines may issue 1967e4057dbdSPoul-Henning Kamp * multiple children simultaneously, sequencing of the error can be lost. 1968e4057dbdSPoul-Henning Kamp */ 1969e4057dbdSPoul-Henning Kamp 1970e4057dbdSPoul-Henning Kamp static void 1971e4057dbdSPoul-Henning Kamp vm_pager_chain_iodone(struct buf *nbp) 1972e4057dbdSPoul-Henning Kamp { 19730b441832SPoul-Henning Kamp struct bio *bp; 19740b441832SPoul-Henning Kamp u_int *count; 1975e4057dbdSPoul-Henning Kamp 19760b441832SPoul-Henning Kamp bp = nbp->b_caller1; 19770b441832SPoul-Henning Kamp count = (u_int *)&(bp->bio_caller1); 19780b441832SPoul-Henning Kamp if (bp != NULL) { 1979e4057dbdSPoul-Henning Kamp if (nbp->b_ioflags & BIO_ERROR) { 19800b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 19810b441832SPoul-Henning Kamp bp->bio_error = nbp->b_error; 1982e4057dbdSPoul-Henning Kamp } else if (nbp->b_resid != 0) { 19830b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 19840b441832SPoul-Henning Kamp bp->bio_error = EINVAL; 1985e4057dbdSPoul-Henning Kamp } else { 19860b441832SPoul-Henning Kamp bp->bio_resid -= nbp->b_bcount; 1987e4057dbdSPoul-Henning Kamp } 19880b441832SPoul-Henning Kamp nbp->b_caller1 = NULL; 19890b441832SPoul-Henning Kamp --(*count); 19900b441832SPoul-Henning Kamp if (bp->bio_flags & BIO_FLAG1) { 19910b441832SPoul-Henning Kamp bp->bio_flags &= ~BIO_FLAG1; 1992e4057dbdSPoul-Henning Kamp wakeup(bp); 1993e4057dbdSPoul-Henning Kamp } 1994e4057dbdSPoul-Henning Kamp } 1995e4057dbdSPoul-Henning Kamp nbp->b_flags |= B_DONE; 1996e4057dbdSPoul-Henning Kamp nbp->b_flags &= ~B_ASYNC; 1997e4057dbdSPoul-Henning Kamp relpbuf(nbp, NULL); 1998e4057dbdSPoul-Henning Kamp } 1999e4057dbdSPoul-Henning Kamp 2000e4057dbdSPoul-Henning Kamp /* 2001e4057dbdSPoul-Henning Kamp * getchainbuf: 2002e4057dbdSPoul-Henning Kamp * 2003e4057dbdSPoul-Henning Kamp * Obtain a physical buffer and chain it to its parent buffer. When 2004e4057dbdSPoul-Henning Kamp * I/O completes, the parent buffer will be B_SIGNAL'd. Errors are 2005e4057dbdSPoul-Henning Kamp * automatically propagated to the parent 2006e4057dbdSPoul-Henning Kamp */ 2007e4057dbdSPoul-Henning Kamp 2008e4057dbdSPoul-Henning Kamp struct buf * 20090b441832SPoul-Henning Kamp getchainbuf(struct bio *bp, struct vnode *vp, int flags) 2010e4057dbdSPoul-Henning Kamp { 2011e4057dbdSPoul-Henning Kamp struct buf *nbp = getpbuf(NULL); 20120b441832SPoul-Henning Kamp u_int *count = (u_int *)&(bp->bio_caller1); 2013e4057dbdSPoul-Henning Kamp 20140b441832SPoul-Henning Kamp nbp->b_caller1 = bp; 20150b441832SPoul-Henning Kamp ++(*count); 2016e4057dbdSPoul-Henning Kamp 20170b441832SPoul-Henning Kamp if (*count > 4) 2018e4057dbdSPoul-Henning Kamp waitchainbuf(bp, 4, 0); 2019e4057dbdSPoul-Henning Kamp 20200b441832SPoul-Henning Kamp nbp->b_iocmd = bp->bio_cmd; 20210b441832SPoul-Henning Kamp nbp->b_ioflags = bp->bio_flags & BIO_ORDERED; 2022e4057dbdSPoul-Henning Kamp nbp->b_flags = flags; 2023e4057dbdSPoul-Henning Kamp nbp->b_rcred = nbp->b_wcred = proc0.p_ucred; 2024e4057dbdSPoul-Henning Kamp nbp->b_iodone = vm_pager_chain_iodone; 2025e4057dbdSPoul-Henning Kamp 2026e4057dbdSPoul-Henning Kamp crhold(nbp->b_rcred); 2027e4057dbdSPoul-Henning Kamp crhold(nbp->b_wcred); 2028e4057dbdSPoul-Henning Kamp 2029e4057dbdSPoul-Henning Kamp if (vp) 2030e4057dbdSPoul-Henning Kamp pbgetvp(vp, nbp); 2031e4057dbdSPoul-Henning Kamp return(nbp); 2032e4057dbdSPoul-Henning Kamp } 2033e4057dbdSPoul-Henning Kamp 2034e4057dbdSPoul-Henning Kamp void 2035e4057dbdSPoul-Henning Kamp flushchainbuf(struct buf *nbp) 2036e4057dbdSPoul-Henning Kamp { 2037e4057dbdSPoul-Henning Kamp if (nbp->b_bcount) { 2038e4057dbdSPoul-Henning Kamp nbp->b_bufsize = nbp->b_bcount; 2039e4057dbdSPoul-Henning Kamp if (nbp->b_iocmd == BIO_WRITE) 2040e4057dbdSPoul-Henning Kamp nbp->b_dirtyend = nbp->b_bcount; 2041e4057dbdSPoul-Henning Kamp BUF_KERNPROC(nbp); 2042e4057dbdSPoul-Henning Kamp BUF_STRATEGY(nbp); 2043e4057dbdSPoul-Henning Kamp } else { 2044e4057dbdSPoul-Henning Kamp bufdone(nbp); 2045e4057dbdSPoul-Henning Kamp } 2046e4057dbdSPoul-Henning Kamp } 2047e4057dbdSPoul-Henning Kamp 2048e4057dbdSPoul-Henning Kamp void 20490b441832SPoul-Henning Kamp waitchainbuf(struct bio *bp, int limit, int done) 2050e4057dbdSPoul-Henning Kamp { 2051e4057dbdSPoul-Henning Kamp int s; 20520b441832SPoul-Henning Kamp u_int *count = (u_int *)&(bp->bio_caller1); 2053e4057dbdSPoul-Henning Kamp 2054e4057dbdSPoul-Henning Kamp s = splbio(); 20550b441832SPoul-Henning Kamp while (*count > limit) { 20560b441832SPoul-Henning Kamp bp->bio_flags |= BIO_FLAG1; 2057e4057dbdSPoul-Henning Kamp tsleep(bp, PRIBIO + 4, "bpchain", 0); 2058e4057dbdSPoul-Henning Kamp } 2059e4057dbdSPoul-Henning Kamp if (done) { 20600b441832SPoul-Henning Kamp if (bp->bio_resid != 0 && !(bp->bio_flags & BIO_ERROR)) { 20610b441832SPoul-Henning Kamp bp->bio_flags |= BIO_ERROR; 20620b441832SPoul-Henning Kamp bp->bio_error = EINVAL; 2063e4057dbdSPoul-Henning Kamp } 20640b441832SPoul-Henning Kamp biodone(bp); 2065e4057dbdSPoul-Henning Kamp } 2066e4057dbdSPoul-Henning Kamp splx(s); 2067e4057dbdSPoul-Henning Kamp } 2068e4057dbdSPoul-Henning Kamp 2069